diff --git a/docs/configuration/README.md b/docs/configuration/README.md
index 8803eb3b7f93a..9f143a835c5a8 100644
--- a/docs/configuration/README.md
+++ b/docs/configuration/README.md
@@ -1,32 +1,34 @@
# Configuring Loki

Loki is configured in a YAML file (usually referred to as `loki.yaml`)
which contains information on the Loki server and its individual components,
depending on which mode Loki is launched in.

-Configuration examples can be found in the [Configuration Examples](examples.md) document.
+Configuration examples can be found in the [Configuration Examples](./examples.md) document.

* [Printing Loki Config At Runtime](#printing-loki-config-at-runtime)
* [Configuration File Reference](#configuration-file-reference)
* [server_config](#server_config)
* [distributor_config](#distributor_config)
* [querier_config](#querier_config)
+* [query_frontend_config](#query_frontend_config)
+* [queryrange_config](#queryrange_config)
+* [frontend_worker_config](#frontend_worker_config)
* [ingester_client_config](#ingester_client_config)
-  * [grpc_client_config](#grpc_client_config)
* [ingester_config](#ingester_config)
-  * [lifecycler_config](#lifecycler_config)
-  * [ring_config](#ring_config)
+* [consul_config](#consul_config)
+* [etcd_config](#etcd_config)
* [memberlist_config](#memberlist_config)
* [storage_config](#storage_config)
-  * [cache_config](#cache_config)
* [chunk_store_config](#chunk_store_config)
+* [cache_config](#cache_config)
* [schema_config](#schema_config)
-  * [period_config](#period_config)
+* [period_config](#period_config)
* [limits_config](#limits_config)
-* [frontend_worker_config](#frontend_worker_config)
+* [grpc_client_config](#grpc_client_config)
* [table_manager_config](#table_manager_config)
-  * [provision_config](#provision_config)
-  * [auto_scaling_config](#auto_scaling_config)
+* [provision_config](#provision_config)
+* [auto_scaling_config](#auto_scaling_config)
* [tracing_config](#tracing_config)
* [Runtime Configuration file](#runtime-configuration-file)

@@ -58,22 +60,21 @@ non-list parameters the value is set to the specified default.

Generic placeholders are defined as follows:

* `<boolean>`: a boolean that can take the values `true` or `false`
* `<int>`: any integer matching the regular expression `[1-9]+[0-9]*`
* `<duration>`: a duration matching the regular expression `[0-9]+(ns|us|µs|ms|[smh])`
* `<labelname>`: a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*`
* `<labelvalue>`: a string of unicode characters
-* `<filename>`: a valid path relative to current working directory or an
-  absolute path.
+* `<filename>`: a valid path relative to the current working directory or an absolute path.
* `<host>`: a valid string consisting of a hostname or IP followed by an optional port number
* `<string>`: a regular string
* `<secret>`: a regular string that is a secret, such as a password
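For instance, an entry documented as `[http_listen_port: <int> | default = 80]` takes an integer and falls back to `80` when omitted. A concrete (hypothetical) `loki.yaml` would set it as a plain YAML key:

```yaml
server:
  http_listen_port: 3100
```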
Supported contents and default values of `loki.yaml`:

```yaml
# The module to run Loki with. Supported values
-# all, querier, table-manager, ingester, distributor
+# all, distributor, ingester, querier, query-frontend, table-manager.
[target: | default = "all"]

# Enables authentication through the X-Scope-OrgID header, which must be present
@@ -90,6 +91,13 @@ Supported contents and default values of `loki.yaml`:
# just the querier.
[querier: ]

+# The query_frontend_config configures the Loki query-frontend.
+[frontend: ]
+
+# The queryrange_config configures the query splitting and caching in the Loki
+# query-frontend.
+[query_range: ]
+
# Configures how the distributor will connect to ingesters. Only appropriate
# when running all modules, the distributor, or the querier.
[ingester_client: ]

@@ -98,10 +106,6 @@ Supported contents and default values of `loki.yaml`:
# key value store.
[ingester: ]

-# Configuration for an memberlist gossip ring. Only applies if
-# store is "memberlist"
-[memberlist: ]
-
# Configures where Loki will store data.
[storage_config: ]

@@ -124,56 +128,70 @@ Supported contents and default values of `loki.yaml`:
# Configuration for "runtime config" module, responsible for reloading runtime configuration file.
[runtime_config: ]

-#Configuration for tracing
+# Configuration for tracing
[tracing: ]
```

## server_config

-The `server_config` block configures Promtail's behavior as an HTTP server:
+The `server_config` block configures the HTTP and gRPC server of the launched service(s):

```yaml
# HTTP server listen host
+# CLI flag: -server.http-listen-address
[http_listen_address: ]

# HTTP server listen port
+# CLI flag: -server.http-listen-port
[http_listen_port: | default = 80]

# gRPC server listen host
+# CLI flag: -server.grpc-listen-address
[grpc_listen_address: ]

# gRPC server listen port
+# CLI flag: -server.grpc-listen-port
[grpc_listen_port: | default = 9095]

# Register instrumentation handlers (/metrics, etc.)
+# CLI flag: -server.register-instrumentation
[register_instrumentation: | default = true]

# Timeout for graceful shutdowns
+# CLI flag: -server.graceful-shutdown-timeout
[graceful_shutdown_timeout: | default = 30s]

# Read timeout for HTTP server
+# CLI flag: -server.http-read-timeout
[http_server_read_timeout: | default = 30s]

# Write timeout for HTTP server
+# CLI flag: -server.http-write-timeout
[http_server_write_timeout: | default = 30s]

# Idle timeout for HTTP server
+# CLI flag: -server.http-idle-timeout
[http_server_idle_timeout: | default = 120s]

# Max gRPC message size that can be received
+# CLI flag: -server.grpc-max-recv-msg-size-bytes
[grpc_server_max_recv_msg_size: | default = 4194304]

# Max gRPC message size that can be sent
+# CLI flag: -server.grpc-max-send-msg-size-bytes
[grpc_server_max_send_msg_size: | default = 4194304]

# Limit on the number of concurrent streams for gRPC calls (0 = unlimited)
+# CLI flag: -server.grpc-max-concurrent-streams
[grpc_server_max_concurrent_streams: | default = 100]

# Log only messages with the given severity or above. Supported values [debug,
# info, warn, error]
+# CLI flag: -log.level
[log_level: | default = "info"]

-# Base path to server all API routes from (e.g., /v1/).
+# Base path to serve all API routes from (e.g., /v1/).
+# CLI flag: -server.path-prefix
[http_path_prefix: ]
```
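As a quick illustration, a minimal sketch of a `server` block overriding the listen ports and log level (the values here are illustrative, not recommendations):

```yaml
server:
  http_listen_port: 3100
  grpc_listen_port: 9096
  log_level: warn
```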
## distributor_config

@@ -184,7 +202,33 @@ The `distributor_config` block configures the Loki Distributor.

```yaml
# Configures the distributors ring, used when the "global" ingestion rate
# strategy is enabled.
-[ring: ]
+ring:
+  kvstore:
+    # The backend storage to use for the ring. Supported values are
+    # consul, etcd, inmemory, memberlist
+    # CLI flag: -distributor.ring.store
+    store:
+
+    # The prefix for the keys in the store. Should end with a /.
+    # CLI flag: -distributor.ring.prefix
+    [prefix: | default = "collectors/"]
+
+    # Configuration for a Consul client. Only applies if store is "consul"
+    # The CLI flags prefix for this block config is: distributor.ring
+    [consul: ]
+
+    # Configuration for an ETCD v3 client. Only applies if store is "etcd"
+    # The CLI flags prefix for this block config is: distributor.ring
+    [etcd: ]
+
+    # Configuration for Gossip memberlist. Only applies if store is "memberlist"
+    # The CLI flags prefix for this block config is: distributor.ring
+    [memberlist: ]
+
+  # The heartbeat timeout after which ingesters are skipped for
+  # reading and writing.
+  # CLI flag: -distributor.ring.heartbeat-timeout
+  [heartbeat_timeout: | default = 1m]
```

## querier_config

@@ -192,32 +236,118 @@ The `distributor_config` block configures the Loki Distributor.
The `querier_config` block configures the Loki Querier.

```yaml
-# Timeout when querying ingesters or storage during the execution of a
-# query request.
+# Timeout when querying ingesters or storage during the execution of a query request.
+# CLI flag: -querier.query-timeout
[query_timeout: | default = 1m]

-# Limit of the duration for which live tailing requests should be
-# served.
+# Maximum duration for which live tailing requests should be served.
+# CLI flag: -querier.tail-max-duration
[tail_max_duration: | default = 1h]

-# Time to wait before sending more than the minimum successful query
-# requests.
+# Time to wait before sending more than the minimum successful query requests.
+# CLI flag: -querier.extra-query-delay
[extra_query_delay: | default = 0s]

# Maximum lookback beyond which queries are not sent to ingester.
# 0 means all queries are sent to ingester.
+# CLI flag: -querier.query-ingesters-within
[query_ingesters_within: | default = 0s]

# Configuration options for the LogQL engine.
engine:
  # Timeout for query execution
+  # CLI flag: -querier.engine.timeout
  [timeout: | default = 3m]

  # The maximum amount of time to look back for log lines. Only
  # applicable for instant log queries.
+  # CLI flag: -querier.engine.max-lookback-period
  [max_look_back_period: | default = 30s]
```
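For example, a sketch of a `querier` block that gives the LogQL engine more room for heavy instant queries (the timeout values are illustrative only):

```yaml
querier:
  query_timeout: 2m
  engine:
    timeout: 5m
    max_look_back_period: 30s
```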
+## query_frontend_config
+
+The `query_frontend_config` block configures the Loki query-frontend.
+
+```yaml
+# Maximum number of outstanding requests per tenant per frontend; requests
+# beyond this error with HTTP 429.
+# CLI flag: -querier.max-outstanding-requests-per-tenant
+[max_outstanding_per_tenant: | default = 100]
+
+# Compress HTTP responses.
+# CLI flag: -querier.compress-http-responses
+[compress_responses: | default = false]
+
+# URL of downstream Prometheus.
+# CLI flag: -frontend.downstream-url
+[downstream_url: | default = ""]
+
+# Log queries that are slower than the specified duration. Set to 0 to disable.
+# Set to < 0 to enable on all queries.
+# CLI flag: -frontend.log-queries-longer-than
+[log_queries_longer_than: | default = 0s]
+```
+
+## queryrange_config
+
+The `queryrange_config` block configures query splitting and caching in the Loki query-frontend.
+
+```yaml
+# Split queries by an interval and execute in parallel; 0 disables it. Use
+# a multiple of 24 hours (the same as the storage bucketing scheme) to avoid
+# queriers downloading and processing the same chunks. This also determines
+# how cache keys are chosen when result caching is enabled.
+# CLI flag: -querier.split-queries-by-interval
+[split_queries_by_interval: | default = 0s]
+
+# CLI flag: -querier.split-queries-by-day
+[split_queries_by_day: | default = false]
+
+# Mutate incoming queries to align their start and end with their step.
+# CLI flag: -querier.align-querier-with-step
+[align_queries_with_step: | default = false]
+
+results_cache:
+  # The CLI flags prefix for this block config is: frontend
+  cache:
+
+# Cache query results.
+# CLI flag: -querier.cache-results
+[cache_results: | default = false]
+
+# Maximum number of retries for a single request; beyond this, the downstream
+# error is returned.
+# CLI flag: -querier.max-retries-per-request
+[max_retries: | default = 5]
+
+# Perform query parallelisations based on storage sharding configuration and
+# query ASTs. This feature is supported only by the chunks storage engine.
+# CLI flag: -querier.parallelise-shardable-queries
+[parallelise_shardable_queries: | default = false]
+```
+
+## frontend_worker_config
+
+The `frontend_worker_config` block configures the worker, running within the Loki querier, that picks up and executes queries enqueued by the query-frontend.
+
+```yaml
+# Address of query frontend service, in host:port format.
+# CLI flag: -querier.frontend-address
+[frontend_address: | default = ""]
+
+# Number of simultaneous queries to process.
+# CLI flag: -querier.worker-parallelism
+[parallelism: | default = 10]
+
+# How often to query DNS.
+# CLI flag: -querier.dns-lookup-period
+[dns_lookup_duration: | default = 10s]
+
+# The CLI flags prefix for this block config is: querier.frontend-client
+[grpc_client_config: ]
+```
+
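To tie the two together, a hypothetical single-cluster setup might enable the frontend and point the querier workers at it, assuming the `frontend` and `frontend_worker` top-level keys; the service address is made up:

```yaml
frontend:
  compress_responses: true
  log_queries_longer_than: 5s

frontend_worker:
  frontend_address: query-frontend.loki.svc:9095
  parallelism: 10
```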
## ingester_client_config

The `ingester_client_config` block configures how connections to ingesters
@@ -227,10 +357,12 @@ operate.
# Configures how connections are pooled
pool_config:
  # Whether or not to do health checks.
+  # CLI flag: -distributor.health-check-ingesters
  [health_check_ingesters: | default = false]

-  # How frequently to clean up clients for servers that have gone away after
+  # How frequently to clean up clients for servers that have gone away after
  # a health check.
+  # CLI flag: -distributor.client-cleanup-period
  [client_cleanup_period: | default = 15s]

# How quickly a dead client will be removed after it has been detected
@@ -239,83 +371,113 @@ pool_config:
[remotetimeout: ]

# The remote request timeout on the client side.
+# CLI flag: -ingester.client.healthcheck-timeout
[remote_timeout: | default = 5s]

-# Configures how the gRPC connection to ingesters work as a
-# client.
+# Configures how the gRPC connection to ingesters works as a client.
+# The CLI flags prefix for this block config is: ingester.client
[grpc_client_config: ]
```

-### grpc_client_config
-
-The `grpc_client_config` block configures a client connection to a gRPC service.
-
-```yaml
-# The maximum size in bytes the client can receive
-[max_recv_msg_size: | default = 104857600]
-
-# The maximum size in bytes the client can send
-[max_send_msg_size: | default = 16777216]
-
-# Whether or not messages should be compressed
-[use_gzip_compression: | default = false]
-
-# Rate limit for gRPC client. 0 is disabled
-[rate_limit: | default = 0]
-
-# Rate limit burst for gRPC client.
-[rate_limit_burst: | default = 0]
-
-# Enable backoff and retry when a rate limit is hit.
-[backoff_on_ratelimits: | default = false]
-
-# Configures backoff when enabled.
-backoff_config:
-  # Minimum delay when backing off.
-  [min_period: | default = 100ms]
-
-  # The maximum delay when backing off.
-  [max_period: | default = 10s]
-
-  # Number of times to backoff and retry before failing.
-  [max_retries: | default = 10]
-```
-
## ingester_config

-The `ingester_config` block configures Ingesters.
+The `ingester_config` block configures the Loki Ingesters.

```yaml
# Configures how the lifecycle of the ingester will operate
# and where it will register for discovery.
-[lifecycler: ]
+lifecycler:
+  ring:
+    kvstore:
+      # Backend storage to use for the ring. Supported values are: consul, etcd,
+      # inmemory, memberlist
+      # CLI flag: -ring.store
+      [store: | default = "consul"]
+
+      # The prefix for the keys in the store. Should end with a /.
+      # CLI flag: -ring.prefix
+      [prefix: | default = "collectors/"]
+
+      # The consul_config configures the consul client.
+      [consul: ]
+
+      # The etcd_config configures the etcd client.
+      [etcd: ]
+
+      # Configuration for Gossip memberlist. Only applies if store is "memberlist"
+      [memberlist: ]
+
+    # The heartbeat timeout after which ingesters are skipped for reads/writes.
+    # CLI flag: -ring.heartbeat-timeout
+    [heartbeat_timeout: | default = 1m]
+
+    # The number of ingesters to write to and read from.
+    # CLI flag: -distributor.replication-factor
+    [replication_factor: | default = 3]
+
+  # The number of tokens the lifecycler will generate and put into the ring if
+  # it joined without transferring tokens from another lifecycler.
+  # CLI flag: -ingester.num-tokens
+  [num_tokens: | default = 128]
+
+  # Period at which to heartbeat to the underlying ring.
+  # CLI flag: -ingester.heartbeat-period
+  [heartbeat_period: | default = 5s]
+
+  # How long to wait to claim tokens and chunks from another member when
+  # that member is leaving. Will join automatically after the duration expires.
+  # CLI flag: -ingester.join-after
+  [join_after: | default = 0s]
+
+  # Minimum duration to wait before becoming ready. This is to work around race
+  # conditions with ingesters exiting and updating the ring.
+  # CLI flag: -ingester.min-ready-duration
+  [min_ready_duration: | default = 1m]
+
+  # Name of network interfaces to read addresses from.
+  # CLI flag: -ingester.lifecycler.interface
+  interface_names:
+    - [ ... | default = ["eth0", "en0"]]
+
+  # Duration to sleep before exiting to ensure metrics are scraped.
+  # CLI flag: -ingester.final-sleep
+  [final_sleep: | default = 30s]

# Number of times to try and transfer chunks when leaving before
# falling back to flushing to the store. Zero = no transfers are done.
+# CLI flag: -ingester.max-transfer-retries
[max_transfer_retries: | default = 10]

# How many flushes can happen concurrently from each stream.
+# CLI flag: -ingester.concurrent-flushes
[concurrent_flushes: | default = 16]

-# How often should the ingester see if there are any blocks
-# to flush
+# How often should the ingester see if there are any blocks to flush
+# CLI flag: -ingester.flush-check-period
[flush_check_period: | default = 30s]

# The timeout before a flush is cancelled
+# CLI flag: -ingester.flush-op-timeout
[flush_op_timeout: | default = 10s]

-# How long chunks should be retained in-memory after they've
-# been flushed.
+# How long chunks should be retained in-memory after they've been flushed.
+# CLI flag: -ingester.chunks-retain-period
[chunk_retain_period: | default = 15m]

# How long chunks should sit in-memory with no updates before
# being flushed if they don't hit the max block size.
This means # that half-empty chunks will still be flushed after a certain # period as long as they receive no further activity. +# CLI flag: -ingester.chunks-idle-period [chunk_idle_period: | default = 30m] # The targeted _uncompressed_ size in bytes of a chunk block -# When this threshold is exceeded the head block will be cut and compressed inside the chunk +# When this threshold is exceeded the head block will be cut and compressed inside the chunk. +# CLI flag: -ingester.chunks-block-size [chunk_block_size: | default = 262144] # A target _compressed_ size in bytes for chunks. @@ -323,6 +485,7 @@ The `ingester_config` block configures Ingesters. # or significantly smaller if they get flushed for other reasons (e.g. chunk_idle_period) # The default value of 0 for this will create chunks with a fixed 10 blocks, # A non zero value will create chunks with a variable number of blocks to meet the target size. +# CLI flag: -ingester.chunk-target-size [chunk_target_size: | default = 0] # The compression algorithm to use for chunks. (supported: gzip, lz4, snappy) @@ -330,119 +493,82 @@ The `ingester_config` block configures Ingesters. # - `gzip` highest compression ratio but also slowest decompression speed. (144 kB per chunk) # - `lz4` fastest compression speed (188 kB per chunk) # - `snappy` fast and popular compression algorithm (272 kB per chunk) +# CLI flag: -ingester.chunk-encoding [chunk_encoding: | default = gzip] # Parameters used to synchronize ingesters to cut chunks at the same moment. # Sync period is used to roll over incoming entry to a new chunk. If chunk's utilization # isn't high enough (eg. less than 50% when sync_min_utilization is set to 0.5), then # this chunk rollover doesn't happen. +# CLI flag: -ingester.sync-period [sync_period: | default = 0] + +# CLI flag: -ingester.sync-min-utilization [sync_min_utilization: | Default = 0] # The maximum number of errors a stream will report to the user # when a push fails. 0 to make unlimited. +# CLI flag: -ingester.max-ignored-stream-errors [max_returned_stream_errors: | default = 10] # The maximum duration of a timeseries chunk in memory. If a timeseries runs for longer than this the current chunk will be flushed to the store and a new chunk created. +# CLI flag: -ingester.max-chunk-age [max_chunk_age: | default = 1h] # How far in the past an ingester is allowed to query the store for data. # This is only useful for running multiple loki binaries with a shared ring with a `filesystem` store which is NOT shared between the binaries # When using any "shared" object store like S3 or GCS this value must always be left as 0 -# It is an error to configure this to a non-zero value when using any object store other than `filesystem` +# It is an error to configure this to a non-zero value when using any object store other than `filesystem` # Use a value of -1 to allow the ingester to query the store infinitely far back in time. +# CLI flag: -ingester.query-store-max-look-back-period [query_store_max_look_back_period: | default = 0] - ``` -### lifecycler_config +## consul_config -The `lifecycler_config` is used by the Ingester to control how that ingester -registers itself into the ring and manages its lifecycle during its stay in the -ring. +The `consul_config` configures the consul client. 
The supported CLI flags used to reference this config block are: ```yaml -# Configures the ring the lifecycler connects to -[ring: ] - -# The number of tokens the lifecycler will generate and put into the ring if -# it joined without transferring tokens from another lifecycler. -[num_tokens: | default = 128] + # The hostname and port of Consul. +# CLI flag: -.consul.hostname +[host: | default = "localhost:8500"] -# Period at which to heartbeat to the underlying ring. -[heartbeat_period: | default = 5s] +# The ACL Token used to interact with Consul. +# CLI flag: -.consul.acl-token +[acl_token: ] -# How long to wait to claim tokens and chunks from another member when -# that member is leaving. Will join automatically after the duration expires. -[join_after: | default = 0s] +# The HTTP timeout when communicating with Consul +# CLI flag: -.consul.client-timeout +[http_client_timeout: | default = 20s] -# Minimum duration to wait before becoming ready. This is to work around race -# conditions with ingesters exiting and updating the ring. -[min_ready_duration: | default = 1m] - -# Name of network interfaces to read addresses from. -interface_names: - - [ ... | default = ["eth0", "en0"]] - -# Duration to sleep before exiting to ensure metrics are scraped. -[final_sleep: | default = 30s] +# Whether or not consistent reads to Consul are enabled. +# CLI flag: -.consul.consistent-reads +[consistent_reads: | default = true] ``` -### ring_config +## etcd_config -The `ring_config` is used to discover and connect to Ingesters. +The `etcd_config` configures the etcd client. The supported CLI flags used to reference this config block are: ```yaml -kvstore: - # The backend storage to use for the ring. Supported values are - # consul, etcd, inmemory, memberlist - store: - - # The prefix for the keys in the store. Should end with a /. - [prefix: | default = "collectors/"] - - # Configuration for a Consul client. Only applies if store - # is "consul" - consul: - # The hostname and port of Consul. - [host: | default = "localhost:8500"] - - # The ACL Token used to interact with Consul. - [acl_token: ] - - # The HTTP timeout when communicating with Consul - [http_client_timeout: | default = 20s] - - # Whether or not consistent reads to Consul are enabled. - [consistent_reads: | default = true] - - # Configuration for an ETCD v3 client. Only applies if - # store is "etcd" - etcd: - # The ETCD endpoints to connect to. - endpoints: - - +# The etcd endpoints to connect to. +# CLI flag: -.etcd.endpoints +[endpoints: | default = []] - # The Dial timeout for the ETCD connection. - [dial_timeout: | default = 10s] +# The dial timeout for the etcd connection. +# CLI flag: -.etcd.dial-timeout +[dial_timeout: | default = 10s] - # The maximum number of retries to do for failed ops to ETCD. - [max_retries: | default = 10] - -# The heartbeat timeout after which ingesters are skipped for -# reading and writing. -[heartbeat_timeout: | default = 1m] - -# The number of ingesters to write to and read from. Must be at least -# 1. -[replication_factor: | default = 3] +# The maximum number of retries to do for failed ops. +# CLI flag: -.etcd.max-retries +[max_retries: | default = 10] ``` ## memberlist_config The `memberlist_config` block configures the gossip ring to discover and connect between distributors, ingesters and queriers. The configuration is unique for all -three components to ensure a single shared ring. +three components to ensure a single shared ring. ```yaml # Name of the node in memberlist cluster. 
Defaults to hostname. @@ -545,138 +671,216 @@ three components to ensure a single shared ring. ## storage_config The `storage_config` block configures one of many possible stores for both the -index and chunks. Which configuration is read from depends on the schema_config -block and what is set for the store value. +index and chunks. Which configuration to be picked should be defined in schema_config +block. ```yaml # Configures storing chunks in AWS. Required options only required when aws is # present. aws: - # S3 or S3-compatible URL to connect to. If only region is specified as a - # host, the proper endpoint will be deduced. Use inmemory:/// to - # use a mock in-memory implementation. - s3: + # S3 or S3-compatible endpoint URL with escaped Key and Secret encoded. + # If only region is specified as a host, the proper endpoint will be deduced. + # Use inmemory:/// to use a mock in-memory implementation. + # CLI flag: -s3.url + [s3: ] # Set to true to force the request to use path-style addressing + # CLI flag: -s3.force-path-style [s3forcepathstyle: | default = false] + # Comma separated list of bucket names to evenly distribute chunks over. + # Overrides any buckets specified in s3.url flag + # CLI flag: -s3.buckets + [bucketnames: | default = ""] + + # S3 Endpoint to connect to. + # CLI flag: -s3.endpoint + [endpoint: | default = ""] + + # AWS region to use. + # CLI flag: -s3.region + [region: | default = ""] + + # AWS Access Key ID. + # CLI flag: -s3.access-key-id + [access_key_id: | default = ""] + + # AWS Secret Access Key. + # CLI flag: -s3.secret-access-key + [secret_access_key: | default = ""] + + # Disable https on S3 connection. + # CLI flag: -s3.insecure + [insecure: | default = false] + + # Enable AES256 AWS Server Side Encryption. + # CLI flag: -s3.sse-encryption + [sse_encryption: | default = false] + + http_config: + # The maximum amount of time an idle connection will be held open. + # CLI flag: -s3.http.idle-conn-timeout + [idle_conn_timeout: | default = 1m30s] + + # If non-zero, specifies the amount of time to wait for a server's response + # headers after fully writing the request. + # CLI flag: -s3.http.response-header-timeout + [response_header_timeout: | default = 0s] + + # Set to false to skip verifying the certificate chain and hostname. + # CLI flag: -s3.http.insecure-skip-verify + [insecure_skip_verify: | default = false] + # Configure the DynamoDB connection dynamodb: # URL for DynamoDB with escaped Key and Secret encoded. If only region is specified as a # host, the proper endpoint will be deduced. Use inmemory:/// to # use a mock in-memory implementation. + # CLI flag: -dynamodb.url dynamodb_url: # DynamoDB table management requests per-second limit. + # CLI flag: -dynamodb.api-limit [api_limit: | default = 2.0] # DynamoDB rate cap to back off when throttled. + # CLI flag: -dynamodb.throttle-limit [throttle_limit: | default = 10.0] # Metrics-based autoscaling configuration. metrics: # Use metrics-based autoscaling via this Prometheus query URL. + # CLI flag: -metrics.url [url: ] # Queue length above which we will scale up capacity. 
+ # CLI flag: -metrics.target-queue-length [target_queue_length: | default = 100000] # Scale up capacity by this multiple + # CLI flag: -metrics.scale-up-factor [scale_up_factor: | default = 1.3] # Ignore throttling below this level (rate per second) + # CLI flag: -metrics.ignore-throttle-below [ignore_throttle_below: | default = 1] # Query to fetch ingester queue length + # CLI flag: -metrics.queue-length-query [queue_length_query: | default = "sum(avg_over_time(cortex_ingester_flush_queue_length{job="cortex/ingester"}[2m]))"] # Query to fetch throttle rates per table + # CLI flag: -metrics.write-throttle-query [write_throttle_query: | default = "sum(rate(cortex_dynamo_throttled_total{operation="DynamoDB.BatchWriteItem"}[1m])) by (table) > 0"] # Query to fetch write capacity usage per table + # CLI flag: -metrics.usage-query [write_usage_query: | default = "sum(rate(cortex_dynamo_consumed_capacity_total{operation="DynamoDB.BatchWriteItem"}[15m])) by (table) > 0"] # Query to fetch read capacity usage per table + # CLI flag: -metrics.read-usage-query [read_usage_query: | default = "sum(rate(cortex_dynamo_consumed_capacity_total{operation="DynamoDB.QueryPages"}[1h])) by (table) > 0"] # Query to fetch read errors per table + # CLI flag: -metrics.read-error-query [read_error_query: | default = "sum(increase(cortex_dynamo_failures_total{operation="DynamoDB.QueryPages",error="ProvisionedThroughputExceededException"}[1m])) by (table) > 0"] # Number of chunks to group together to parallelise fetches (0 to disable) + # CLI flag: -dynamodb.chunk-gang-size [chunk_gang_size: | default = 10] # Max number of chunk get operations to start in parallel. + # CLI flag: -dynamodb.chunk.get-max-parallelism [chunk_get_max_parallelism: | default = 32] # Configures storing chunks in Bigtable. Required fields only required # when bigtable is defined in config. bigtable: # BigTable project ID + # CLI flag: -bigtable.project project: # BigTable instance ID + # CLI flag: -bigtable.instance instance: # Configures the gRPC client used to connect to Bigtable. + # The CLI flags prefix for this block config is: bigtable [grpc_client_config: ] # Configures storing index in GCS. Required fields only required # when gcs is defined in config. gcs: # Name of GCS bucket to put chunks in. + # CLI flag: -gcs.bucketname bucket_name: # The size of the buffer that the GCS client uses for each PUT request. 0 # to disable buffering. + # CLI flag: -gcs.chunk-buffer-size [chunk_buffer_size: | default = 0] # The duration after which the requests to GCS should be timed out. + # CLI flag: -gcs.request-timeout [request_timeout: | default = 0s] # Configures storing chunks in Cassandra cassandra: # Comma-separated hostnames or IPs of Cassandra instances + # CLI flag: -cassandra.addresses addresses: # Port that cassandra is running on + # CLI flag: -cassandra.port [port: | default = 9042] # Keyspace to use in Cassandra + # CLI flag: -cassandra.keyspace keyspace: # Consistency level for Cassandra + # CLI flag: -cassandra.consistency [consistency: | default = "QUORUM"] # Replication factor to use in Cassandra. + # CLI flag: -cassandra.replication-factor [replication_factor: | default = 1] # Instruct the Cassandra driver to not attempt to get host # info from the system.peers table. + # CLI flag: -cassandra.disable-initial-host-lookup [disable_initial_host_lookup: | default = false] # Use SSL when connecting to Cassandra instances. + # CLI flag: -cassandra.ssl [SSL: | default = false] # Require SSL certificate validation when SSL is enabled. 
+ # CLI flag: -cassandra.host-verification [host_verification: | default = true] - # Path to certificate file to verify the peer when SSL is - # enabled. + # Path to certificate file to verify the peer when SSL is enabled. + # CLI flag: -cassandra.ca-path [CA_path: ] # Enable password authentication when connecting to Cassandra. + # CLI flag: -cassandra.auth [auth: | default = false] # Username for password authentication when auth is true. + # CLI flag: -cassandra.username [username: ] # Password for password authentication when auth is true. + # CLI flag: -cassandra.password [password: ] # Timeout when connecting to Cassandra. + # CLI flag: -cassandra.timeout [timeout: | default = 600ms] # Initial connection timeout during initial dial to server. + # CLI flag: -cassandra.connect-timeout [connect_timeout: | default = 600ms] swift: @@ -743,131 +947,164 @@ swift: # required when boltdb is present in config. boltdb: # Location of BoltDB index files. + # CLI flag: -boltdb.dir directory: # Configures storing the chunks on the local filesystem. Required # fields only required when filesystem is present in config. filesystem: # Directory to store chunks in. + # CLI flag: -local.chunk-directory directory: # Cache validity for active index entries. Should be no higher than # the chunk_idle_period in the ingester settings. +# CLI flag: -store.index-cache-validity [index_cache_validity: | default = 5m] # The maximum number of chunks to fetch per batch. +# CLI flag: -store.max-chunk-batch-size [max_chunk_batch_size: | default = 50] -# Config for how the cache for index queries should -# be built. +# Config for how the cache for index queries should be built. +# The CLI flags prefix for this block config is: store.index-cache-read index_queries_cache_config: ``` -### cache_config +## chunk_store_config + +The `chunk_store_config` block configures how chunks will be cached and how long +to wait before saving them to the backing store. + +```yaml +# The cache configuration for storing chunks +# The CLI flags prefix for this block config is: store.chunks-cache +[chunk_cache_config: ] + +# The cache configuration for deduplicating writes +# The CLI flags prefix for this block config is: store.index-cache-write +[write_dedupe_cache_config: ] + +# The minimum time between a chunk update and being saved +# to the store. +[min_chunk_age: ] + +# Cache index entries older than this period. Default is disabled. +# CLI flag: -store.cache-lookups-older-than +[cache_lookups_older_than: ] + +# Limit how long back data can be queried. Default is disabled. +# This should always be set to a value less than or equal to +# what is set in `table_manager.retention_period` . +# CLI flag: -store.max-look-back-period +[max_look_back_period: ] +``` + +## cache_config The `cache_config` block configures how Loki will cache requests, chunks, and the index to a backing cache store. ```yaml # Enable in-memory cache. +# CLI flag: -.cache.enable-fifocache [enable_fifocache: ] # The default validity of entries for caches unless overridden. # NOTE In Loki versions older than 1.4.0 this was "defaul_validity". +# CLI flag: -.default-validity [default_validity: ] # Configures the background cache when memcached is used. background: # How many goroutines to use to write back to memcached. + # CLI flag: -.background.write-back-concurrency [writeback_goroutines: | default = 10] # How many chunks to buffer for background write back to memcached. 
+  # CLI flag: -.background.write-back-buffer
  [writeback_buffer: | default = 10000]

# Configures memcached settings.
memcached:
  # Configures how long keys stay in memcached.
+  # CLI flag: -.memcached.expiration
  expiration:

  # Configures how many keys to fetch in each batch request.
+  # CLI flag: -.memcached.batchsize
  batch_size:

  # Maximum active requests to memcached.
+  # CLI flag: -.memcached.parallelism
  [parallelism: | default = 100]

# Configures how to connect to one or more memcached servers.
memcached_client:
  # The hostname to use for memcached services when caching chunks. If
  # empty, no memcached will be used. A SRV lookup will be used.
+  # CLI flag: -.memcached.hostname
  [host: ]

  # SRV service used to discover memcached servers.
+  # CLI flag: -.memcached.service
  [service: | default = "memcached"]

  # Maximum time to wait before giving up on memcached requests.
+  # CLI flag: -.memcached.timeout
  [timeout: | default = 100ms]

-  # The maximum number of idle connections in the memcached client
-  # pool.
+  # The maximum number of idle connections in the memcached client pool.
+  # CLI flag: -.memcached.max-idle-conns
  [max_idle_conns: | default = 100]

  # The period with which to poll the DNS for memcached servers.
+  # CLI flag: -.memcached.update-interval
  [update_interval: | default = 1m]

-  # Whether or not to use a consistent hash to discover multiple memcached
-  # servers.
+  # Whether or not to use a consistent hash to discover multiple memcached servers.
+  # CLI flag: -.memcached.consistent-hash
  [consistent_hash: ]

redis:
  # Redis service endpoint to use when caching chunks. If empty, no redis will be used.
+  # CLI flag: -.redis.endpoint
  [endpoint: ]

+  # Maximum time to wait before giving up on redis requests.
+  # CLI flag: -.redis.timeout
  [timeout: | default = 100ms]

+  # How long keys stay in redis.
+  # CLI flag: -.redis.expiration
  [expiration: | default = 0s]

+  # Maximum number of idle connections in pool.
+  # CLI flag: -.redis.max-idle-conns
  [max_idle_conns: | default = 80]

+  # Maximum number of active connections in pool.
+  # CLI flag: -.redis.max-active-conns
  [max_active_conns: | default = 0]

+  # Password to use when connecting to redis.
+  # CLI flag: -.redis.password
  [password: ]

+  # Enables connecting to redis with TLS.
+  # CLI flag: -.redis.enable-tls
  [enable_tls: | default = false]

fifocache:
  # Number of entries to cache in-memory.
+  # CLI flag: -.fifocache.size
  [size: | default = 0]

  # The expiry duration for the in-memory cache.
+  # CLI flag: -.fifocache.duration
  [validity: | default = 0s]
```

-## chunk_store_config
-
-The `chunk_store_config` block configures how chunks will be cached and how long
-to wait before saving them to the backing store.
-
-```yaml
-# The cache configuration for storing chunks
-[chunk_cache_config: ]
-
-# The cache configuration for deduplicating writes
-[write_dedupe_cache_config: ]
-
-# The minimum time between a chunk update and being saved
-# to the store.
-[min_chunk_age: ]
-
-# Cache index entries older than this period. Default is
-# disabled.
-[cache_lookups_older_than: ]
-
-# Limit how long back data can be queried. Default is disabled.
-# This should always be set to a value less than or equal to
-# what is set in `table_manager.retention_period`.
-[max_look_back_period: ]
-```
-
## schema_config

The `schema_config` block configures schemas from given dates.

@@ -875,7 +1112,7 @@ The `schema_config` block configures schemas from given dates.
```yaml
# The configuration for chunk index schemas.
configs: - - [] +- [] ``` ### period_config @@ -887,12 +1124,13 @@ for from specific time periods. # The date of the first day that index buckets should be created. Use # a date in the past if this is your only period_config, otherwise # use a date when you want the schema to switch over. +# In YYYY-MM-DD format, for example: 2018-04-15. [from: ] # store and object_store below affect which key is # used. -# Which store to use for the index. Either aws, gcp, bigtable, bigtable-hashed, +# Which store to use for the index. Either aws, aws-dynamo, gcp, bigtable, bigtable-hashed, # cassandra, or boltdb. store: @@ -928,8 +1166,6 @@ chunks: [row_shards: | default = 16] ``` -Where `daytime` is a value in the format of `yyyy-mm-dd` like `2006-01-02`. - ## limits_config The `limits_config` block configures global and per-tenant limits for ingesting @@ -948,161 +1184,175 @@ logs in Loki. # replicas (it's automatically adjusted if the number of replicas change). # The global strategy requires the distributors to form their own ring, which # is used to keep track of the current number of healthy distributor replicas. +# CLI flag: -distributor.ingestion-rate-limit-strategy [ingestion_rate_strategy: | default = "local"] # Per-user ingestion rate limit in sample size per second. Units in MB. +# CLI flag: -distributor.ingestion-rate-limit-mb [ingestion_rate_mb: | default = 4] # Per-user allowed ingestion burst size (in sample size). Units in MB. # The burst size refers to the per-distributor local rate limiter even in the # case of the "global" strategy, and should be set at least to the maximum logs # size expected in a single push request. +# CLI flag: -distributor.ingestion-burst-size-mb [ingestion_burst_size_mb: | default = 6] # Maximum length of a label name. +# CLI flag: -validation.max-length-label-name [max_label_name_length: | default = 1024] # Maximum length of a label value. +# CLI flag: -validation.max-length-label-value [max_label_value_length: | default = 2048] # Maximum number of label names per series. +# CLI flag: -validation.max-label-names-per-series [max_label_names_per_series: | default = 30] # Whether or not old samples will be rejected. +# CLI flag: -validation.reject-old-samples [reject_old_samples: | default = false] # Maximum accepted sample age before rejecting. +# CLI flag: -validation.reject-old-samples.max-age [reject_old_samples_max_age: | default = 336h] # Duration for a table to be created/deleted before/after it's # needed. Samples won't be accepted before this time. +# CLI flag: -validation.create-grace-period [creation_grace_period: | default = 10m] # Enforce every sample has a metric name. +# CLI flag: -validation.enforce-metric-name [enforce_metric_name: | default = true] # Maximum number of active streams per user, per ingester. 0 to disable. +# CLI flag: -ingester.max-streams-per-user [max_streams_per_user: | default = 10000] # Maximum line size on ingestion path. Example: 256kb. # There is no limit when unset. +# CLI flag: -distributor.max-line-size [max_line_size: | default = none ] # Maximum number of log entries that will be returned for a query. 0 to disable. +# CLI flag: -validation.max-entries-limit [max_entries_limit_per_query: | default = 5000 ] # Maximum number of active streams per user, across the cluster. 0 to disable. # When the global limit is enabled, each ingester is configured with a dynamic # local limit based on the replication factor and the current number of healthy # ingesters, and is kept updated whenever the number of ingesters change. 
+# CLI flag: -ingester.max-global-streams-per-user [max_global_streams_per_user: | default = 0] # Maximum number of chunks that can be fetched by a single query. +# CLI flag: -store.query-chunk-limit [max_chunks_per_query: | default = 2000000] # The limit to length of chunk store queries. 0 to disable. +# CLI flag: -store.max-query-length [max_query_length: | default = 0] -# Maximum number of queries that will be scheduled in parallel by the -# frontend. +# Maximum number of queries that will be scheduled in parallel by the frontend. +# CLI flag: -querier.max-query-parallelism [max_query_parallelism: | default = 14] -# Cardinality limit for index queries +# Cardinality limit for index queries. +# CLI flag: -store.cardinality-limit [cardinality_limit: | default = 100000] # Maximum number of stream matchers per query. +# CLI flag: -querier.max-streams-matcher-per-query [max_streams_matchers_per_query: | default = 1000] -# Feature renamed to 'runtime configuration', flag deprecated in favor of -runtime-config.file (runtime_config.file in YAML) +# Feature renamed to 'runtime configuration', flag deprecated in favor of -runtime-config.file (runtime_config.file in YAML). +# CLI flag: -limits.per-user-override-config [per_tenant_override_config: ] -# Feature renamed to 'runtime configuration', flag deprecated in favor of -runtime-config.reload-period (runtime_config.period in YAML) +# Feature renamed to 'runtime configuration', flag deprecated in favor of -runtime-config.reload-period (runtime_config.period in YAML). +# CLI flag: -limits.per-user-override-period [per_tenant_override_period: | default = 10s] ``` -### `frontend_worker_config` +### grpc_client_config -The `frontend_worker_config` configures the worker - running within the Loki querier - picking up and executing queries enqueued by the query-frontend. +The `grpc_client_config` block configures a client connection to a gRPC service. ```yaml -# Address of query frontend service, in host:port format. -# CLI flag: -querier.frontend-address -[frontend_address: | default = ""] - -# Number of simultaneous queries to process. -# CLI flag: -querier.worker-parallelism -[parallelism: | default = 10] - -# How often to query DNS. -# CLI flag: -querier.dns-lookup-period -[dns_lookup_duration: | default = 10s] - -grpc_client_config: - # gRPC client max receive message size (bytes). - # CLI flag: -querier.frontend-client.grpc-max-recv-msg-size - [max_recv_msg_size: | default = 104857600] +# The maximum size in bytes the client can receive. +# CLI flag: -.grpc-max-recv-msg-size +[max_recv_msg_size: | default = 104857600] - # gRPC client max send message size (bytes). - # CLI flag: -querier.frontend-client.grpc-max-send-msg-size - [max_send_msg_size: | default = 16777216] +# The maximum size in bytes the client can send. +# CLI flag: -.grpc-max-send-msg-size +[max_send_msg_size: | default = 16777216] - # Use compression when sending messages. - # CLI flag: -querier.frontend-client.grpc-use-gzip-compression - [use_gzip_compression: | default = false] +# Whether or not messages should be compressed. +# CLI flag: -.grpc-use-gzip-compression +[use_gzip_compression: | default = false] - # Rate limit for gRPC client; 0 means disabled. - # CLI flag: -querier.frontend-client.grpc-client-rate-limit - [rate_limit: | default = 0] +# Rate limit for gRPC client. 0 is disabled. +# CLI flag: -.grpc-client-rate-limit +[rate_limit: | default = 0] - # Rate limit burst for gRPC client. 
- # CLI flag: -querier.frontend-client.grpc-client-rate-limit-burst - [rate_limit_burst: | default = 0] +# Rate limit burst for gRPC client. +# CLI flag: -.grpc-client-rate-limit-burst +[rate_limit_burst: | default = 0] - # Enable backoff and retry when we hit ratelimits. - # CLI flag: -querier.frontend-client.backoff-on-ratelimits - [backoff_on_ratelimits: | default = false] +# Enable backoff and retry when a rate limit is hit. +# CLI flag: -.backoff-on-ratelimits +[backoff_on_ratelimits: | default = false] - backoff_config: - # Minimum delay when backing off. - # CLI flag: -querier.frontend-client.backoff-min-period - [min_period: | default = 100ms] +# Configures backoff when enabled. +backoff_config: + # Minimum delay when backing off. + # CLI flag: -.backoff-min-period + [min_period: | default = 100ms] - # Maximum delay when backing off. - # CLI flag: -querier.frontend-client.backoff-max-period - [max_period: | default = 10s] + # The maximum delay when backing off. + # CLI flag: -.backoff-max-period + [max_period: | default = 10s] - # Number of times to backoff and retry before failing. - # CLI flag: -querier.frontend-client.backoff-retries - [max_retries: | default = 10] + # Number of times to backoff and retry before failing. + # CLI flag: -.backoff-retries + [max_retries: | default = 10] ``` ## table_manager_config -The `table_manager_config` block configures how the table manager operates -and how to provision tables when DynamoDB is used as the backing store. +The `table_manager_config` block configures the Loki table-manager. ```yaml -# Master 'off-switch' for table capacity updates, e.g. when troubleshooting +# Master 'off-switch' for table capacity updates, e.g. when troubleshooting. +# CLI flag: -table-manager.throughput-updates-disabled [throughput_updates_disabled: | default = false] -# Master 'on-switch' for table retention deletions +# Master 'on-switch' for table retention deletions. +# CLI flag: -table-manager.retention-deletes-enabled [retention_deletes_enabled: | default = false] # How far back tables will be kept before they are deleted. 0s disables # deletion. The retention period must be a multiple of the index / chunks # table "period" (see period_config). +# CLI flag: -table-manager.retention-period [retention_period: | default = 0s] # Period with which the table manager will poll for tables. +# CLI flag: -table-manager.poll-interval [poll_interval: | default = 2m] -# duration a table will be created before it is needed. +# Duration a table will be created before it is needed. +# CLI flag: -table-manager.periodic-table.grace-period [creation_grace_period: | default = 10m] # Configures management of the index tables for DynamoDB. +# The CLI flags prefix for this block config is: table-manager.index-table index_tables_provisioning: # Configures management of the chunk tables for DynamoDB. +# The CLI flags prefix for this block config is: table-manager.chunk-table chunk_tables_provisioning: ``` @@ -1113,40 +1363,52 @@ The `provision_config` block configures provisioning capacity for DynamoDB. ```yaml # Enables on-demand throughput provisioning for the storage # provider, if supported. Applies only to tables which are not autoscaled. +# CLI flag: -.enable-ondemand-throughput-mode [enable_ondemand_throughput_mode: | default = false] # DynamoDB table default write throughput. +# CLI flag: -.write-throughput [provisioned_write_throughput: | default = 3000] # DynamoDB table default read throughput. 
+# CLI flag: -.read-throughput
[provisioned_read_throughput: | default = 300]

# Enables on-demand throughput provisioning for the storage provider,
# if supported. Applies only to tables which are not autoscaled.
+# CLI flag: -.inactive-enable-ondemand-throughput-mode
[enable_inactive_throughput_on_demand_mode: | default = false]

# DynamoDB table write throughput for inactive tables.
+# CLI flag: -.inactive-write-throughput
[inactive_write_throughput: | default = 1]

# DynamoDB table read throughput for inactive tables.
+# CLI flag: -.inactive-read-throughput
[inactive_read_throughput: | default = 300]

# Active table write autoscale config.
+# The CLI flags prefix for this block config is: -.write-throughput
[write_scale: ]

# Inactive table write autoscale config.
+# The CLI flags prefix for this block config is: -.inactive-write-throughput
[inactive_write_scale: ]

# Number of last inactive tables to enable write autoscale.
[inactive_write_scale_lastn: ]

# Active table read autoscale config.
+# The CLI flags prefix for this block config is: -.read-throughput
[read_scale: ]

# Inactive table read autoscale config.
+# The CLI flags prefix for this block config is: -.inactive-read-throughput
[inactive_read_scale: ]

# Number of last inactive tables to enable read autoscale.
[inactive_read_scale_lastn: ]
```

@@ -1156,24 +1418,31 @@ The `auto_scaling_config` block configures autoscaling for DynamoDB.
```yaml
# Whether or not autoscaling should be enabled.
+# CLI flag: -.scale.enabled
[enabled: | default = false]

-# AWS AutoScaling role ARN
+# AWS AutoScaling role ARN.
+# CLI flag: -.scale.role-arn
[role_arn: ]

# DynamoDB minimum provision capacity.
+# CLI flag: -.scale.min-capacity
[min_capacity: | default = 3000]

# DynamoDB maximum provision capacity.
+# CLI flag: -.scale.max-capacity
[max_capacity: | default = 6000]

# DynamoDB minimum seconds between each autoscale up.
+# CLI flag: -.scale.out-cooldown
[out_cooldown: | default = 1800]

# DynamoDB minimum seconds between each autoscale down.
+# CLI flag: -.scale.in-cooldown
[in_cooldown: | default = 1800]

# DynamoDB target ratio of consumed capacity to provisioned capacity.
+# CLI flag: -.scale.target-value
[target: | default = 80]
```
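As an illustration, a hypothetical `table_manager` block wiring the two sections above together to autoscale the active index tables; the field names come from the provisioning and autoscaling references, and all values are made up, not recommendations:

```yaml
table_manager:
  index_tables_provisioning:
    enable_ondemand_throughput_mode: false
    provisioned_write_throughput: 1000
    provisioned_read_throughput: 300
    write_scale:
      enabled: true
      min_capacity: 500
      max_capacity: 4000
      out_cooldown: 1800
      in_cooldown: 1800
      target: 80
```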
@@ -1183,6 +1452,7 @@ The `tracing_config` block configures tracing for Jaeger. Currently limited to d
```yaml
# Whether or not tracing should be enabled.
+# CLI flag: -tracing.enabled
[enabled: | default = true]
```

diff --git a/docs/configuration/examples.md b/docs/configuration/examples.md
index abfb4961cb2e4..b40e152fc54dc 100644
--- a/docs/configuration/examples.md
+++ b/docs/configuration/examples.md
@@ -5,7 +5,8 @@
3. [Cassandra Index](#cassandra-index)
4. [AWS](#aws)
5. [Almost zero dependencies setup with Memberlist and BoltDB Shipper](#almost-zero-dependencies-setup)
-6. [Using the query-frontend](#query-frontend)
+6. [Schema config to migrate to new changes such as store, schema, index period, etc.](#schema_config)
+7. [Using the query-frontend](#query-frontend)

## Complete Local config

@@ -136,7 +137,7 @@ storage_config:

### S3-compatible APIs

-S3-compatible APIs (e.g., Ceph Object Storage with an S3-compatible API) can be
+S3-compatible APIs (e.g. Ceph Object Storage with an S3-compatible API) can be
used. If the API supports path-style URL rather than virtual hosted bucket
addressing, configure the URL in `storage_config` with the custom endpoint:

@@ -147,13 +148,33 @@ storage_config:
    s3forcepathstyle: true
```

+### S3 Expanded Config
+
+S3 storage now also supports an expanded config: either the single `s3`
+endpoint URL or the expanded form below can be used.
+
+```yaml
+storage_config:
+  aws:
+    endpoint: s3.endpoint.com
+    region: s3_region
+    access_key_id: s3_access_key_id
+    secret_access_key: s3_secret_access_key
+    insecure: false
+    sse_encryption: false
+    http_config:
+      idle_conn_timeout: 90s
+      response_header_timeout: 0s
+      insecure_skip_verify: false
+    s3forcepathstyle: true
+```
+
## Almost zero dependencies setup

This is a configuration to deploy Loki depending only on storage solution, e.g. an S3-compatible API
like minio. The ring configuration is based on the gossip memberlist and the index is shipped to storage
via [boltdb-shipper](../operations/storage/boltdb-shipper.md).

-
```yaml
auth_enabled: false

@@ -218,6 +239,31 @@ limits_config:

```

+## schema_config
+
+```yaml
+configs:
+  # Starting from 2018-04-15 Loki should store indexes on Cassandra
+  # using weekly periodic tables and chunks on filesystem.
+  # The index tables will be prefixed with "index_".
+  - from: "2018-04-15"
+    store: cassandra
+    object_store: filesystem
+    schema: v11
+    index:
+      period: 168h
+      prefix: index_
+
+  # Starting from 2020-06-15 we moved from filesystem to AWS S3 for storing the chunks.
+  - from: "2020-06-15"
+    store: cassandra
+    object_store: s3
+    schema: v11
+    index:
+      period: 168h
+      prefix: index_
+```
+
## Query Frontend

[example configuration](./query-frontend.md)

diff --git a/docs/operations/upgrade.md b/docs/operations/upgrade.md
index 707ed2e53d07d..2e4ee23efa703 100644
--- a/docs/operations/upgrade.md
+++ b/docs/operations/upgrade.md
@@ -6,11 +6,36 @@ Unfortunately Loki is software and software is hard and sometimes things are not
On this page we will document any upgrade issues/gotchas/considerations we are aware of.

+
+## Master / Unreleased
+
+The configuration documentation has been re-ordered, and the corresponding
+`CLI` flag is now documented for every config option.
+
+S3 config now supports an expanded form. An example can be found at
+[s3_expanded_config](../configuration/examples.md#s3-expanded-config).
+
+### Breaking CLI flags changes
+
+```diff
+- querier.query_timeout
++ querier.query-timeout
+
+- distributor.extra-query-delay
++ querier.extra-query-delay
+
+- max-chunk-batch-size
++ store.max-chunk-batch-size
+
+- ingester.concurrent-flushed
++ ingester.concurrent-flushes
+```
+
## 1.6.0

A new ingester GRPC API has been added allowing to speed up metric queries, to ensure a rollout without query errors make sure you upgrade all ingesters first.
Once this is done you can then proceed with the rest of the deployment, this is to ensure that queriers won't look for an API not yet available.

+
## 1.5.0

Note: The required upgrade path outlined for version 1.4.0 below is still true for moving to 1.5.0 from any release older than 1.4.0 (e.g. 1.3.0->1.5.0 needs to also look at the 1.4.0 upgrade requirements).
diff --git a/go.mod b/go.mod index bb3d895aacc16..9366f9c4da633 100644 --- a/go.mod +++ b/go.mod @@ -9,9 +9,9 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/cortexproject/cortex v1.2.1-0.20200702073552-0ea5a8b50b19 + github.com/cortexproject/cortex v1.2.1-0.20200709155522-19502213923d github.com/davecgh/go-spew v1.1.1 - github.com/docker/docker v17.12.0-ce-rc1.0.20200621004740-33fba35d42e7+incompatible + github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8 github.com/dustin/go-humanize v1.0.0 @@ -34,17 +34,17 @@ require ( github.com/influxdata/go-syslog/v3 v3.0.1-0.20200510134747-836dce2cf6da github.com/jmespath/go-jmespath v0.3.0 github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible - github.com/json-iterator/go v1.1.9 + github.com/json-iterator/go v1.1.10 github.com/klauspost/compress v1.9.5 github.com/mitchellh/mapstructure v1.2.2 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785 github.com/pierrec/lz4 v2.5.3-0.20200429092203-e876bbd321b3+incompatible github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55 + github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.10.0 - github.com/prometheus/prometheus v1.8.2-0.20200626180636-d17d88935c8d + github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c github.com/segmentio/fasthash v1.0.2 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd diff --git a/go.sum b/go.sum index 85b0005dd9241..eb0a06caa2330 100644 --- a/go.sum +++ b/go.sum @@ -232,12 +232,10 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= -github.com/cortexproject/cortex v1.2.1-0.20200702073552-0ea5a8b50b19 h1:7CFJdvhTn18bXjZgYWBCRMiAzv9YqkkGLK0E52qbNDc= -github.com/cortexproject/cortex v1.2.1-0.20200702073552-0ea5a8b50b19/go.mod h1:gnN56a4DbpodOjxbAS2KEa4BcCi0/XnLaE4g0v0ySVE= +github.com/cortexproject/cortex v1.2.1-0.20200709155522-19502213923d h1:L20hHQCwVNj8eQDCvTmYA3Q8Xai8ZH5LcpgTiMuOWK8= +github.com/cortexproject/cortex v1.2.1-0.20200709155522-19502213923d/go.mod h1:9Iy6tOOITdQb5Q8Boj9ke/q7AyztcJlnmgpSUW/k1eM= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod 
h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= @@ -268,8 +266,8 @@ github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 h1:mzrx39dGtGq0VEnTHjnakmczd4uFbhx2cZU3BJDsLdc= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v17.12.0-ce-rc1.0.20200621004740-33fba35d42e7+incompatible h1:Nl7OFCKJMMUFC03JjpxhNf7pqvz0MIY/5xCqOt9EHeI= -github.com/docker/docker v17.12.0-ce-rc1.0.20200621004740-33fba35d42e7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible h1:+mzU0jHyjWpYHiD0StRlsVXkCvecWS2hc55M3OlUJSk= +github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 h1:X0fj836zx99zFu83v/M79DuBn84IL/Syx1SY6Y5ZEMA= @@ -738,6 +736,8 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= @@ -872,8 +872,6 @@ github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQz github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/term v0.0.0-20200611042045-63b9a826fb74 h1:kvRIeqJNICemq2UFLx8q/Pj+1IRNZS0XPTaMFkuNsvg= -github.com/moby/term v0.0.0-20200611042045-63b9a826fb74/go.mod h1:pJ0Ot5YGdTcMdxnPMyGCfAr6fKXe0g9cDlz16MuFEBE= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1003,6 +1001,8 @@ github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang 
v1.6.1-0.20200604110148-03575cad4e55 h1:ADHic9K/n5JQDFx/OAQbxkwR+uPJ9oYmN0taBMyYrBo= github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55/go.mod h1:25h+Uz1WvXDBZYwqGX8PAb71RBkcjxEVV/R5wGnsq4I= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1041,14 +1041,16 @@ github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLk github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33 h1:HBYrMJj5iosUjUkAK9L5GO+5eEQXbcrzdjkqY9HV5W4= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33/go.mod h1:fkIPPkuZnkXyopYHmXPxf9rgiPkVgZCN8w9o8+UgBlY= github.com/prometheus/prometheus v1.8.2-0.20200619100132-74207c04655e/go.mod h1:QV6T0PPQi5UFmqcLBJw3JiyIR8r1O7KEv9qlVw4VV40= -github.com/prometheus/prometheus v1.8.2-0.20200626180636-d17d88935c8d h1:deRGhuK2zxxqo6jZ29NX86tfhvmENcFq3Zj330eYWaM= -github.com/prometheus/prometheus v1.8.2-0.20200626180636-d17d88935c8d/go.mod h1:q8Yr0fMOK/s586U4qeOTHsb5PTO58GQU8b5lg/931/Q= +github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c h1:Iz2q3wgo4xiURb7Ku0MCrM7osAVHX03lF1vHNht1fb8= +github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c/go.mod h1:/kMSPIRsxr/apyHxlzYMdFnaPXUXXqILU5uzIoNhOvc= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 h1:+kGqA4dNN5hn7WwvKdzHl0rdN5AEkbNZd0VjRltAiZg= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1474,7 +1476,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1649,8 +1650,6 @@ gopkg.in/yaml.v3 v3.0.0-20200603094226-e3079894b1e8 h1:jL/vaozO53FMfZLySWM+4nulF gopkg.in/yaml.v3 v3.0.0-20200603094226-e3079894b1e8/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 965166a8d6804..399e215711cf4 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -74,7 +74,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.LifecyclerConfig.RegisterFlags(f) f.IntVar(&cfg.MaxTransferRetries, "ingester.max-transfer-retries", 10, "Number of times to try and transfer chunks before falling back to flushing. If set to 0 or negative value, transfers are disabled.") - f.IntVar(&cfg.ConcurrentFlushes, "ingester.concurrent-flushed", 16, "") + f.IntVar(&cfg.ConcurrentFlushes, "ingester.concurrent-flushes", 16, "") f.DurationVar(&cfg.FlushCheckPeriod, "ingester.flush-check-period", 30*time.Second, "") f.DurationVar(&cfg.FlushOpTimeout, "ingester.flush-op-timeout", 10*time.Second, "") f.DurationVar(&cfg.RetainPeriod, "ingester.chunks-retain-period", 15*time.Minute, "") diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index 05913455abe83..70547362faa0f 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -3,6 +3,7 @@ package logql import ( "context" "errors" + "flag" "math" "sort" "time" @@ -67,9 +68,14 @@ type EngineOpts struct { MaxLookBackPeriod time.Duration `yaml:"max_look_back_period"` } +func (opts *EngineOpts) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.DurationVar(&opts.Timeout, prefix+".engine.timeout", 5*time.Minute, "Timeout for query execution.") + f.DurationVar(&opts.MaxLookBackPeriod, prefix+".engine.max-lookback-period", 30*time.Second, "The maximum amount of time to look back for log lines. Used only for instant log queries.") +} + func (opts *EngineOpts) applyDefault() { if opts.Timeout == 0 { - opts.Timeout = 3 * time.Minute + opts.Timeout = 5 * time.Minute } if opts.MaxLookBackPeriod == 0 { opts.MaxLookBackPeriod = 30 * time.Second diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 4d73d9368afbe..d3a07a97f0ba1 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -50,9 +50,10 @@ type Config struct { // RegisterFlags register flags. 
func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.Engine.RegisterFlagsWithPrefix("querier", f) f.DurationVar(&cfg.TailMaxDuration, "querier.tail-max-duration", 1*time.Hour, "Limit the duration for which live tailing request would be served") - f.DurationVar(&cfg.QueryTimeout, "querier.query_timeout", 1*time.Minute, "Timeout when querying backends (ingesters or storage) during the execution of a query request") - f.DurationVar(&cfg.ExtraQueryDelay, "distributor.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.") + f.DurationVar(&cfg.QueryTimeout, "querier.query-timeout", 1*time.Minute, "Timeout when querying backends (ingesters or storage) during the execution of a query request") + f.DurationVar(&cfg.ExtraQueryDelay, "querier.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.") f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") f.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 20, "The maximum number of concurrent queries.") } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 484397e100da4..b433ebf03969d 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -295,7 +295,7 @@ func TestQuerier_validateQueryRequest(t *testing.T) { request.Start = request.End.Add(-3 * time.Minute) _, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request}) - require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "invalid query, length > limit (3m0s > 2m0s)"), err) + require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "the query time range exceeds the limit (query length: 3m0s, limit: 2m0s)"), err) } func TestQuerier_SeriesAPI(t *testing.T) { diff --git a/pkg/storage/store.go b/pkg/storage/store.go index c14970766f62e..abe396ea2952f 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -33,7 +33,7 @@ type Config struct { func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.Config.RegisterFlags(f) cfg.BoltDBShipperConfig.RegisterFlags(f) - f.IntVar(&cfg.MaxChunkBatchSize, "max-chunk-batch-size", 50, "The maximum number of chunks to fetch per batch.") + f.IntVar(&cfg.MaxChunkBatchSize, "store.max-chunk-batch-size", 50, "The maximum number of chunks to fetch per batch.") } // Store is the Loki chunk store to retrieve and save chunks. diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go deleted file mode 100644 index 31f1a3ac09b1e..0000000000000 --- a/vendor/github.com/containerd/containerd/log/context.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package log - -import ( - "context" - "sync/atomic" - - "github.com/sirupsen/logrus" -) - -var ( - // G is an alias for GetLogger. 
- // - // We may want to define this locally to a package to get package tagged log - // messages. - G = GetLogger - - // L is an alias for the standard logger. - L = logrus.NewEntry(logrus.StandardLogger()) -) - -type ( - loggerKey struct{} -) - -// TraceLevel is the log level for tracing. Trace level is lower than debug level, -// and is usually used to trace detailed behavior of the program. -const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1)) - -// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to -// ensure the formatted time is always the same number of characters. -const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// ParseLevel takes a string level and returns the Logrus log level constant. -// It supports trace level. -func ParseLevel(lvl string) (logrus.Level, error) { - if lvl == "trace" { - return TraceLevel, nil - } - return logrus.ParseLevel(lvl) -} - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. -func GetLogger(ctx context.Context) *logrus.Entry { - logger := ctx.Value(loggerKey{}) - - if logger == nil { - return L - } - - return logger.(*logrus.Entry) -} - -// Trace logs a message at level Trace with the log entry passed-in. -func Trace(e *logrus.Entry, args ...interface{}) { - level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level))) - if level >= TraceLevel { - e.Debug(args...) - } -} - -// Tracef logs a message at level Trace with the log entry passed-in. -func Tracef(e *logrus.Entry, format string, args ...interface{}) { - level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level))) - if level >= TraceLevel { - e.Debugf(format, args...) - } -} diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go deleted file mode 100644 index 3ad22a10d0ce1..0000000000000 --- a/vendor/github.com/containerd/containerd/platforms/compare.go +++ /dev/null @@ -1,229 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import specs "github.com/opencontainers/image-spec/specs-go/v1" - -// MatchComparer is able to match and compare platforms to -// filter and sort platforms. -type MatchComparer interface { - Matcher - - Less(specs.Platform, specs.Platform) bool -} - -// Only returns a match comparer for a single platform -// using default resolution logic for the platform. 
-// -// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes) -// For ARMv7, will also match ARMv6 and ARMv5 -// For ARMv6, will also match ARMv5 -func Only(platform specs.Platform) MatchComparer { - platform = Normalize(platform) - if platform.Architecture == "arm" { - if platform.Variant == "v8" { - return orderedPlatformComparer{ - matchers: []Matcher{ - &matcher{ - Platform: platform, - }, - &matcher{ - Platform: specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v7", - }, - }, - &matcher{ - Platform: specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v6", - }, - }, - &matcher{ - Platform: specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v5", - }, - }, - }, - } - } - if platform.Variant == "v7" { - return orderedPlatformComparer{ - matchers: []Matcher{ - &matcher{ - Platform: platform, - }, - &matcher{ - Platform: specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v6", - }, - }, - &matcher{ - Platform: specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v5", - }, - }, - }, - } - } - if platform.Variant == "v6" { - return orderedPlatformComparer{ - matchers: []Matcher{ - &matcher{ - Platform: platform, - }, - &matcher{ - Platform: specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v5", - }, - }, - }, - } - } - } - - return singlePlatformComparer{ - Matcher: &matcher{ - Platform: platform, - }, - } -} - -// Ordered returns a platform MatchComparer which matches any of the platforms -// but orders them in order they are provided. -func Ordered(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return orderedPlatformComparer{ - matchers: matchers, - } -} - -// Any returns a platform MatchComparer which matches any of the platforms -// with no preference for ordering. -func Any(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return anyPlatformComparer{ - matchers: matchers, - } -} - -// All is a platform MatchComparer which matches all platforms -// with preference for ordering. 
-var All MatchComparer = allPlatformComparer{} - -type singlePlatformComparer struct { - Matcher -} - -func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool { - return c.Match(p1) && !c.Match(p2) -} - -type orderedPlatformComparer struct { - matchers []Matcher -} - -func (c orderedPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { - for _, m := range c.matchers { - p1m := m.Match(p1) - p2m := m.Match(p2) - if p1m && !p2m { - return true - } - if p1m || p2m { - return false - } - } - return false -} - -type anyPlatformComparer struct { - matchers []Matcher -} - -func (c anyPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { - var p1m, p2m bool - for _, m := range c.matchers { - if !p1m && m.Match(p1) { - p1m = true - } - if !p2m && m.Match(p2) { - p2m = true - } - if p1m && p2m { - return false - } - } - // If one matches, and the other does, sort match first - return p1m && !p2m -} - -type allPlatformComparer struct{} - -func (allPlatformComparer) Match(specs.Platform) bool { - return true -} - -func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { - return false -} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go deleted file mode 100644 index 69b336d67f772..0000000000000 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ /dev/null @@ -1,117 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "bufio" - "os" - "runtime" - "strings" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/pkg/errors" -) - -// Present the ARM instruction set architecture, eg: v7, v8 -var cpuVariant string - -func init() { - if isArmArch(runtime.GOARCH) { - cpuVariant = getCPUVariant() - } else { - cpuVariant = "" - } -} - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. We can just parse these information from /proc/cpuinfo -func getCPUInfo(pattern string) (info string, err error) { - if !isLinuxOS(runtime.GOOS) { - return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) - } - - cpuinfo, err := os.Open("/proc/cpuinfo") - if err != nil { - return "", err - } - defer cpuinfo.Close() - - // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse - // the first core is enough. 
- scanner := bufio.NewScanner(cpuinfo) - for scanner.Scan() { - newline := scanner.Text() - list := strings.Split(newline, ":") - - if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { - return strings.TrimSpace(list[1]), nil - } - } - - // Check whether the scanner encountered errors - err = scanner.Err() - if err != nil { - return "", err - } - - return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) -} - -func getCPUVariant() string { - if runtime.GOOS == "windows" { - // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use - // runtime.GOARCH to determine the variants - var variant string - switch runtime.GOARCH { - case "arm64": - variant = "v8" - case "arm": - variant = "v7" - default: - variant = "unknown" - } - - return variant - } - - variant, err := getCPUInfo("Cpu architecture") - if err != nil { - log.L.WithError(err).Error("failure getting variant") - return "" - } - - switch variant { - case "8", "AArch64": - variant = "v8" - case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": - variant = "v7" - case "6", "6TEJ": - variant = "v6" - case "5", "5T", "5TE", "5TEJ": - variant = "v5" - case "4", "4T": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "unknown" - } - - return variant -} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go deleted file mode 100644 index 6ede94061eb88..0000000000000 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - "strings" -) - -// isLinuxOS returns true if the operating system is Linux. -// -// The OS value should be normalized before calling this function. -func isLinuxOS(os string) bool { - return os == "linux" -} - -// These function are generated from https://golang.org/src/go/build/syslist.go. -// -// We use switch statements because they are slightly faster than map lookups -// and use a little less memory. - -// isKnownOS returns true if we know about the operating system. -// -// The OS value should be normalized before calling this function. -func isKnownOS(os string) bool { - switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": - return true - } - return false -} - -// isArmArch returns true if the architecture is ARM. -// -// The arch value should be normalized before being passed to this function. -func isArmArch(arch string) bool { - switch arch { - case "arm", "arm64": - return true - } - return false -} - -// isKnownArch returns true if we know about the architecture. -// -// The arch value should be normalized before being passed to this function. 
-func isKnownArch(arch string) bool { - switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": - return true - } - return false -} - -func normalizeOS(os string) string { - if os == "" { - return runtime.GOOS - } - os = strings.ToLower(os) - - switch os { - case "macos": - os = "darwin" - } - return os -} - -// normalizeArch normalizes the architecture. -func normalizeArch(arch, variant string) (string, string) { - arch, variant = strings.ToLower(arch), strings.ToLower(variant) - switch arch { - case "i386": - arch = "386" - variant = "" - case "x86_64", "x86-64": - arch = "amd64" - variant = "" - case "aarch64", "arm64": - arch = "arm64" - switch variant { - case "8", "v8": - variant = "" - } - case "armhf": - arch = "arm" - variant = "v7" - case "armel": - arch = "arm" - variant = "v6" - case "arm": - switch variant { - case "", "7": - variant = "v7" - case "5", "6", "8": - variant = "v" + variant - } - } - - return arch, variant -} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go deleted file mode 100644 index a14d80e58cbf7..0000000000000 --- a/vendor/github.com/containerd/containerd/platforms/defaults.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// DefaultString returns the default string specifier for the platform. -func DefaultString() string { - return Format(DefaultSpec()) -} - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant, - } -} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go deleted file mode 100644 index e8a7d5ffa0d69..0000000000000 --- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -// Default returns the default matcher for the platform. 
-func Default() MatchComparer { - return Only(DefaultSpec()) -} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go deleted file mode 100644 index 0defbd36c0424..0000000000000 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Default returns the default matcher for the platform. -func Default() MatchComparer { - return Ordered(DefaultSpec(), specs.Platform{ - OS: "linux", - Architecture: "amd64", - }) -} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go deleted file mode 100644 index 77d3f184ec1b8..0000000000000 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ /dev/null @@ -1,278 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package platforms provides a toolkit for normalizing, matching and -// specifying container platforms. -// -// Centered around OCI platform specifications, we define a string-based -// specifier syntax that can be used for user input. With a specifier, users -// only need to specify the parts of the platform that are relevant to their -// context, providing an operating system or architecture or both. -// -// How do I use this package? -// -// The vast majority of use cases should simply use the match function with -// user input. The first step is to parse a specifier into a matcher: -// -// m, err := Parse("linux") -// if err != nil { ... } -// -// Once you have a matcher, use it to match against the platform declared by a -// component, typically from an image or runtime. Since extracting an images -// platform is a little more involved, we'll use an example against the -// platform default: -// -// if ok := m.Match(Default()); !ok { /* doesn't match */ } -// -// This can be composed in loops for resolving runtimes or used as a filter for -// fetch and select images. -// -// More details of the specifier syntax and platform spec follow. -// -// Declaring Platform Support -// -// Components that have strict platform requirements should use the OCI -// platform specification to declare their support. 
Typically, this will be -// images and runtimes that should make these declaring which platform they -// support specifically. This looks roughly as follows: -// -// type Platform struct { -// Architecture string -// OS string -// Variant string -// } -// -// Most images and runtimes should at least set Architecture and OS, according -// to their GOARCH and GOOS values, respectively (follow the OCI image -// specification when in doubt). ARM should set variant under certain -// discussions, which are outlined below. -// -// Platform Specifiers -// -// While the OCI platform specifications provide a tool for components to -// specify structured information, user input typically doesn't need the full -// context and much can be inferred. To solve this problem, we introduced -// "specifiers". A specifier has the format -// `||/[/]`. The user can provide either the -// operating system or the architecture or both. -// -// An example of a common specifier is `linux/amd64`. If the host has a default -// of runtime that matches this, the user can simply provide the component that -// matters. For example, if a image provides amd64 and arm64 support, the -// operating system, `linux` can be inferred, so they only have to provide -// `arm64` or `amd64`. Similar behavior is implemented for operating systems, -// where the architecture may be known but a runtime may support images from -// different operating systems. -// -// Normalization -// -// Because not all users are familiar with the way the Go runtime represents -// platforms, several normalizations have been provided to make this package -// easier to user. -// -// The following are performed for architectures: -// -// Value Normalized -// aarch64 arm64 -// armhf arm -// armel arm/v6 -// i386 386 -// x86_64 amd64 -// x86-64 amd64 -// -// We also normalize the operating system `macos` to `darwin`. -// -// ARM Support -// -// To qualify ARM architecture, the Variant field is used to qualify the arm -// version. The most common arm version, v7, is represented without the variant -// unless it is explicitly provided. This is treated as equivalent to armhf. A -// previous architecture, armel, will be normalized to arm/v6. -// -// While these normalizations are provided, their support on arm platforms has -// not yet been fully implemented and tested. -package platforms - -import ( - "regexp" - "runtime" - "strconv" - "strings" - - "github.com/containerd/containerd/errdefs" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var ( - specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) -) - -// Matcher matches platforms specifications, provided by an image or runtime. -type Matcher interface { - Match(platform specs.Platform) bool -} - -// NewMatcher returns a simple matcher based on the provided platform -// specification. The returned matcher only looks for equality based on os, -// architecture and variant. -// -// One may implement their own matcher if this doesn't provide the required -// functionality. -// -// Applications should opt to use `Match` over directly parsing specifiers. 
-func NewMatcher(platform specs.Platform) Matcher { - return &matcher{ - Platform: Normalize(platform), - } -} - -type matcher struct { - specs.Platform -} - -func (m *matcher) Match(platform specs.Platform) bool { - normalized := Normalize(platform) - return m.OS == normalized.OS && - m.Architecture == normalized.Architecture && - m.Variant == normalized.Variant -} - -func (m *matcher) String() string { - return Format(m.Platform) -} - -// Parse parses the platform specifier syntax into a platform declaration. -// -// Platform specifiers are in the format `||/[/]`. -// The minimum required information for a platform specifier is the operating -// system or architecture. If there is only a single string (no slashes), the -// value will be matched against the known set of operating systems, then fall -// back to the known set of architectures. The missing component will be -// inferred based on the local environment. -func Parse(specifier string) (specs.Platform, error) { - if strings.Contains(specifier, "*") { - // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) - } - - parts := strings.Split(specifier, "/") - - for _, part := range parts { - if !specifierRe.MatchString(part) { - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) - } - } - - var p specs.Platform - switch len(parts) { - case 1: - // in this case, we will test that the value might be an OS, then look - // it up. If it is not known, we'll treat it as an architecture. Since - // we have very little information about the platform here, we are - // going to be a little more strict if we don't know about the argument - // value. - p.OS = normalizeOS(parts[0]) - if isKnownOS(p.OS) { - // picks a default architecture - p.Architecture = runtime.GOARCH - if p.Architecture == "arm" && cpuVariant != "v7" { - p.Variant = cpuVariant - } - - return p, nil - } - - p.Architecture, p.Variant = normalizeArch(parts[0], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - if isKnownArch(p.Architecture) { - p.OS = runtime.GOOS - return p, nil - } - - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) - case 2: - // In this case, we treat as a regular os/arch pair. We don't care - // about whether or not we know of the platform. - p.OS = normalizeOS(parts[0]) - p.Architecture, p.Variant = normalizeArch(parts[1], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - - return p, nil - case 3: - // we have a fully specified variant, this is rare - p.OS = normalizeOS(parts[0]) - p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) - if p.Architecture == "arm64" && p.Variant == "" { - p.Variant = "v8" - } - - return p, nil - } - - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) -} - -// MustParse is like Parses but panics if the specifier cannot be parsed. -// Simplifies initialization of global variables. -func MustParse(specifier string) specs.Platform { - p, err := Parse(specifier) - if err != nil { - panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) - } - return p -} - -// Format returns a string specifier from the provided platform specification. 
-func Format(platform specs.Platform) string { - if platform.OS == "" { - return "unknown" - } - - return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) -} - -func joinNotEmpty(s ...string) string { - var ss []string - for _, s := range s { - if s == "" { - continue - } - - ss = append(ss, s) - } - - return strings.Join(ss, "/") -} - -// Normalize validates and translate the platform to the canonical value. -// -// For example, if "Aarch64" is encountered, we change it to "arm64" or if -// "x86_64" is encountered, it becomes "amd64". -func Normalize(platform specs.Platform) specs.Platform { - platform.OS = normalizeOS(platform.OS) - platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) - - // these fields are deprecated, remove them - platform.OSFeatures = nil - platform.OSVersion = "" - - return platform -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/compat.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/compat.go new file mode 100644 index 0000000000000..5dd6ef898cc0e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/compat.go @@ -0,0 +1,32 @@ +package alerts + +import "errors" + +var ( + ErrNotFound = errors.New("alertmanager config not found") +) + +// ToProto transforms a yaml Alertmanager config and map of template files to an AlertConfigDesc +func ToProto(cfg string, templates map[string]string, user string) (AlertConfigDesc, error) { + tmpls := []*TemplateDesc{} + for fn, body := range templates { + tmpls = append(tmpls, &TemplateDesc{ + Body: body, + Filename: fn, + }) + } + return AlertConfigDesc{ + User: user, + RawConfig: cfg, + Templates: tmpls, + }, nil +} + +// ParseTemplates returns a alertmanager config object +func ParseTemplates(cfg AlertConfigDesc) map[string]string { + templates := map[string]string{} + for _, t := range cfg.Templates { + templates[t.Filename] = t.Body + } + return templates +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb/store.go index 92dd964d9af95..939e96d48ebd7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb/store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb/store.go @@ -2,11 +2,15 @@ package configdb import ( "context" - - "github.com/cortexproject/cortex/pkg/configs/userconfig" + "errors" "github.com/cortexproject/cortex/pkg/alertmanager/alerts" "github.com/cortexproject/cortex/pkg/configs/client" + "github.com/cortexproject/cortex/pkg/configs/userconfig" +) + +var ( + errReadOnly = errors.New("configdb alertmanager config storage is read-only") ) // Store is a concrete implementation of RuleStore that sources rules from the config service @@ -59,3 +63,29 @@ func (c *Store) ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertCo return c.alertConfigs, nil } + +// GetAlertConfig finds and returns the AlertManager configuration of an user. +func (c *Store) GetAlertConfig(ctx context.Context, user string) (alerts.AlertConfigDesc, error) { + + // Refresh the local state before fetching an specific one. 
+ _, err := c.ListAlertConfigs(ctx) + if err != nil { + return alerts.AlertConfigDesc{}, err + } + + cfg, exists := c.alertConfigs[user] + + if !exists { + return alerts.AlertConfigDesc{}, alerts.ErrNotFound + } + + return cfg, nil +} + +func (c *Store) SetAlertConfig(ctx context.Context, cfg alerts.AlertConfigDesc) error { + return errReadOnly +} + +func (c *Store) DeleteAlertConfig(ctx context.Context, user string) error { + return errReadOnly +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/local/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/local/store.go index df8010e6d5cf2..c069d45106ff3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/local/store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/local/store.go @@ -14,6 +14,10 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/alerts" ) +var ( + errReadOnly = errors.New("local alertmanager config storage is read-only") +) + // StoreConfig configures a static file alertmanager store type StoreConfig struct { Path string `yaml:"path"` @@ -76,3 +80,26 @@ func (f *Store) ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertCo return configs, nil } + +func (f *Store) GetAlertConfig(ctx context.Context, user string) (alerts.AlertConfigDesc, error) { + cfgs, err := f.ListAlertConfigs(ctx) + if err != nil { + return alerts.AlertConfigDesc{}, err + } + + cfg, exists := cfgs[user] + + if !exists { + return alerts.AlertConfigDesc{}, alerts.ErrNotFound + } + + return cfg, nil +} + +func (f *Store) SetAlertConfig(ctx context.Context, cfg alerts.AlertConfigDesc) error { + return errReadOnly +} + +func (f *Store) DeleteAlertConfig(ctx context.Context, user string) error { + return errReadOnly +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go new file mode 100644 index 0000000000000..52e2e19895801 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go @@ -0,0 +1,97 @@ +package objectclient + +import ( + "bytes" + "context" + "io/ioutil" + "path" + + "github.com/cortexproject/cortex/pkg/alertmanager/alerts" + "github.com/cortexproject/cortex/pkg/chunk" +) + +// Object Alert Storage Schema +// ======================= +// Object Name: "alerts/" +// Storage Format: Encoded AlertConfigDesc + +const ( + alertPrefix = "alerts/" +) + +// AlertStore allows cortex alertmanager configs to be stored using an object store backend. 
+type AlertStore struct { + client chunk.ObjectClient +} + +// NewAlertStore returns a new AlertStore +func NewAlertStore(client chunk.ObjectClient) *AlertStore { + return &AlertStore{ + client: client, + } +} + +// ListAlertConfigs returns all of the active alert configs in this store +func (a *AlertStore) ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfigDesc, error) { + objs, _, err := a.client.List(ctx, alertPrefix) + if err != nil { + return nil, err + } + + cfgs := map[string]alerts.AlertConfigDesc{} + + for _, obj := range objs { + cfg, err := a.getAlertConfig(ctx, obj.Key) + if err != nil { + return nil, err + } + cfgs[cfg.User] = cfg + } + + return cfgs, nil +} + +func (a *AlertStore) getAlertConfig(ctx context.Context, key string) (alerts.AlertConfigDesc, error) { + reader, err := a.client.GetObject(ctx, key) + if err != nil { + return alerts.AlertConfigDesc{}, err + } + + buf, err := ioutil.ReadAll(reader) + if err != nil { + return alerts.AlertConfigDesc{}, err + } + + config := alerts.AlertConfigDesc{} + err = config.Unmarshal(buf) + if err != nil { + return alerts.AlertConfigDesc{}, err + } + + return config, nil +} + +// GetAlertConfig returns a specified user's alertmanager configuration +func (a *AlertStore) GetAlertConfig(ctx context.Context, user string) (alerts.AlertConfigDesc, error) { + cfg, err := a.getAlertConfig(ctx, path.Join(alertPrefix, user)) + if err == chunk.ErrStorageObjectNotFound { + return cfg, alerts.ErrNotFound + } + + return cfg, err +} + +// SetAlertConfig sets a specified user's alertmanager configuration +func (a *AlertStore) SetAlertConfig(ctx context.Context, cfg alerts.AlertConfigDesc) error { + cfgBytes, err := cfg.Marshal() + if err != nil { + return err + } + + return a.client.PutObject(ctx, path.Join(alertPrefix, cfg.User), bytes.NewReader(cfgBytes)) +} + +// DeleteAlertConfig deletes a specified user's alertmanager configuration +func (a *AlertStore) DeleteAlertConfig(ctx context.Context, user string) error { + return a.client.DeleteObject(ctx, path.Join(alertPrefix, user)) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go new file mode 100644 index 0000000000000..e1ec2216e852f --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go @@ -0,0 +1,120 @@ +package alertmanager + +import ( + "fmt" + "io/ioutil" + "net/http" + + "github.com/cortexproject/cortex/pkg/alertmanager/alerts" + "github.com/cortexproject/cortex/pkg/util" + + "github.com/go-kit/kit/log/level" + "github.com/weaveworks/common/user" + "gopkg.in/yaml.v2" +) + +const ( + errMarshallingYAML = "error marshalling YAML Alertmanager config" + errReadingConfiguration = "unable to read the Alertmanager config" + errStoringConfiguration = "unable to store the Alertmanager config" + errDeletingConfiguration = "unable to delete the Alertmanager config" + errNoOrgID = "unable to determine the OrgID" +) + +// UserConfig is used to communicate a users alertmanager configs +type UserConfig struct { + TemplateFiles map[string]string `yaml:"template_files"` + AlertmanagerConfig string `yaml:"alertmanager_config"` +} + +func (am *MultitenantAlertmanager) GetUserConfig(w http.ResponseWriter, r *http.Request) { + logger := util.WithContext(r.Context(), am.logger) + + userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + if err != nil { + level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) + http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, 
err.Error()), http.StatusUnauthorized) + return + } + + cfg, err := am.store.GetAlertConfig(r.Context(), userID) + if err != nil { + if err == alerts.ErrNotFound { + http.Error(w, err.Error(), http.StatusNotFound) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + return + } + + d, err := yaml.Marshal(&UserConfig{ + TemplateFiles: alerts.ParseTemplates(cfg), + AlertmanagerConfig: cfg.RawConfig, + }) + + if err != nil { + level.Error(logger).Log("msg", errMarshallingYAML, "err", err, "user", userID) + http.Error(w, fmt.Sprintf("%s: %s", errMarshallingYAML, err.Error()), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/yaml") + if _, err := w.Write(d); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +func (am *MultitenantAlertmanager) SetUserConfig(w http.ResponseWriter, r *http.Request) { + logger := util.WithContext(r.Context(), am.logger) + userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + if err != nil { + level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) + http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) + return + } + + payload, err := ioutil.ReadAll(r.Body) + if err != nil { + level.Error(logger).Log("msg", errReadingConfiguration, "err", err.Error()) + http.Error(w, fmt.Sprintf("%s: %s", errReadingConfiguration, err.Error()), http.StatusBadRequest) + return + } + + cfg := &UserConfig{} + err = yaml.Unmarshal(payload, cfg) + if err != nil { + level.Error(logger).Log("msg", errMarshallingYAML, "err", err.Error()) + http.Error(w, fmt.Sprintf("%s: %s", errMarshallingYAML, err.Error()), http.StatusBadRequest) + return + } + + cfgDesc, _ := alerts.ToProto(cfg.AlertmanagerConfig, cfg.TemplateFiles, userID) + err = am.store.SetAlertConfig(r.Context(), cfgDesc) + if err != nil { + level.Error(logger).Log("msg", errStoringConfiguration, "err", err.Error()) + http.Error(w, fmt.Sprintf("%s: %s", errStoringConfiguration, err.Error()), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusCreated) +} + +func (am *MultitenantAlertmanager) DeleteUserConfig(w http.ResponseWriter, r *http.Request) { + logger := util.WithContext(r.Context(), am.logger) + userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + if err != nil { + level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) + http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) + return + } + + err = am.store.DeleteAlertConfig(r.Context(), userID) + if err != nil { + level.Error(logger).Log("msg", errDeletingConfiguration, "err", err.Error()) + http.Error(w, fmt.Sprintf("%s: %s", errDeletingConfiguration, err.Error()), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go index 92c129b5fc3fc..4a13ff1c2420f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go @@ -101,6 +101,8 @@ type MultitenantAlertmanagerConfig struct { AutoWebhookRoot string `yaml:"auto_webhook_root"` Store AlertStoreConfig `yaml:"storage"` + + EnableAPI bool `yaml:"enable_api"` } const defaultClusterAddr = "0.0.0.0:9094" @@ -121,6 +123,8 @@ func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) { f.Var(&cfg.Peers, "cluster.peer", "Initial 
peers (may be repeated).") f.DurationVar(&cfg.PeerTimeout, "cluster.peer-timeout", time.Second*15, "Time to wait between peers to send notifications.") + f.BoolVar(&cfg.EnableAPI, "experimental.alertmanager.enable-api", false, "Enable the experimental alertmanager config api.") + cfg.Store.RegisterFlags(f) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go index 7a94b67611429..8808cdb28d07e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go @@ -8,12 +8,19 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/alerts" "github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb" "github.com/cortexproject/cortex/pkg/alertmanager/alerts/local" + "github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient" + "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/chunk/aws" + "github.com/cortexproject/cortex/pkg/chunk/gcp" "github.com/cortexproject/cortex/pkg/configs/client" ) // AlertStore stores and configures users rule configs type AlertStore interface { ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfigDesc, error) + GetAlertConfig(ctx context.Context, user string) (alerts.AlertConfigDesc, error) + SetAlertConfig(ctx context.Context, cfg alerts.AlertConfigDesc) error + DeleteAlertConfig(ctx context.Context, user string) error } // AlertStoreConfig configures the alertmanager backend @@ -21,13 +28,19 @@ type AlertStoreConfig struct { Type string `yaml:"type"` ConfigDB client.Config `yaml:"configdb"` Local local.StoreConfig `yaml:"local"` + + GCS gcp.GCSConfig `yaml:"gcs"` + S3 aws.S3Config `yaml:"s3"` } // RegisterFlags registers flags. func (cfg *AlertStoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.Local.RegisterFlags(f) cfg.ConfigDB.RegisterFlagsWithPrefix("alertmanager.", f) - f.StringVar(&cfg.Type, "alertmanager.storage.type", "configdb", "Type of backend to use to store alertmanager configs. Supported values are: \"configdb\", \"local\".") + f.StringVar(&cfg.Type, "alertmanager.storage.type", "configdb", "Type of backend to use to store alertmanager configs. 
Supported values are: \"configdb\", \"gcs\", \"s3\", \"local\".") + + cfg.GCS.RegisterFlagsWithPrefix("alertmanager.storage.", f) + cfg.S3.RegisterFlagsWithPrefix("alertmanager.storage.", f) } // NewAlertStore returns a new rule storage backend poller and store @@ -41,7 +54,18 @@ func NewAlertStore(cfg AlertStoreConfig) (AlertStore, error) { return configdb.NewStore(c), nil case "local": return local.NewStore(cfg.Local) + case "gcs": + return newObjAlertStore(gcp.NewGCSObjectClient(context.Background(), cfg.GCS, "")) + case "s3": + return newObjAlertStore(aws.NewS3ObjectClient(cfg.S3, "")) default: - return nil, fmt.Errorf("unrecognized alertmanager storage backend %v, choose one of: \"configdb\", \"local\"", cfg.Type) + return nil, fmt.Errorf("unrecognized alertmanager storage backend %v, choose one of: azure, configdb, gcs, local, s3", cfg.Type) + } +} + +func newObjAlertStore(client chunk.ObjectClient, err error) (AlertStore, error) { + if err != nil { + return nil, err } + return objectclient.NewAlertStore(client), nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/api/api.go index d81cc2f75ea46..c70f0a8b9248a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/api.go @@ -130,7 +130,7 @@ func fakeRemoteAddr(handler http.Handler) http.Handler { // RegisterAlertmanager registers endpoints associated with the alertmanager. It will only // serve endpoints using the legacy http-prefix if it is not run as a single binary. -func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, target bool) { +func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, target, apiEnabled bool) { // Ensure this route is registered before the prefixed AM route a.RegisterRoute("/multitenant_alertmanager/status", am.GetStatusHandler(), false) @@ -144,6 +144,13 @@ func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, tar a.RegisterRoute("/status", am.GetStatusHandler(), false) a.RegisterRoutesWithPrefix(a.cfg.LegacyHTTPPrefix, am, true) } + + // MultiTenant Alertmanager Experimental API routes + if apiEnabled { + a.RegisterRoute("/api/v1/alerts", http.HandlerFunc(am.GetUserConfig), true, "GET") + a.RegisterRoute("/api/v1/alerts", http.HandlerFunc(am.SetUserConfig), true, "POST") + a.RegisterRoute("/api/v1/alerts", http.HandlerFunc(am.DeleteUserConfig), true, "DELETE") + } } // RegisterAPI registers the standard endpoints associated with a running Cortex. 
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go index 9744d3bf0da3b..944ad2dab75a5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go @@ -2,18 +2,22 @@ package aws import ( "context" + "crypto/tls" "flag" - "fmt" "hash/fnv" "io" + "net" "net/http" "strings" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" awscommon "github.com/weaveworks/common/aws" "github.com/weaveworks/common/instrument" @@ -38,8 +42,23 @@ func init() { // S3Config specifies config for storing chunks on AWS S3. type S3Config struct { S3 flagext.URLValue - BucketNames string S3ForcePathStyle bool + + BucketNames string + Endpoint string `yaml:"endpoint"` + Region string `yaml:"region"` + AccessKeyID string `yaml:"access_key_id"` + SecretAccessKey string `yaml:"secret_access_key"` + Insecure bool `yaml:"insecure"` + SSEEncryption bool `yaml:"sse_encryption"` + HTTPConfig HTTPConfig `yaml:"http_config"` +} + +// HTTPConfig stores the http.Transport configuration +type HTTPConfig struct { + IdleConnTimeout time.Duration `yaml:"idle_conn_timeout"` + ResponseHeaderTimeout time.Duration `yaml:"response_header_timeout"` + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -53,45 +72,134 @@ func (cfg *S3Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { "If only region is specified as a host, proper endpoint will be deduced. Use inmemory:/// to use a mock in-memory implementation.") f.BoolVar(&cfg.S3ForcePathStyle, prefix+"s3.force-path-style", false, "Set this to `true` to force the request to use path-style addressing.") f.StringVar(&cfg.BucketNames, prefix+"s3.buckets", "", "Comma separated list of bucket names to evenly distribute chunks over. 
Overrides any buckets specified in s3.url flag") + + f.StringVar(&cfg.Endpoint, prefix+"s3.endpoint", "", "S3 Endpoint to connect to.") + f.StringVar(&cfg.Region, prefix+"s3.region", "", "AWS region to use.") + f.StringVar(&cfg.AccessKeyID, prefix+"s3.access-key-id", "", "AWS Access Key ID") + f.StringVar(&cfg.SecretAccessKey, prefix+"s3.secret-access-key", "", "AWS Secret Access Key") + f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "Disable https on s3 connection.") + f.BoolVar(&cfg.SSEEncryption, prefix+"s3.sse-encryption", false, "Enable AES256 AWS Server Side Encryption") + + f.DurationVar(&cfg.HTTPConfig.IdleConnTimeout, prefix+"s3.http.idle-conn-timeout", 90*time.Second, "The maximum amount of time an idle connection will be held open.") + f.DurationVar(&cfg.HTTPConfig.ResponseHeaderTimeout, prefix+"s3.http.response-header-timeout", 0, "If non-zero, specifies the amount of time to wait for a server's response headers after fully writing the request.") + f.BoolVar(&cfg.HTTPConfig.InsecureSkipVerify, prefix+"s3.http.insecure-skip-verify", false, "Set to true to skip verifying the certificate chain and hostname.") } type S3ObjectClient struct { - bucketNames []string - S3 s3iface.S3API - delimiter string + bucketNames []string + S3 s3iface.S3API + delimiter string + sseEncryption *string } // NewS3ObjectClient makes a new S3-backed ObjectClient. func NewS3ObjectClient(cfg S3Config, delimiter string) (*S3ObjectClient, error) { - if cfg.S3.URL == nil { - return nil, fmt.Errorf("no URL specified for S3") - } - s3Config, err := awscommon.ConfigFromURL(cfg.S3.URL) + s3Config, bucketNames, err := buildS3Config(cfg) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to build s3 config") } - s3Config = s3Config.WithS3ForcePathStyle(cfg.S3ForcePathStyle) // support for Path Style S3 url if has the flag - - s3Config = s3Config.WithMaxRetries(0) // We do our own retries, so we can monitor them - s3Config = s3Config.WithHTTPClient(&http.Client{Transport: defaultTransport}) sess, err := session.NewSession(s3Config) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to create new s3 session") } + s3Client := s3.New(sess) - bucketNames := []string{strings.TrimPrefix(cfg.S3.URL.Path, "/")} - if cfg.BucketNames != "" { - bucketNames = strings.Split(cfg.BucketNames, ",") // comma separated list of bucket names + + var sseEncryption *string + if cfg.SSEEncryption { + sseEncryption = aws.String("AES256") } + client := S3ObjectClient{ - S3: s3Client, - bucketNames: bucketNames, - delimiter: delimiter, + S3: s3Client, + bucketNames: bucketNames, + delimiter: delimiter, + sseEncryption: sseEncryption, } return &client, nil } +func buildS3Config(cfg S3Config) (*aws.Config, []string, error) { + var s3Config *aws.Config + var err error + + // if an s3 url is passed use it to initialize the s3Config and then override with any additional params + if cfg.S3.URL != nil { + s3Config, err = awscommon.ConfigFromURL(cfg.S3.URL) + if err != nil { + return nil, nil, err + } + } else { + s3Config = &aws.Config{} + s3Config = s3Config.WithRegion("dummy") + s3Config = s3Config.WithCredentials(credentials.AnonymousCredentials) + } + + s3Config = s3Config.WithMaxRetries(0) // We do our own retries, so we can monitor them + s3Config = s3Config.WithS3ForcePathStyle(cfg.S3ForcePathStyle) // support path-style S3 URLs if the flag is set + + if cfg.Endpoint != "" { + s3Config = s3Config.WithEndpoint(cfg.Endpoint) + } + + if cfg.Insecure { + s3Config = 
s3Config.WithDisableSSL(true) + } + + if cfg.Region != "" { + s3Config = s3Config.WithRegion(cfg.Region) + } + + if cfg.AccessKeyID != "" && cfg.SecretAccessKey == "" || + cfg.AccessKeyID == "" && cfg.SecretAccessKey != "" { + return nil, nil, errors.New("must supply both an Access Key ID and Secret Access Key or neither") + } + + if cfg.AccessKeyID != "" && cfg.SecretAccessKey != "" { + creds := credentials.NewStaticCredentials(cfg.AccessKeyID, cfg.SecretAccessKey, "") + s3Config = s3Config.WithCredentials(creds) + } + + // While extending S3 configuration this http config was copied in order + // to maintain backwards compatibility with previous versions of Cortex while providing + // more flexible configuration of the http client + // https://github.com/weaveworks/common/blob/4b1847531bc94f54ce5cf210a771b2a86cd34118/aws/config.go#L23 + s3Config = s3Config.WithHTTPClient(&http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: cfg.HTTPConfig.IdleConnTimeout, + MaxIdleConnsPerHost: 100, + TLSHandshakeTimeout: 3 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + ResponseHeaderTimeout: time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout), + TLSClientConfig: &tls.Config{InsecureSkipVerify: cfg.HTTPConfig.InsecureSkipVerify}, + }, + }) + + // bucket names + var bucketNames []string + if cfg.S3.URL != nil { + bucketNames = []string{strings.TrimPrefix(cfg.S3.URL.Path, "/")} + } + + if cfg.BucketNames != "" { + bucketNames = strings.Split(cfg.BucketNames, ",") // comma separated list of bucket names + } + + if len(bucketNames) == 0 { + return nil, nil, errors.New("at least one bucket name must be specified") + } + + return s3Config, bucketNames, nil +} + // Stop fulfills the chunk.ObjectClient interface func (a *S3ObjectClient) Stop() {} @@ -160,9 +268,10 @@ func (a *S3ObjectClient) GetObject(ctx context.Context, objectKey string) (io.Re func (a *S3ObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error { return instrument.CollectedRequest(ctx, "S3.PutObject", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error { _, err := a.S3.PutObjectWithContext(ctx, &s3.PutObjectInput{ - Body: object, - Bucket: aws.String(a.bucketFromKey(objectKey)), - Key: aws.String(objectKey), + Body: object, + Bucket: aws.String(a.bucketFromKey(objectKey)), + Key: aws.String(objectKey), + ServerSideEncryption: a.sseEncryption, }) return err }) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go index 2e966a659a971..5d2df4a4f60da 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go @@ -120,8 +120,7 @@ type Purger struct { // we would only allow processing of a single delete request at a time since delete requests touching same chunks could change the chunk IDs of partially deleted chunks // and break the purge plan for other requests - inProcessRequests map[string]DeleteRequest - inProcessRequestIDsMtx sync.RWMutex + inProcessRequests *inProcessRequestsCollection // We do not want to limit pulling new delete requests to a fixed interval which otherwise would limit the number of delete requests we process per user. 
// While loading delete requests if we find more requests from a user pending to be processed, we just set their id in usersWithPendingRequests and @@ -149,7 +148,7 @@ func NewPurger(cfg Config, deleteStore *DeleteStore, chunkStore chunk.Store, sto pullNewRequestsChan: make(chan struct{}, 1), executePlansChan: make(chan deleteRequestWithLogger, 50), workerJobChan: make(chan workerJob, 50), - inProcessRequests: map[string]DeleteRequest{}, + inProcessRequests: newInProcessRequestsCollection(), usersWithPendingRequests: map[string]struct{}{}, pendingPlansCount: map[string]int{}, } @@ -231,9 +230,7 @@ func (p *Purger) workerJobCleanup(job workerJob) { delete(p.pendingPlansCount, job.deleteRequestID) p.pendingPlansCountMtx.Unlock() - p.inProcessRequestIDsMtx.Lock() - delete(p.inProcessRequests, job.userID) - p.inProcessRequestIDsMtx.Unlock() + p.inProcessRequests.remove(job.userID) // request loading of more delete requests if // - user has more pending requests and @@ -367,12 +364,13 @@ func (p *Purger) loadInprocessDeleteRequests() error { return err } - for _, deleteRequest := range requestsWithBuildingPlanStatus { + for i := range requestsWithBuildingPlanStatus { + deleteRequest := requestsWithBuildingPlanStatus[i] req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) + p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) level.Info(req.logger).Log("msg", "loaded in process delete requests with status building plan") - p.inProcessRequests[deleteRequest.UserID] = deleteRequest err := p.buildDeletePlan(req) if err != nil { p.metrics.deleteRequestsProcessingFailures.WithLabelValues(deleteRequest.UserID).Inc() @@ -389,11 +387,12 @@ func (p *Purger) loadInprocessDeleteRequests() error { return err } - for _, deleteRequest := range requestsWithDeletingStatus { + for i := range requestsWithDeletingStatus { + deleteRequest := requestsWithDeletingStatus[i] req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) level.Info(req.logger).Log("msg", "loaded in process delete requests with status deleting") - p.inProcessRequests[deleteRequest.UserID] = deleteRequest + p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) p.executePlansChan <- req } @@ -408,22 +407,21 @@ func (p *Purger) pullDeleteRequestsToPlanDeletes() error { return err } - p.inProcessRequestIDsMtx.RLock() - pendingDeleteRequestsCount := len(p.inProcessRequests) - p.inProcessRequestIDsMtx.RUnlock() - + pendingDeleteRequestsCount := p.inProcessRequests.len() now := model.Now() oldestPendingRequestCreatedAt := model.Time(0) // requests which are still being processed are also considered pending if pendingDeleteRequestsCount != 0 { - oldestInProcessRequest := p.getOldestInProcessRequest() + oldestInProcessRequest := p.inProcessRequests.getOldest() if oldestInProcessRequest != nil { oldestPendingRequestCreatedAt = oldestInProcessRequest.CreatedAt } } - for _, deleteRequest := range deleteRequests { + for i := range deleteRequests { + deleteRequest := deleteRequests[i] + // adding an extra minute here to avoid a race between cancellation of request and picking of the request for processing if deleteRequest.CreatedAt.Add(p.cfg.DeleteRequestCancelPeriod).Add(time.Minute).After(model.Now()) { continue @@ -434,11 +432,7 @@ func (p *Purger) pullDeleteRequestsToPlanDeletes() error { oldestPendingRequestCreatedAt = deleteRequest.CreatedAt } - p.inProcessRequestIDsMtx.RLock() - inprocessDeleteRequest, ok := p.inProcessRequests[deleteRequest.UserID] - p.inProcessRequestIDsMtx.RUnlock() - - if ok { + if 
inprocessDeleteRequest := p.inProcessRequests.get(deleteRequest.UserID); inprocessDeleteRequest != nil { p.usersWithPendingRequestsMtx.Lock() p.usersWithPendingRequests[deleteRequest.UserID] = struct{}{} p.usersWithPendingRequestsMtx.Unlock() @@ -454,10 +448,7 @@ func (p *Purger) pullDeleteRequestsToPlanDeletes() error { return err } - p.inProcessRequestIDsMtx.Lock() - p.inProcessRequests[deleteRequest.UserID] = deleteRequest - p.inProcessRequestIDsMtx.Unlock() - + p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) level.Info(req.logger).Log("msg", "building plan for a new delete request") @@ -599,20 +590,6 @@ func (p *Purger) removeDeletePlan(ctx context.Context, userID, requestID string, return p.objectClient.DeleteObject(ctx, objectKey) } -func (p *Purger) getOldestInProcessRequest() *DeleteRequest { - p.inProcessRequestIDsMtx.RLock() - defer p.inProcessRequestIDsMtx.RUnlock() - - var oldestRequest *DeleteRequest - for _, request := range p.inProcessRequests { - if oldestRequest == nil || request.CreatedAt.Before(oldestRequest.CreatedAt) { - oldestRequest = &request - } - } - - return oldestRequest -} - // returns interval per plan func splitByDay(start, end model.Time) []model.Interval { numOfDays := numPlans(start, end) @@ -714,3 +691,56 @@ func makeDeleteRequestWithLogger(deleteRequest DeleteRequest, l log.Logger) dele logger := log.With(l, "user_id", deleteRequest.UserID, "request_id", deleteRequest.RequestID) return deleteRequestWithLogger{deleteRequest, logger} } + +// inProcessRequestsCollection stores the DeleteRequests which are in process for each user. +// Currently we only allow processing of one delete request per user, so it stores a single DeleteRequest per user. +type inProcessRequestsCollection struct { + requests map[string]*DeleteRequest + mtx sync.RWMutex +} + +func newInProcessRequestsCollection() *inProcessRequestsCollection { + return &inProcessRequestsCollection{requests: map[string]*DeleteRequest{}} +} + +func (i *inProcessRequestsCollection) set(userID string, request *DeleteRequest) { + i.mtx.Lock() + defer i.mtx.Unlock() + + i.requests[userID] = request +} + +func (i *inProcessRequestsCollection) get(userID string) *DeleteRequest { + i.mtx.RLock() + defer i.mtx.RUnlock() + + return i.requests[userID] +} + +func (i *inProcessRequestsCollection) remove(userID string) { + i.mtx.Lock() + defer i.mtx.Unlock() + + delete(i.requests, userID) +} + +func (i *inProcessRequestsCollection) len() int { + i.mtx.RLock() + defer i.mtx.RUnlock() + + return len(i.requests) +} + +func (i *inProcessRequestsCollection) getOldest() *DeleteRequest { + i.mtx.RLock() + defer i.mtx.RUnlock() + + var oldestRequest *DeleteRequest + for _, request := range i.requests { + if oldestRequest == nil || request.CreatedAt.Before(oldestRequest.CreatedAt) { + oldestRequest = request + } + } + + return oldestRequest +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go index 05343c37c8237..da0c1209f1792 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go @@ -4,6 +4,7 @@ import ( "context" "crypto/tls" "encoding/json" + "errors" "flag" "fmt" "net/http" @@ -21,6 +22,10 @@ import ( tls_cfg "github.com/cortexproject/cortex/pkg/util/tls" ) +var ( + errBadURL = errors.New("configs_api_url is not set or is invalid") +) + // Config says where we 
can find the ruler userconfig. type Config struct { ConfigsAPIURL flagext.URLValue `yaml:"configs_api_url"` @@ -54,6 +59,11 @@ type Client interface { // New creates a new ConfigClient. func New(cfg Config) (*ConfigDBClient, error) { + + if cfg.ConfigsAPIURL.URL == nil { + return nil, errBadURL + } + client := &ConfigDBClient{ URL: cfg.ConfigsAPIURL.URL, Timeout: cfg.ClientTimeout, diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go index d2be3e29438d2..ad2dbc8dbc8ac 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go @@ -169,7 +169,7 @@ func (t *Cortex) initDistributor() (serv services.Service, err error) { } func (t *Cortex) initQuerier() (serv services.Service, err error) { - queryable, engine := querier.New(t.Cfg.Querier, t.Distributor, t.StoreQueryables, t.TombstonesLoader, prometheus.DefaultRegisterer) + queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, prometheus.DefaultRegisterer) // Prometheus histograms for requests to the querier. querierRequestDuration := promauto.With(prometheus.DefaultRegisterer).NewHistogramVec(prometheus.HistogramOpts{ @@ -254,10 +254,6 @@ func initQueryableForEngine(engine string, cfg Config, chunkStore chunk.Store, r return querier.NewChunkStoreQueryable(cfg.Querier, chunkStore), nil case storage.StorageEngineTSDB: - if !cfg.TSDB.StoreGatewayEnabled { - return querier.NewBlockQueryable(cfg.TSDB, cfg.Server.LogLevel, reg) - } - // When running in single binary, if the blocks sharding is disabled and no custom // store-gateway address has been configured, we can set it to the running process. 
if cfg.Target == All && !cfg.StoreGateway.ShardingEnabled && cfg.Querier.StoreGatewayAddresses == "" { @@ -455,7 +451,7 @@ func (t *Cortex) initTableManager() (services.Service, error) { func (t *Cortex) initRuler() (serv services.Service, err error) { t.Cfg.Ruler.Ring.ListenPort = t.Cfg.Server.GRPCListenPort t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - queryable, engine := querier.New(t.Cfg.Querier, t.Distributor, t.StoreQueryables, t.TombstonesLoader, prometheus.DefaultRegisterer) + queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, prometheus.DefaultRegisterer) t.Ruler, err = ruler.NewRuler(t.Cfg.Ruler, engine, queryable, t.Distributor, prometheus.DefaultRegisterer, util.Logger) if err != nil { @@ -487,7 +483,8 @@ func (t *Cortex) initAlertManager() (serv services.Service, err error) { if err != nil { return } - t.API.RegisterAlertmanager(t.Alertmanager, t.Cfg.Target == AlertManager) + + t.API.RegisterAlertmanager(t.Alertmanager, t.Cfg.Target == AlertManager, t.Cfg.Alertmanager.EnableAPI) return t.Alertmanager, nil } @@ -590,11 +587,11 @@ func (t *Cortex) setupModuleManager() error { Store: {Overrides, DeleteRequestsStore}, Ingester: {Overrides, Store, API, RuntimeConfig, MemberlistKV}, Flusher: {Store, API}, - Querier: {Distributor, Store, Ring, API, StoreQueryable}, + Querier: {Overrides, Distributor, Store, Ring, API, StoreQueryable}, StoreQueryable: {Store}, QueryFrontend: {API, Overrides, DeleteRequestsStore}, TableManager: {API}, - Ruler: {Distributor, Store, StoreQueryable}, + Ruler: {Overrides, Distributor, Store, StoreQueryable}, Configs: {API}, AlertManager: {API}, Compactor: {API}, diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go index c26642fc29b07..da73d0f0decd4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go @@ -155,9 +155,9 @@ type Config struct { // for testing ingesterClientFactory ring_client.PoolFactory `yaml:"-"` - // when true the distributor does not validate labels at ingest time, Cortex doesn't directly use + // when true the distributor does not validate label names, Cortex doesn't directly use // this (and should never use it) but this feature is used by other projects built on top of it - SkipLabelValidation bool `yaml:"-"` + SkipLabelNameValidation bool `yaml:"-"` } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -336,10 +336,8 @@ func (d *Distributor) checkSample(ctx context.Context, userID, cluster, replica // Returns the validated series with its labels/samples, and any error. 
func (d *Distributor) validateSeries(ts ingester_client.PreallocTimeseries, userID string) (client.PreallocTimeseries, error) { labelsHistogram.Observe(float64(len(ts.Labels))) - if !d.cfg.SkipLabelValidation { - if err := validation.ValidateLabels(d.limits, userID, ts.Labels); err != nil { - return emptyPreallocSeries, err - } + if err := validation.ValidateLabels(d.limits, userID, ts.Labels, d.cfg.SkipLabelNameValidation); err != nil { + return emptyPreallocSeries, err } metricName, _ := extract.MetricNameFromLabelAdapters(ts.Labels) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go index 73f619163fbd3..331a4a53db01f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go @@ -276,6 +276,11 @@ func ReuseSlice(ts []PreallocTimeseries) { // ReuseTimeseries puts the timeseries back into a sync.Pool for reuse. func ReuseTimeseries(ts *TimeSeries) { + // Name and Value may point into a large gRPC buffer, so clear the reference to allow GC + for i := 0; i < len(ts.Labels); i++ { + ts.Labels[i].Name = "" + ts.Labels[i].Value = "" + } ts.Labels = ts.Labels[:0] ts.Samples = ts.Samples[:0] timeSeriesPool.Put(ts) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/block.go b/vendor/github.com/cortexproject/cortex/pkg/querier/block.go index f3718f7437916..5cb7648b030dd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/block.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/block.go @@ -1,122 +1,19 @@ package querier import ( - "context" "math" "sort" - "github.com/go-kit/kit/log/level" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/weaveworks/common/logging" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/querier/series" - "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/spanlogger" ) -// BlockQueryable is a storage.Queryable implementation for blocks storage -type BlockQueryable struct { - services.Service - - us *BucketStoresService -} - -// NewBlockQueryable returns a client to query a block store -func NewBlockQueryable(cfg tsdb.Config, logLevel logging.Level, registerer prometheus.Registerer) (*BlockQueryable, error) { - util.WarnExperimentalUse("Blocks storage engine") - bucketClient, err := tsdb.NewBucketClient(context.Background(), cfg, "querier", util.Logger, registerer) - if err != nil { - return nil, err - } - - us, err := NewBucketStoresService(cfg, bucketClient, logLevel, util.Logger, registerer) - if err != nil { - return nil, err - } - - b := &BlockQueryable{us: us} - b.Service = services.NewIdleService(b.starting, b.stopping) - - return b, nil -} - -func (b *BlockQueryable) starting(ctx context.Context) error { - return errors.Wrap(services.StartAndAwaitRunning(ctx, b.us), "failed to start BucketStoresService") -} - -func (b *BlockQueryable) stopping(_ error) error { - return errors.Wrap(services.StopAndAwaitTerminated(context.Background(), b.us), "stopping BucketStoresService") -} - -// Querier 
returns a new Querier on the storage. -func (b *BlockQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - if s := b.State(); s != services.Running { - return nil, promql.ErrStorage{Err: errors.Errorf("BlockQueryable is not running: %v", s)} - } - - userID, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, promql.ErrStorage{Err: err} - } - - return &blocksQuerier{ - ctx: ctx, - mint: mint, - maxt: maxt, - userID: userID, - userStores: b.us, - }, nil -} - -type blocksQuerier struct { - ctx context.Context - mint, maxt int64 - userID string - userStores *BucketStoresService -} - -// Select implements storage.Querier interface. -// The bool passed is ignored because the series is always sorted. -func (b *blocksQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - log, ctx := spanlogger.New(b.ctx, "blocksQuerier.Select") - defer log.Span.Finish() - - mint, maxt := b.mint, b.maxt - if sp != nil { - mint, maxt = sp.Start, sp.End - } - converted := convertMatchersToLabelMatcher(matchers) - - // Returned series are sorted. - // No processing of responses is done here. Dealing with multiple responses - // for the same series and overlapping chunks is done in blockQuerierSeriesSet. - series, warnings, err := b.userStores.Series(ctx, b.userID, &storepb.SeriesRequest{ - MinTime: mint, - MaxTime: maxt, - Matchers: converted, - PartialResponseStrategy: storepb.PartialResponseStrategy_ABORT, - }) - if err != nil { - return storage.ErrSeriesSet(promql.ErrStorage{Err: err}) - } - - level.Debug(log).Log("series", len(series), "warnings", len(warnings)) - - return &blockQuerierSeriesSet{ - series: series, - warnings: warnings, - } -} - func convertMatchersToLabelMatcher(matchers []*labels.Matcher) []storepb.LabelMatcher { var converted []storepb.LabelMatcher for _, m := range matchers { @@ -141,21 +38,6 @@ func convertMatchersToLabelMatcher(matchers []*labels.Matcher) []storepb.LabelMa return converted } -func (b *blocksQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { - // Cortex doesn't use this. It will ask ingesters for metadata. - return nil, nil, errors.New("not implemented") -} - -func (b *blocksQuerier) LabelNames() ([]string, storage.Warnings, error) { - // Cortex doesn't use this. It will ask ingesters for metadata. - return nil, nil, errors.New("not implemented") -} - -func (b *blocksQuerier) Close() error { - // nothing to do here. - return nil -} - // Implementation of storage.SeriesSet, based on individual responses from store client. 
type blockQuerierSeriesSet struct { series []*storepb.Series diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_bucket_stores_service.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_bucket_stores_service.go deleted file mode 100644 index e40c388e0b07c..0000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_bucket_stores_service.go +++ /dev/null @@ -1,116 +0,0 @@ -package querier - -import ( - "context" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/storage" - "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/runutil" - "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/weaveworks/common/logging" - "google.golang.org/grpc/metadata" - - "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/storegateway" - "github.com/cortexproject/cortex/pkg/util/services" -) - -// BucketStoresService wraps BucketStores into a service which triggers both the initial -// sync at startup and a periodic sync honoring configured the sync interval. -type BucketStoresService struct { - services.Service - - cfg tsdb.Config - logger log.Logger - stores *storegateway.BucketStores -} - -func NewBucketStoresService(cfg tsdb.Config, bucketClient objstore.Bucket, logLevel logging.Level, logger log.Logger, registerer prometheus.Registerer) (*BucketStoresService, error) { - stores, err := storegateway.NewBucketStores(cfg, nil, bucketClient, logLevel, logger, extprom.WrapRegistererWith(prometheus.Labels{"component": "querier"}, registerer)) - if err != nil { - return nil, err - } - - s := &BucketStoresService{ - cfg: cfg, - stores: stores, - logger: logger, - } - - s.Service = services.NewBasicService(s.starting, s.syncStoresLoop, nil) - - return s, nil -} - -func (s *BucketStoresService) starting(ctx context.Context) error { - if s.cfg.BucketStore.SyncInterval > 0 { - // Run an initial blocks sync, required in order to be able to serve queries. - if err := s.stores.InitialSync(ctx); err != nil { - return err - } - } - - return nil -} - -// syncStoresLoop periodically calls SyncBlocks() to synchronize the blocks for all tenants. -func (s *BucketStoresService) syncStoresLoop(ctx context.Context) error { - // If the sync is disabled we never sync blocks, which means the bucket store - // will be empty and no series will be returned once queried. - if s.cfg.BucketStore.SyncInterval <= 0 { - <-ctx.Done() - return nil - } - - syncInterval := s.cfg.BucketStore.SyncInterval - - // Since we've just run the initial sync, we should wait the next - // sync interval before resynching. - select { - case <-ctx.Done(): - return nil - case <-time.After(syncInterval): - } - - err := runutil.Repeat(syncInterval, ctx.Done(), func() error { - level.Info(s.logger).Log("msg", "synchronizing TSDB blocks for all users") - if err := s.stores.SyncBlocks(ctx); err != nil { - level.Warn(s.logger).Log("msg", "failed to synchronize TSDB blocks", "err", err) - } else { - level.Info(s.logger).Log("msg", "successfully synchronized TSDB blocks for all users") - } - - return nil - }) - - // This should never occur because the rununtil.Repeat() returns error - // only if the callback function returns error (which doesn't), but since - // we have to handle the error because of the linter, it's better to log it. 
- return errors.Wrap(err, "blocks synchronization has been halted due to an unexpected error") -} - -// Series makes a series request to the underlying user bucket store. -func (s *BucketStoresService) Series(ctx context.Context, userID string, req *storepb.SeriesRequest) ([]*storepb.Series, storage.Warnings, error) { - // Inject the user ID into the context metadata, as expected by BucketStores. - ctx = setUserIDToGRPCContext(ctx, userID) - - srv := storegateway.NewBucketStoreSeriesServer(ctx) - err := s.stores.Series(req, srv) - if err != nil { - return nil, nil, err - } - - return srv.SeriesSet, srv.Warnings, nil -} - -func setUserIDToGRPCContext(ctx context.Context, userID string) context.Context { - // We have to store it in the incoming metadata because we have to emulate the - // case it's coming from a gRPC request, while here we're running everything in-memory. - return metadata.NewIncomingContext(ctx, metadata.Pairs(tsdb.TenantIDExternalLabel, userID)) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go index ad578d00657ff..634fbd410dfe4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go @@ -4,6 +4,7 @@ import ( "context" "errors" "flag" + "fmt" "strings" "time" @@ -26,6 +27,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/tls" + "github.com/cortexproject/cortex/pkg/util/validation" ) // Config contains the configuration require to create a querier @@ -132,7 +134,7 @@ func NewChunkStoreQueryable(cfg Config, chunkStore chunkstore.ChunkStore) storag } // New builds a queryable and promql engine. -func New(cfg Config, distributor Distributor, stores []QueryableWithFilter, tombstonesLoader *purger.TombstonesLoader, reg prometheus.Registerer) (storage.SampleAndChunkQueryable, *promql.Engine) { +func New(cfg Config, limits *validation.Overrides, distributor Distributor, stores []QueryableWithFilter, tombstonesLoader *purger.TombstonesLoader, reg prometheus.Registerer) (storage.SampleAndChunkQueryable, *promql.Engine) { iteratorFunc := getChunksIteratorFunction(cfg) distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterStreaming, iteratorFunc, cfg.QueryIngestersWithin) @@ -145,7 +147,7 @@ func New(cfg Config, distributor Distributor, stores []QueryableWithFilter, tomb } } - queryable := NewQueryable(distributorQueryable, ns, iteratorFunc, cfg, tombstonesLoader) + queryable := NewQueryable(distributorQueryable, ns, iteratorFunc, cfg, limits, tombstonesLoader) lazyQueryable := storage.QueryableFunc(func(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) { querier, err := queryable.Querier(ctx, mint, maxt) @@ -205,7 +207,7 @@ type QueryableWithFilter interface { } // NewQueryable creates a new Queryable for cortex. 
-func NewQueryable(distributor QueryableWithFilter, stores []QueryableWithFilter, chunkIterFn chunkIteratorFunc, cfg Config, tombstonesLoader *purger.TombstonesLoader) storage.Queryable { +func NewQueryable(distributor QueryableWithFilter, stores []QueryableWithFilter, chunkIterFn chunkIteratorFunc, cfg Config, limits *validation.Overrides, tombstonesLoader *purger.TombstonesLoader) storage.Queryable { return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { now := time.Now() @@ -226,6 +228,7 @@ func NewQueryable(distributor QueryableWithFilter, stores []QueryableWithFilter, maxt: maxt, chunkIterFn: chunkIterFn, tombstonesLoader: tombstonesLoader, + limits: limits, } dqr, err := distributor.Querier(ctx, mint, maxt) @@ -268,6 +271,7 @@ type querier struct { mint, maxt int64 tombstonesLoader *purger.TombstonesLoader + limits *validation.Overrides } // Select implements storage.Querier interface. @@ -293,7 +297,14 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat return storage.ErrSeriesSet(promql.ErrStorage{Err: err}) } - tombstones, err := q.tombstonesLoader.GetPendingTombstonesForInterval(userID, model.Time(sp.Start), model.Time(sp.End)) + // Validate query time range. + startTime := model.Time(sp.Start) + endTime := model.Time(sp.End) + if maxQueryLength := q.limits.MaxQueryLength(userID); maxQueryLength > 0 && endTime.Sub(startTime) > maxQueryLength { + return storage.ErrSeriesSet(fmt.Errorf(validation.ErrQueryTooLong, endTime.Sub(startTime), maxQueryLength)) + } + + tombstones, err := q.tombstonesLoader.GetPendingTombstonesForInterval(userID, startTime, endTime) if err != nil { return storage.ErrSeriesSet(promql.ErrStorage{Err: err}) } @@ -302,7 +313,7 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat seriesSet := q.queriers[0].Select(true, sp, matchers...) if tombstones.Len() != 0 { - seriesSet = series.NewDeletedSeriesSet(seriesSet, tombstones, model.Interval{Start: model.Time(sp.Start), End: model.Time(sp.End)}) + seriesSet = series.NewDeletedSeriesSet(seriesSet, tombstones, model.Interval{Start: startTime, End: endTime}) } return seriesSet @@ -331,7 +342,7 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat seriesSet := q.mergeSeriesSets(result) if tombstones.Len() != 0 { - seriesSet = series.NewDeletedSeriesSet(seriesSet, tombstones, model.Interval{Start: model.Time(sp.Start), End: model.Time(sp.End)}) + seriesSet = series.NewDeletedSeriesSet(seriesSet, tombstones, model.Interval{Start: startTime, End: endTime}) } return seriesSet } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go index 5aecb8c98e0a7..a7720434b27c2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go @@ -14,6 +14,21 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" ) +const ( + // Primary is the role of the primary KV store. + Primary = role("primary") + // Secondary is the role of the secondary KV store used by the "multi" KV store. + Secondary = role("secondary") +) + +// The role type indicates the role of a KV store. +type role string + +// Labels returns the Prometheus labels relevant to this role. 
+func (r *role) Labels() prometheus.Labels { + return prometheus.Labels{"role": string(*r)} + } + // The NewInMemoryKVClient returned by NewClient() is a singleton, so // that distributors and ingesters started in the same process can // find themselves. @@ -104,10 +119,10 @@ func NewClient(cfg Config, codec codec.Codec, reg prometheus.Registerer) (Client return cfg.Mock, nil } - return createClient(cfg.Store, cfg.Prefix, cfg.StoreConfig, codec, reg) + return createClient(cfg.Store, cfg.Prefix, cfg.StoreConfig, codec, Primary, reg) } -func createClient(backend string, prefix string, cfg StoreConfig, codec codec.Codec, reg prometheus.Registerer) (Client, error) { +func createClient(backend string, prefix string, cfg StoreConfig, codec codec.Codec, role role, reg prometheus.Registerer) (Client, error) { var client Client var err error @@ -139,6 +154,10 @@ func createClient(backend string, prefix string, cfg StoreConfig, codec codec.Co case "multi": client, err = buildMultiClient(cfg, codec, reg) + // This case is for testing. The mock KV client does not do anything internally. + case "mock": + client, err = buildMockClient() + default: return nil, fmt.Errorf("invalid KV store type: %s", backend) } @@ -151,20 +170,15 @@ func createClient(backend string, prefix string, cfg StoreConfig, codec codec.Co client = PrefixClient(client, prefix) } - // If no Registerer is provided return the raw client + // If no Registerer is provided return the raw client. if reg == nil { return client, nil } - return newMetricsClient(backend, client, reg), nil + return newMetricsClient(backend, client, prometheus.WrapRegistererWith(role.Labels(), reg)), nil } func buildMultiClient(cfg StoreConfig, codec codec.Codec, reg prometheus.Registerer) (Client, error) { - var ( - primaryLabel = prometheus.Labels{"role": "primary"} - secondaryLabel = prometheus.Labels{"role": "secondary"} - ) - if cfg.Multi.Primary == "" || cfg.Multi.Secondary == "" { return nil, fmt.Errorf("primary or secondary store not set") } @@ -175,12 +189,12 @@ func buildMultiClient(cfg StoreConfig, codec codec.Codec, reg prometheus.Registe return nil, fmt.Errorf("primary and secondary stores must be different") } - primary, err := createClient(cfg.Multi.Primary, "", cfg, codec, prometheus.WrapRegistererWith(primaryLabel, reg)) + primary, err := createClient(cfg.Multi.Primary, "", cfg, codec, Primary, reg) if err != nil { return nil, err } - secondary, err := createClient(cfg.Multi.Secondary, "", cfg, codec, prometheus.WrapRegistererWith(secondaryLabel, reg)) + secondary, err := createClient(cfg.Multi.Secondary, "", cfg, codec, Secondary, reg) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go new file mode 100644 index 0000000000000..c899b634326ea --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go @@ -0,0 +1,40 @@ +package kv + +import ( + "context" + + "github.com/go-kit/kit/log/level" + + "github.com/cortexproject/cortex/pkg/util" +) + +// The mockClient does not do anything. +// This is used for testing only. 
+type mockClient struct{} + +func buildMockClient() (Client, error) { + level.Warn(util.Logger).Log("msg", "created mockClient for testing only") + return mockClient{}, nil +} + +func (m mockClient) List(ctx context.Context, prefix string) ([]string, error) { + return []string{}, nil +} + +func (m mockClient) Get(ctx context.Context, key string) (interface{}, error) { + return "", nil +} + +func (m mockClient) Delete(ctx context.Context, key string) error { + return nil +} + +func (m mockClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { + return nil +} + +func (m mockClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +} + +func (m mockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go index 40cc70e4e6765..75315a71c72b9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go @@ -70,7 +70,6 @@ type Config struct { HeadCompactionIdleTimeout time.Duration `yaml:"head_compaction_idle_timeout"` StripeSize int `yaml:"stripe_size"` WALCompressionEnabled bool `yaml:"wal_compression_enabled"` - StoreGatewayEnabled bool `yaml:"store_gateway_enabled"` FlushBlocksOnShutdown bool `yaml:"flush_blocks_on_shutdown"` // MaxTSDBOpeningConcurrencyOnStartup limits the number of concurrently opening TSDB's during startup @@ -134,7 +133,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.Dir, "experimental.tsdb.dir", "tsdb", "Local directory to store TSDBs in the ingesters.") f.Var(&cfg.BlockRanges, "experimental.tsdb.block-ranges-period", "TSDB blocks range period.") - f.DurationVar(&cfg.Retention, "experimental.tsdb.retention-period", 6*time.Hour, "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give queriers enough time to discover newly uploaded blocks.") + f.DurationVar(&cfg.Retention, "experimental.tsdb.retention-period", 6*time.Hour, "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks.") f.DurationVar(&cfg.ShipInterval, "experimental.tsdb.ship-interval", 1*time.Minute, "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 0 means shipping is disabled.") f.IntVar(&cfg.ShipConcurrency, "experimental.tsdb.ship-concurrency", 10, "Maximum number of tenants concurrently shipping blocks to the storage.") f.StringVar(&cfg.Backend, "experimental.tsdb.backend", "s3", fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(supportedBackends, ", "))) @@ -144,7 +143,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.HeadCompactionIdleTimeout, "experimental.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. 0 means disabled.") f.IntVar(&cfg.StripeSize, "experimental.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). 
Reducing this will decrease memory footprint, but can negatively impact performance.") f.BoolVar(&cfg.WALCompressionEnabled, "experimental.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.") - f.BoolVar(&cfg.StoreGatewayEnabled, "experimental.tsdb.store-gateway-enabled", false, "True if the Cortex cluster is running the store-gateway service and the querier should query the bucket store via the store-gateway.") f.BoolVar(&cfg.FlushBlocksOnShutdown, "experimental.tsdb.flush-blocks-on-shutdown", false, "If true, and transfer of blocks on shutdown fails or is disabled, incomplete blocks are flushed to storage instead. If false, incomplete blocks will be reused after restart, and uploaded when finished.") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go index 4f1afceb47a80..9f4db46651669 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go @@ -8,10 +8,10 @@ import ( "github.com/thanos-io/thanos/pkg/store/storepb" ) -// BucketStoreSeriesServer is an fake in-memory gRPC server used to +// bucketStoreSeriesServer is a fake in-memory gRPC server used to // call Thanos BucketStore.Series() without having to go through the // gRPC networking stack. -type BucketStoreSeriesServer struct { +type bucketStoreSeriesServer struct { // This field just exists to pseudo-implement the unused methods of the interface. storepb.Store_SeriesServer @@ -21,11 +21,11 @@ type BucketStoreSeriesServer struct { Warnings storage.Warnings } -func NewBucketStoreSeriesServer(ctx context.Context) *BucketStoreSeriesServer { - return &BucketStoreSeriesServer{ctx: ctx} +func newBucketStoreSeriesServer(ctx context.Context) *bucketStoreSeriesServer { + return &bucketStoreSeriesServer{ctx: ctx} } -func (s *BucketStoreSeriesServer) Send(r *storepb.SeriesResponse) error { +func (s *bucketStoreSeriesServer) Send(r *storepb.SeriesResponse) error { if r.GetWarning() != "" { s.Warnings = append(s.Warnings, errors.New(r.GetWarning())) } @@ -51,6 +51,6 @@ func (s *BucketStoreSeriesServer) Send(r *storepb.SeriesResponse) error { return nil } -func (s *BucketStoreSeriesServer) Context() context.Context { +func (s *bucketStoreSeriesServer) Context() context.Context { return s.ctx } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/time.go b/vendor/github.com/cortexproject/cortex/pkg/util/time.go index aeed6ab70d105..7a9a840b806b8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/time.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/time.go @@ -37,6 +37,11 @@ func ParseTime(s string) (int64, error) { } func DurationWithJitter(input time.Duration, variancePerc float64) time.Duration { + // No duration? No jitter. 
+ if input == 0 { + return 0 + } + variance := int64(float64(input) * variancePerc) jitter := rand.Int63n(variance*2) - variance diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index 75be64b9471eb..be984e0834dd6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -87,8 +87,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&l.EnforceMetricName, "validation.enforce-metric-name", true, "Enforce every sample has a metric name.") f.BoolVar(&l.EnforceMetadataMetricName, "validation.enforce-metadata-metric-name", true, "Enforce every metadata has a metric name.") - f.IntVar(&l.MaxSeriesPerQuery, "ingester.max-series-per-query", 100000, "The maximum number of series that a query can return.") - f.IntVar(&l.MaxSamplesPerQuery, "ingester.max-samples-per-query", 1000000, "The maximum number of samples that a query can return.") + f.IntVar(&l.MaxSeriesPerQuery, "ingester.max-series-per-query", 100000, "The maximum number of series for which a query can fetch samples from each ingester. This limit is enforced only in the ingesters (when querying samples not flushed to the storage yet) and it's a per-instance limit. This limit is ignored when running the Cortex blocks storage.") + f.IntVar(&l.MaxSamplesPerQuery, "ingester.max-samples-per-query", 1000000, "The maximum number of samples that a query can return. This limit only applies when running the Cortex chunks storage with -querier.ingester-streaming=false.") f.IntVar(&l.MaxLocalSeriesPerUser, "ingester.max-series-per-user", 5000000, "The maximum number of active series per user, per ingester. 0 to disable.") f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 0 to disable.") f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.") @@ -100,10 +100,10 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxGlobalMetricsWithMetadataPerUser, "ingester.max-global-metadata-per-user", 0, "The maximum number of active metrics with metadata per user, across the cluster. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.") f.IntVar(&l.MaxGlobalMetadataPerMetric, "ingester.max-global-metadata-per-metric", 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.") - f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query.") - f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit to length of chunk store queries, 0 to disable.") + f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query. This limit is ignored when running the Cortex blocks storage.") + f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit the query time range (end - start time). This limit is enforced in the query-frontend (on the received query), in the querier (on the query possibly split by the query-frontend) and in the chunks storage. 
0 to disable.") f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries will be scheduled in parallel by the frontend.") - f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries.") + f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries. This limit is ignored when running the Cortex blocks storage.") f.DurationVar(&l.MaxCacheFreshness, "frontend.max-cache-freshness", 1*time.Minute, "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.") f.StringVar(&l.PerTenantOverrideConfig, "limits.per-user-override-config", "", "File name of per-user overrides. [deprecated, use -runtime-config.file instead]") diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go index 6434b34d1cd87..1cbd32160e767 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go @@ -39,8 +39,8 @@ const ( errDuplicateLabelName = "duplicate label name: %.200q metric %.200q" errLabelsNotSorted = "labels not sorted: %.200q metric %.200q" - // ErrQueryTooLong is used in chunk store and query frontend. - ErrQueryTooLong = "invalid query, length > limit (%s > %s)" + // ErrQueryTooLong is used in chunk store, querier and query frontend. + ErrQueryTooLong = "the query time range exceeds the limit (query length: %s, limit: %s)" missingMetricName = "missing_metric_name" invalidMetricName = "metric_name_invalid" @@ -112,9 +112,9 @@ type LabelValidationConfig interface { } // ValidateLabels returns an err if the labels are invalid. -func ValidateLabels(cfg LabelValidationConfig, userID string, ls []client.LabelAdapter) error { - metricName, err := extract.MetricNameFromLabelAdapters(ls) +func ValidateLabels(cfg LabelValidationConfig, userID string, ls []client.LabelAdapter, skipLabelNameValidation bool) error { if cfg.EnforceMetricName(userID) { + metricName, err := extract.MetricNameFromLabelAdapters(ls) if err != nil { DiscardedSamples.WithLabelValues(missingMetricName, userID).Inc() return httpgrpc.Errorf(http.StatusBadRequest, errMissingMetricName) @@ -139,7 +139,7 @@ func ValidateLabels(cfg LabelValidationConfig, userID string, ls []client.LabelA var errTemplate string var reason string var cause interface{} - if !model.LabelName(l.Name).IsValid() { + if !skipLabelNameValidation && !model.LabelName(l.Name).IsValid() { reason = invalidLabel errTemplate = errInvalidLabel cause = l.Name diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index ad166ba8df6c1..c5f725bc19d39 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -4,7 +4,6 @@ Aanand Prasad Aaron Davidson Aaron Feng -Aaron Hnatiw Aaron Huslage Aaron L. Xu Aaron Lehmann @@ -18,7 +17,6 @@ Abhishek Chanda Abhishek Sharma Abin Shahab Adam Avilla -Adam Dobrawy Adam Eijdenberg Adam Kunk Adam Miller @@ -114,7 +112,6 @@ Anda Xu Anders Janmyr Andre Dublin <81dublin@gmail.com> Andre Granovsky -Andrea Denisse Gómez Andrea Luzzardi Andrea Turli Andreas Elvers @@ -179,10 +176,8 @@ Anusha Ragunathan apocas Arash Deshmeh ArikaChen -Arko Dasgupta Arnaud Lefebvre Arnaud Porterie -Arnaud Rebillout Arthur Barr Arthur Gautier Artur Meyster @@ -284,7 +279,6 @@ Carl Loa Odin Carl X. 
Su Carlo Mion Carlos Alexandro Becker -Carlos de Paula Carlos Sanchez Carol Fager-Higgins Cary @@ -334,7 +328,6 @@ Chris Gibson Chris Khoo Chris McKinnel Chris McKinnel -Chris Price Chris Seto Chris Snow Chris St. Pierre @@ -424,14 +417,12 @@ Daniel Norberg Daniel Nordberg Daniel Robinson Daniel S -Daniel Sweet Daniel Von Fange Daniel Watkins Daniel X Moore Daniel YC Lin Daniel Zhang Danny Berger -Danny Milosavljevic Danny Yates Danyal Khaliq Darren Coxall @@ -525,8 +516,6 @@ Dmitry Smirnov Dmitry V. Krivenok Dmitry Vorobev Dolph Mathews -Dominic Tubach -Dominic Yin Dominik Dingel Dominik Finkbeiner Dominik Honnef @@ -595,7 +584,6 @@ Erik Weathers Erno Hopearuoho Erwin van der Koogh Ethan Bell -Ethan Mosbaugh Euan Kemp Eugen Krizo Eugene Yakubovich @@ -632,7 +620,6 @@ Fareed Dudhia Fathi Boudra Federico Gimenez Felipe Oliveira -Felipe Ruhland Felix Abecassis Felix Geisendörfer Felix Hupfeld @@ -667,7 +654,6 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin -frankyang Fred Lifton Frederick F. Kautz IV Frederik Loeffert @@ -715,7 +701,6 @@ Gleb M Borisov Glyn Normington GoBella Goffert van Gool -Goldwyn Rodrigues Gopikannan Venugopalsamy Gosuke Miyashita Gou Rao @@ -739,7 +724,6 @@ Guruprasad Gustav Sinder gwx296173 Günter Zöchbauer -Haichao Yang haikuoliu Hakan Özler Hamish Hutchings @@ -748,7 +732,6 @@ Hans Rødtang Hao Shu Wei Hao Zhang <21521210@zju.edu.cn> Harald Albers -Harald Niesche Harley Laue Harold Cooper Harrison Turton @@ -768,11 +751,9 @@ Hobofan Hollie Teal Hong Xu Hongbin Lu -Hongxu Jia hsinko <21551195@zju.edu.cn> Hu Keping Hu Tao -HuanHuan Ye Huanzhong Zhang Huayi Zhang Hugo Duncan @@ -916,7 +897,6 @@ Jie Luo Jihyun Hwang Jilles Oldenbeuving Jim Alateras -Jim Ehrismann Jim Galasyn Jim Minter Jim Perrin @@ -954,7 +934,7 @@ John Feminella John Gardiner Myers John Gossman John Harris -John Howard +John Howard (VM) John Laswell John Maguire John Mulhausen @@ -968,7 +948,6 @@ John Willis Jon Johnson Jon Surrell Jon Wedaman -Jonas Dohse Jonas Pfenniger Jonathan A. Schweder Jonathan A. Sternberg @@ -1022,7 +1001,6 @@ Julio Montes Jun-Ru Chang Jussi Nummelin Justas Brazauskas -Justen Martin Justin Cormack Justin Force Justin Menga @@ -1031,7 +1009,6 @@ Justin Simonelis Justin Terry Justyn Temme Jyrki Puttonen -Jérémy Leherpeur Jérôme Petazzoni Jörg Thalheim K. Heller @@ -1069,7 +1046,6 @@ Ken Reese Kenfe-Mickaël Laventure Kenjiro Nakayama Kent Johnson -Kenta Tada Kevin "qwazerty" Houdebert Kevin Burke Kevin Clark @@ -1080,7 +1056,6 @@ Kevin Kern Kevin Menard Kevin Meredith Kevin P. Kucharczyk -Kevin Parsons Kevin Richardson Kevin Shi Kevin Wallace @@ -1171,7 +1146,6 @@ longliqiang88 <394564827@qq.com> Lorenz Leutgeb Lorenzo Fontana Lotus Fenn -Louis Delossantos Louis Opter Luca Favatella Luca Marturana @@ -1180,18 +1154,15 @@ Luca-Bogdan Grigorescu Lucas Chan Lucas Chi Lucas Molas -Lucas Silvestre Luciano Mores Luis Martínez de Bartolomé Izquierdo Luiz Svoboda -Lukas Heeren Lukas Waslowski lukaspustina Lukasz Zajaczkowski Luke Marsden Lyn Lynda O'Leary -lzhfromutsc Lénaïc Huard Ma Müller Ma Shimiao @@ -1325,7 +1296,6 @@ Michael Stapelberg Michael Steinert Michael Thies Michael West -Michael Zhao Michal Fojtik Michal Gebauer Michal Jemala @@ -1410,7 +1380,6 @@ Neyazul Haque Nghia Tran Niall O'Higgins Nicholas E. Rabenau -Nick Adcock Nick DeCoursin Nick Irvine Nick Neisen @@ -1449,7 +1418,6 @@ Nuutti Kotivuori nzwsch O.S. 
Tezer objectified -Odin Ugedal Oguz Bilgic Oh Jinkyun Ohad Schneider @@ -1460,7 +1428,6 @@ Oliver Reason Olivier Gambier Olle Jonsson Olli Janatuinen -Olly Pomeroy Omri Shiv Oriol Francès Oskar Niburski @@ -1470,7 +1437,6 @@ Ovidio Mallo Panagiotis Moustafellos Paolo G. Giarrusso Pascal -Pascal Bach Pascal Borreli Pascal Hartig Patrick Böänziger @@ -1495,7 +1461,6 @@ Paul Nasrat Paul Weaver Paulo Ribeiro Pavel Lobashov -Pavel Matěja Pavel Pletenev Pavel Pospisil Pavel Sutyrin @@ -1607,7 +1572,6 @@ Riku Voipio Riley Guerin Ritesh H Shukla Riyaz Faizullabhoy -Rob Gulewich Rob Vesse Robert Bachmann Robert Bittle @@ -1616,13 +1580,11 @@ Robert Schneider Robert Stern Robert Terhaar Robert Wallis -Robert Wang Roberto G. Hashioka Roberto Muñoz Fernández Robin Naundorf Robin Schneider Robin Speekenbrink -Robin Thoni robpc Rodolfo Carvalho Rodrigo Vaz @@ -1656,7 +1618,6 @@ Rozhnov Alexandr Rudolph Gottesheim Rui Cao Rui Lopes -Ruilin Li Runshen Zhu Russ Magee Ryan Abrams @@ -1695,7 +1656,6 @@ Sam J Sharpe Sam Neirinck Sam Reis Sam Rijs -Sam Whited Sambuddha Basu Sami Wagiaalla Samuel Andaya @@ -1710,7 +1670,6 @@ sapphiredev Sargun Dhillon Sascha Andres Sascha Grunert -SataQiu Satnam Singh Satoshi Amemiya Satoshi Tagomori @@ -1759,7 +1718,6 @@ Shijun Qin Shishir Mahajan Shoubhik Bose Shourya Sarcar -Shu-Wai Chow shuai-z Shukui Yang Shuwei Hao @@ -1770,7 +1728,6 @@ Silas Sewell Silvan Jegen Simão Reis Simei He -Simon Barendse Simon Eskildsen Simon Ferquel Simon Leinen @@ -1779,7 +1736,6 @@ Simon Taranto Simon Vikstrom Sindhu S Sjoerd Langkemper -skanehira Solganik Alexander Solomon Hykes Song Gao @@ -1791,18 +1747,16 @@ Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu Srinivasan Srivatsan -Staf Wagemakers Stanislav Bondarenko Steeve Morin Stefan Berger Stefan J. Wernli Stefan Praszalowicz Stefan S. 
-Stefan Scherer +Stefan Scherer Stefan Staudenmeyer Stefan Weil Stephan Spindler -Stephen Benjamin Stephen Crosby Stephen Day Stephen Drake @@ -1819,12 +1773,10 @@ Steven Iveson Steven Merrill Steven Richards Steven Taylor -Stig Larsson Subhajit Ghosh Sujith Haridasan Sun Gengze <690388648@qq.com> Sun Jianbo -Sune Keller Sunny Gogoi Suryakumar Sudar Sven Dowideit @@ -1875,7 +1827,6 @@ Tianyi Wang Tibor Vass Tiffany Jernigan Tiffany Low -Tim Tim Bart Tim Bosse Tim Dettrick @@ -1961,7 +1912,6 @@ Victor Palma Victor Vieux Victoria Bialas Vijaya Kumar K -Vikram bir Singh Viktor Stanchev Viktor Vojnovski VinayRaghavanKS @@ -2019,7 +1969,6 @@ Wenyu You <21551128@zju.edu.cn> Wenzhi Liang Wes Morgan Wewang Xiaorenfine -Wiktor Kwapisiewicz Will Dietz Will Rouesnel Will Weaver @@ -2047,7 +1996,6 @@ xichengliudui <1693291525@qq.com> xiekeyang Ximo Guanter Gonzálbez Xinbo Weng -Xinfeng Liu Xinzi Zhou Xiuming Chen Xuecong Liao @@ -2062,7 +2010,6 @@ Yang Pengfei yangchenliang Yanqiang Miao Yao Zaiyong -Yash Murty Yassine Tijani Yasunori Mahata Yazhong Liu @@ -2077,7 +2024,6 @@ Yongxin Li Yongzhi Pan Yosef Fertel You-Sheng Yang (楊有勝) -youcai Youcef YEKHLEF Yu Changchun Yu Chengxia @@ -2114,7 +2060,6 @@ Zhoulin Xie Zhu Guihua Zhu Kunjia Zhuoyun Wei -Ziheng Liu Zilin Du zimbatm Ziming Dong @@ -2123,7 +2068,7 @@ zmarouf Zoltan Tombol Zou Yu zqh -Zuhayr Elahi +Zuhayr Elahi Zunayed Ali Álex González Álvaro Lázaro diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 1565e2af64745..aa146cdaeb1a2 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion = "1.41" + DefaultVersion = "1.40" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index c877f2dcf626a..ce4b80ac5edeb 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,26 +19,20 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.41" +basePath: "/v1.40" info: title: "Docker Engine API" - version: "1.41" + version: "1.40" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | - The Engine API is an HTTP API served by Docker Engine. It is the API the - Docker client uses to communicate with the Engine, so everything the Docker - client can do can be done with the API. + The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. - Most of the client's commands map directly to API endpoints (e.g. `docker ps` - is `GET /containers/json`). The notable exception is running containers, - which consists of several API calls. + Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors - The API uses standard HTTP status codes to indicate the success or failure - of the API call. 
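The hunk above pins the vendored Docker client back to API version 1.40 (`DefaultVersion = "1.40"`, `basePath: "/v1.40"`). As a minimal Go sketch of what that pinning means for callers, assuming the standard `github.com/docker/docker/client` package — without an explicit `WithVersion`, the client falls back to the library's `DefaultVersion`:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Pin the client to API 1.40, matching the vendored DefaultVersion above.
	// FromEnv reads DOCKER_HOST / DOCKER_API_VERSION from the environment.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.40"))
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Ping reports the maximum API version the daemon supports.
	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("daemon supports up to API", ping.APIVersion)
}
```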
The body of the response will be JSON in the following - format: + The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { @@ -55,8 +49,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.41) is used. - For example, calling `/info` is the same as calling `/v1.41/info`. Using the + If you omit the version-prefix, the current version of the API (v1.40) is used. + For example, calling `/info` is the same as calling `/v1.40/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -71,11 +65,7 @@ info: # Authentication - Authentication for registries is handled client side. The client has to send - authentication details to various endpoints that need to communicate with - registries, such as `POST /images/(name)/push`. These are sent as - `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) - (JSON) string with the following structure: + Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) (JSON) string with the following structure: ``` { @@ -86,11 +76,9 @@ info: } ``` - The `serveraddress` is a domain/IP without a protocol. Throughout this - structure, double quotes are required. + The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. - If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), - you can just pass this instead of credentials: + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { @@ -116,9 +104,7 @@ tags: - name: "Network" x-displayName: "Networks" description: | - Networks are user-defined networks that containers can be attached to. - See the [networking documentation](https://docs.docker.com/network/) - for more information. + Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. - name: "Volume" x-displayName: "Volumes" description: | @@ -126,46 +112,34 @@ tags: - name: "Exec" x-displayName: "Exec" description: | - Run new commands inside running containers. Refer to the - [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) - for more information. - - To exec a command in a container, you first need to create an exec instance, - then start it. These two API endpoints are wrapped up in a single command-line - command, `docker exec`. + Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. + To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. # Swarm things - name: "Swarm" x-displayName: "Swarm" description: | - Engines can be clustered together in a swarm. 
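The Exec tag description in the hunk above notes that `docker exec` wraps two API endpoints: one call creates an exec instance, a second starts it. A hedged Go sketch of that two-step flow against the client API as vendored here (`types.ExecConfig` / `types.ExecStartCheck`; `containerID` is a placeholder for a running container):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// runExec mirrors the flow described above: POST /containers/{id}/exec creates
// an exec instance, then POST /exec/{id}/start runs it.
func runExec(ctx context.Context, cli *client.Client, containerID string) error {
	created, err := cli.ContainerExecCreate(ctx, containerID, types.ExecConfig{
		Cmd:          []string{"echo", "hello"},
		AttachStdout: true,
	})
	if err != nil {
		return err
	}
	// An empty ExecStartCheck keeps the semantics of `docker exec` without -d.
	if err := cli.ContainerExecStart(ctx, created.ID, types.ExecStartCheck{}); err != nil {
		return err
	}
	fmt.Println("started exec instance", created.ID)
	return nil
}
```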
Refer to the - [swarm mode documentation](https://docs.docker.com/engine/swarm/) - for more information. + Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - name: "Node" x-displayName: "Nodes" description: | - Nodes are instances of the Engine participating in a swarm. Swarm mode - must be enabled for these endpoints to work. + Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Service" x-displayName: "Services" description: | - Services are the definitions of tasks to run on a swarm. Swarm mode must - be enabled for these endpoints to work. + Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Task" x-displayName: "Tasks" description: | - A task is a container running on a swarm. It is the atomic scheduling unit - of swarm. Swarm mode must be enabled for these endpoints to work. + A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - name: "Secret" x-displayName: "Secrets" description: | - Secrets are sensitive data that can be used by services. Swarm mode must - be enabled for these endpoints to work. + Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. - name: "Config" x-displayName: "Configs" description: | - Configs are application configurations that can be used by services. Swarm - mode must be enabled for these endpoints to work. + Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. # System things - name: "Plugin" x-displayName: "Plugins" @@ -371,11 +345,9 @@ definitions: RestartPolicy: description: | - The behavior to apply when the container exits. The default is not to - restart. + The behavior to apply when the container exits. The default is not to restart. - An ever increasing delay (double the previous delay, starting at 100ms) is - added before each restart to prevent flooding the server. + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. type: "object" properties: Name: @@ -392,8 +364,7 @@ definitions: - "on-failure" MaximumRetryCount: type: "integer" - description: | - If `on-failure` is used, the number of times to retry before giving up. + description: "If `on-failure` is used, the number of times to retry before giving up" Resources: description: "A container's resources (cgroups config, ulimits, etc)" @@ -401,9 +372,7 @@ definitions: properties: # Applicable to all platforms CpuShares: - description: | - An integer value representing this container's relative CPU weight - versus other containers. + description: "An integer value representing this container's relative CPU weight versus other containers." type: "integer" Memory: description: "Memory limit in bytes." @@ -412,11 +381,7 @@ definitions: default: 0 # Applicable to UNIX platforms CgroupParent: - description: | - Path to `cgroups` under which the container's `cgroup` is created. If - the path is not absolute, the path is considered to be relative to the - `cgroups` path of the init process. Cgroups are created if they do not - already exist. + description: "Path to `cgroups` under which the container's `cgroup` is created. 
If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." type: "string" BlkioWeight: description: "Block IO weight (relative weight)." @@ -425,11 +390,7 @@ definitions: maximum: 1000 BlkioWeightDevice: description: | - Block IO weight (relative device weight) in the form: - - ``` - [{"Path": "device_path", "Weight": weight}] - ``` + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. type: "array" items: type: "object" @@ -441,41 +402,25 @@ definitions: minimum: 0 BlkioDeviceReadBps: description: | - Limit read rate (bytes per second) from a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteBps: description: | - Limit write rate (bytes per second) to a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceReadIOps: description: | - Limit read rate (IO per second) from a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteIOps: description: | - Limit write rate (IO per second) to a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. type: "array" items: $ref: "#/definitions/ThrottleDevice" @@ -484,31 +429,23 @@ definitions: type: "integer" format: "int64" CpuQuota: - description: | - Microseconds of CPU time that the container can get in a CPU period. + description: "Microseconds of CPU time that the container can get in a CPU period." type: "integer" format: "int64" CpuRealtimePeriod: - description: | - The length of a CPU real-time period in microseconds. Set to 0 to - allocate no time allocated to real-time tasks. + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." type: "integer" format: "int64" CpuRealtimeRuntime: - description: | - The length of a CPU real-time runtime in microseconds. Set to 0 to - allocate no time allocated to real-time tasks. + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." type: "integer" format: "int64" CpusetCpus: - description: | - CPUs in which to allow execution (e.g., `0-3`, `0,1`). + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" type: "string" example: "0-3" CpusetMems: - description: | - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only - effective on NUMA systems. + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." type: "string" Devices: description: "A list of devices to add to the container." @@ -522,8 +459,7 @@ definitions: type: "string" example: "c 13:* rwm" DeviceRequests: - description: | - A list of requests for devices to be sent to device drivers. 
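The health-check fields above (`Interval`, `Timeout`, `StartPeriod`) are documented as nanosecond integers. In the vendored Go types this maps to `time.Duration`, which marshals to exactly that wire format; a small sketch with illustrative values:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// time.Duration serializes as an int64 nanosecond count, so
	// 30*time.Second becomes 30000000000 in the request body.
	hc := container.HealthConfig{
		Test:        []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval:    30 * time.Second,
		Timeout:     5 * time.Second,
		StartPeriod: 10 * time.Second,
		Retries:     3, // consecutive failures before the container is "unhealthy"
	}
	b, _ := json.Marshal(hc)
	fmt.Println(string(b))
}
```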
+ description: "a list of requests for devices to be sent to device drivers" type: "array" items: $ref: "#/definitions/DeviceRequest" @@ -541,15 +477,11 @@ definitions: type: "integer" format: "int64" MemorySwap: - description: | - Total memory limit (memory + swap). Set as `-1` to enable unlimited - swap. + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." type: "integer" format: "int64" MemorySwappiness: - description: | - Tune a container's memory swappiness behavior. Accepts an integer - between 0 and 100. + description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." type: "integer" format: "int64" minimum: 0 @@ -562,26 +494,18 @@ definitions: description: "Disable OOM Killer for the container." type: "boolean" Init: - description: | - Run an init inside the container that forwards signals and reaps - processes. This field is omitted if empty, and the default (as - configured on the daemon) is used. + description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." type: "boolean" x-nullable: true PidsLimit: description: | - Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` - to not change. + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. type: "integer" format: "int64" x-nullable: true Ulimits: description: | - A list of resource limits to set in the container. For example: - - ``` - {"Name": "nofile", "Soft": 1024, "Hard": 2048} - ``` + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" type: "array" items: type: "object" @@ -600,18 +524,14 @@ definitions: description: | The number of usable CPUs (Windows only). - On Windows Server containers, the processor resource controls are - mutually exclusive. The order of precedence is `CPUCount` first, then - `CPUShares`, and `CPUPercent` last. + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" CpuPercent: description: | The usable percentage of the available CPUs (Windows only). - On Windows Server containers, the processor resource controls are - mutually exclusive. The order of precedence is `CPUCount` first, then - `CPUShares`, and `CPUPercent` last. + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" IOMaximumIOps: @@ -619,37 +539,12 @@ definitions: type: "integer" format: "int64" IOMaximumBandwidth: - description: | - Maximum IO in bytes per second for the container system drive - (Windows only). + description: "Maximum IO in bytes per second for the container system drive (Windows only)" type: "integer" format: "int64" - Limit: - description: | - An object describing a limit on resources which can be requested by a task. - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - Pids: - description: | - Limits the maximum number of PIDs in the container. Set `0` for unlimited. 
- type: "integer" - format: "int64" - default: 0 - example: 100 - ResourceObject: - description: | - An object describing the resources which can be advertised by a node and - requested by a task. + description: "An object describing the resources which can be advertised by a node and requested by a task" type: "object" properties: NanoCPUs: @@ -664,9 +559,7 @@ definitions: $ref: "#/definitions/GenericResources" GenericResources: - description: | - User-defined resources can be either Integer resources (e.g, `SSD=3`) or - String resources (e.g, `GPU=UUID1`). + description: "User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`)" type: "array" items: type: "object" @@ -713,25 +606,16 @@ definitions: items: type: "string" Interval: - description: | - The time to wait between checks in nanoseconds. It should be 0 or at - least 1000000 (1 ms). 0 means inherit. + description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." type: "integer" Timeout: - description: | - The time to wait before considering the check to have hung. It should - be 0 or at least 1000000 (1 ms). 0 means inherit. + description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit." type: "integer" Retries: - description: | - The number of consecutive failures needed to consider a container as - unhealthy. 0 means inherit. + description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." type: "integer" StartPeriod: - description: | - Start period for the container to initialize before starting - health-retries countdown in nanoseconds. It should be 0 or at least - 1000000 (1 ms). 0 means inherit. + description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." type: "integer" Health: @@ -874,33 +758,25 @@ definitions: type: "string" NetworkMode: type: "string" - description: | - Network mode to use for this container. Supported standard values - are: `bridge`, `host`, `none`, and `container:`. Any - other value is taken as a custom network's name to which this - container should connect to. + description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to." PortBindings: $ref: "#/definitions/PortMap" RestartPolicy: $ref: "#/definitions/RestartPolicy" AutoRemove: type: "boolean" - description: | - Automatically remove the container when the container's process - exits. This has no effect if `RestartPolicy` is set. + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." VolumeDriver: type: "string" description: "Driver that this container uses to mount volumes." VolumesFrom: type: "array" - description: | - A list of volumes to inherit from another container, specified in - the form `[:]`. + description: "A list of volumes to inherit from another container, specified in the form `[:]`." items: type: "string" Mounts: - description: | - Specification for mounts to be added to the container. + description: "Specification for mounts to be added to the container." 
type: "array" items: $ref: "#/definitions/Mount" @@ -909,39 +785,21 @@ definitions: Capabilities: type: "array" description: | - A list of kernel capabilities to be available for container (this - overrides the default set). + A list of kernel capabilities to be available for container (this overrides the default set). Conflicts with options 'CapAdd' and 'CapDrop'" items: type: "string" CapAdd: type: "array" - description: | - A list of kernel capabilities to add to the container. Conflicts - with option 'Capabilities'. + description: "A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'" items: type: "string" CapDrop: type: "array" - description: | - A list of kernel capabilities to drop from the container. Conflicts - with option 'Capabilities'. + description: "A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'" items: type: "string" - CgroupnsMode: - type: "string" - enum: - - "private" - - "host" - description: | - cgroup namespace mode for the container. Possible values are: - - - `"private"`: the container runs in its own private cgroup namespace - - `"host"`: use the host system's cgroup namespace - - If not specified, the daemon default is used, which can either be `"private"` - or `"host"`, depending on daemon version, kernel support and configuration. Dns: type: "array" description: "A list of DNS servers for the container to use." @@ -960,49 +818,43 @@ definitions: ExtraHosts: type: "array" description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` - file. Specified in the form `["hostname:IP"]`. + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. items: type: "string" GroupAdd: type: "array" - description: | - A list of additional groups that the container process will run as. + description: "A list of additional groups that the container process will run as." items: type: "string" IpcMode: type: "string" description: | - IPC sharing mode for the container. Possible values are: + IPC sharing mode for the container. Possible values are: - - `"none"`: own private IPC namespace, with /dev/shm not mounted - - `"private"`: own private IPC namespace - - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - - `"container:"`: join another (shareable) container's IPC namespace - - `"host"`: use the host system's IPC namespace + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace - If not specified, daemon default is used, which can either be `"private"` - or `"shareable"`, depending on daemon version and configuration. + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. Cgroup: type: "string" description: "Cgroup to use for the container." Links: type: "array" - description: | - A list of links for the container in the form `container_name:alias`. + description: "A list of links for the container in the form `container_name:alias`." items: type: "string" OomScoreAdj: type: "integer" - description: | - An integer value containing the score given to the container in - order to tune OOM killer preferences. 
+ description: "An integer value containing the score given to the container in order to tune OOM killer preferences." example: 500 PidMode: type: "string" description: | - Set the PID (Process) Namespace mode for the container. It can be - either: + Set the PID (Process) Namespace mode for the container. It can be either: - `"container:"`: joins another container's PID namespace - `"host"`: use the host's PID namespace inside the container @@ -1015,13 +867,11 @@ definitions: Allocates an ephemeral host port for all of a container's exposed ports. - Ports are de-allocated when the container stops and allocated when - the container starts. The allocated port might be changed when - restarting the container. + Ports are de-allocated when the container stops and allocated when the container starts. + The allocated port might be changed when restarting the container. - The port is selected from the ephemeral port range that depends on - the kernel. For example, on Linux the range is defined by - `/proc/sys/net/ipv4/ip_local_port_range`. + The port is selected from the ephemeral port range that depends on the kernel. + For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. ReadonlyRootfs: type: "boolean" description: "Mount the container's root filesystem as read only." @@ -1040,12 +890,7 @@ definitions: Tmpfs: type: "object" description: | - A map of container directories which should be replaced by tmpfs - mounts, and their corresponding mount options. For example: - - ``` - { "/run": "rw,noexec,nosuid,size=65536k" } - ``` + A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. additionalProperties: type: "string" UTSMode: @@ -1053,23 +898,15 @@ definitions: description: "UTS namespace to use for the container." UsernsMode: type: "string" - description: | - Sets the usernamespace mode for the container when usernamespace - remapping option is enabled. + description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." ShmSize: type: "integer" - description: | - Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." minimum: 0 Sysctls: type: "object" description: | - A list of kernel parameters (sysctls) to set in the container. - For example: - - ``` - {"net.ipv4.ip_forward": "1"} - ``` + A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` additionalProperties: type: "string" Runtime: @@ -1078,8 +915,7 @@ definitions: # Applicable to Windows ConsoleSize: type: "array" - description: | - Initial console size, as an `[height, width]` array. (Windows only) + description: "Initial console size, as an `[height, width]` array. (Windows only)" minItems: 2 maxItems: 2 items: @@ -1087,24 +923,19 @@ definitions: minimum: 0 Isolation: type: "string" - description: | - Isolation technology of the container. (Windows only) + description: "Isolation technology of the container. (Windows only)" enum: - "default" - "process" - "hyperv" MaskedPaths: type: "array" - description: | - The list of paths to be masked inside the container (this overrides - the default set of paths). 
+ description: "The list of paths to be masked inside the container (this overrides the default set of paths)" items: type: "string" ReadonlyPaths: type: "array" - description: | - The list of paths to be set as read-only inside the container - (this overrides the default set of paths). + description: "The list of paths to be set as read-only inside the container (this overrides the default set of paths)" items: type: "string" @@ -1145,8 +976,7 @@ definitions: - {} default: {} Tty: - description: | - Attach standard streams to a TTY, including `stdin` if it is not closed. + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." type: "boolean" default: false OpenStdin: @@ -1159,15 +989,12 @@ definitions: default: false Env: description: | - A list of environment variables to set inside the container in the - form `["VAR=value", ...]`. A variable without `=` is removed from the - environment, rather than to have an empty value. + A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. type: "array" items: type: "string" Cmd: - description: | - Command to run specified as a string or an array of strings. + description: "Command to run specified as a string or an array of strings." type: "array" items: type: "string" @@ -1177,13 +1004,10 @@ definitions: description: "Command is already escaped (Windows only)" type: "boolean" Image: - description: | - The name of the image to use when creating the container/ + description: "The name of the image to use when creating the container" type: "string" Volumes: - description: | - An object mapping mount point paths inside the container to empty - objects. + description: "An object mapping mount point paths inside the container to empty objects." type: "object" additionalProperties: type: "object" @@ -1197,9 +1021,7 @@ definitions: description: | The entry point for the container as a string or an array of strings. - If the array consists of exactly one empty string (`[""]`) then the - entry point is reset to system default (i.e., the entry point used by - docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). type: "array" items: type: "string" @@ -1210,8 +1032,7 @@ definitions: description: "MAC address of the container." type: "string" OnBuild: - description: | - `ONBUILD` metadata that were defined in the image's `Dockerfile`. + description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." type: "array" items: type: "string" @@ -1221,8 +1042,7 @@ definitions: additionalProperties: type: "string" StopSignal: - description: | - Signal to stop a container as a string or unsigned integer. + description: "Signal to stop a container as a string or unsigned integer." type: "string" default: "SIGTERM" StopTimeout: @@ -1230,8 +1050,7 @@ definitions: type: "integer" default: 10 Shell: - description: | - Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." type: "array" items: type: "string" @@ -1678,16 +1497,13 @@ definitions: type: "string" Scope: type: "string" - description: | - The level at which the volume exists. 
Either `global` for cluster-wide, - or `local` for machine level. + description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." default: "local" x-nullable: false enum: ["local", "global"] Options: type: "object" - description: | - The driver specific options used when creating the volume. + description: "The driver specific options used when creating the volume." additionalProperties: type: "string" UsageData: @@ -1805,12 +1621,7 @@ definitions: type: "string" default: "default" Config: - description: | - List of IPAM configuration options, specified as a map: - - ``` - {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } - ``` + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" type: "array" items: type: "object" @@ -2156,9 +1967,7 @@ definitions: x-nullable: false example: "tiborvass/sample-volume-plugin" Enabled: - description: - True if the plugin is running. False if the plugin is not running, - only installed. + description: "True if the plugin is running. False if the plugin is not running, only installed." type: "boolean" x-nullable: false example: true @@ -2360,16 +2169,13 @@ definitions: ObjectVersion: description: | - The version number of the object such as node, service, etc. This is needed - to avoid conflicting writes. The client must send the version number along - with the modified specification when updating these objects. - - This approach ensures safe concurrency and determinism in that the change - on the object may not be applied if the version number has changed from the - last read. In other words, if two update requests specify the same base - version, only one of the requests can succeed. As a result, two separate - update requests that happen at the same time will not unintentionally - overwrite each other. + The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. + The client must send the version number along with the modified specification when updating these objects. + This approach ensures safe concurrency and determinism in that the change on the object + may not be applied if the version number has changed from the last read. In other words, + if two update requests specify the same base version, only one of the requests can succeed. + As a result, two separate update requests that happen at the same time will not + unintentionally overwrite each other. type: "object" properties: Index: @@ -2538,23 +2344,17 @@ definitions: Name: "vieux/sshfs:latest" TLSInfo: - description: | - Information about the issuer of leaf TLS certificates and the trusted root - CA certificate. + description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" type: "object" properties: TrustRoot: - description: | - The root CA certificate(s) that are used to validate leaf TLS - certificates. + description: "The root CA certificate(s) that are used to validate leaf TLS certificates" type: "string" CertIssuerSubject: - description: - The base64-url-safe-encoded raw subject bytes of the issuer. + description: "The base64-url-safe-encoded raw subject bytes of the issuer" type: "string" CertIssuerPublicKey: - description: | - The base64-url-safe-encoded raw public key bytes of the issuer. 
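The `ObjectVersion` description above defines the optimistic-concurrency contract: an update must echo the version from the last read, and a stale version makes the write fail rather than silently clobber a concurrent change. A sketch of that read-modify-write cycle using the vendored client (`serviceID` is a placeholder):

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// bumpServiceLabel demonstrates the read-modify-write cycle required by the
// ObjectVersion rules: inspect, mutate the spec, and send the update together
// with the version observed at read time.
func bumpServiceLabel(ctx context.Context, cli *client.Client, serviceID string) error {
	svc, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return err
	}
	spec := svc.Spec
	if spec.Labels == nil {
		spec.Labels = map[string]string{}
	}
	spec.Labels["touched"] = "true"
	// svc.Version is the ObjectVersion; if another writer updated the service
	// since our read, this call fails instead of overwriting their change.
	_, err = cli.ServiceUpdate(ctx, serviceID, svc.Version, spec, types.ServiceUpdateOptions{})
	return err
}
```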
+ description: "The base64-url-safe-encoded raw public key bytes of the issuer" type: "string" example: TrustRoot: | @@ -2650,9 +2450,7 @@ definitions: x-nullable: true properties: TaskHistoryRetentionLimit: - description: | - The number of historic tasks to keep per instance or node. If - negative, never remove completed or failed tasks. + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." type: "integer" format: "int64" example: 10 @@ -2666,34 +2464,26 @@ definitions: format: "uint64" example: 10000 KeepOldSnapshots: - description: | - The number of snapshots to keep beyond the current snapshot. + description: "The number of snapshots to keep beyond the current snapshot." type: "integer" format: "uint64" LogEntriesForSlowFollowers: - description: | - The number of log entries to keep around to sync up slow followers - after a snapshot is created. + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." type: "integer" format: "uint64" example: 500 ElectionTick: description: | - The number of ticks that a follower will wait for a message from - the leader before becoming a candidate and starting an election. - `ElectionTick` must be greater than `HeartbeatTick`. + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. - A tick currently defaults to one second, so these translate - directly to seconds currently, but this is NOT guaranteed. + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 3 HeartbeatTick: description: | - The number of ticks between heartbeats. Every HeartbeatTick ticks, - the leader will send a heartbeat to the followers. + The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. - A tick currently defaults to one second, so these translate - directly to seconds currently, but this is NOT guaranteed. + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 1 Dispatcher: @@ -2702,8 +2492,7 @@ definitions: x-nullable: true properties: HeartbeatPeriod: - description: | - The delay for an agent to send a heartbeat to the dispatcher. + description: "The delay for an agent to send a heartbeat to the dispatcher." type: "integer" format: "int64" example: 5000000000 @@ -2718,53 +2507,36 @@ definitions: format: "int64" example: 7776000000000000 ExternalCAs: - description: | - Configuration for forwarding signing requests to an external - certificate authority. + description: "Configuration for forwarding signing requests to an external certificate authority." type: "array" items: type: "object" properties: Protocol: - description: | - Protocol for communication with the external CA (currently - only `cfssl` is supported). + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." type: "string" enum: - "cfssl" default: "cfssl" URL: - description: | - URL where certificate signing requests should be sent. + description: "URL where certificate signing requests should be sent." type: "string" Options: - description: | - An object with key/value pairs that are interpreted as - protocol-specific options for the external CA driver. 
+ description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." type: "object" additionalProperties: type: "string" CACert: - description: | - The root CA certificate (in PEM format) this external CA uses - to issue TLS certificates (assumed to be to the current swarm - root CA certificate if not provided). + description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)." type: "string" SigningCACert: - description: | - The desired signing CA certificate for all swarm node TLS leaf - certificates, in PEM format. + description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format." type: "string" SigningCAKey: - description: | - The desired signing CA key for all swarm node TLS leaf certificates, - in PEM format. + description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format." type: "string" ForceRotate: - description: | - An integer whose purpose is to force swarm to generate a new - signing CA certificate and key, if none have been specified in - `SigningCACert` and `SigningCAKey` + description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" format: "uint64" type: "integer" EncryptionConfig: @@ -2772,9 +2544,7 @@ definitions: type: "object" properties: AutoLockManagers: - description: | - If set, generate a key and use it to lock data stored on the - managers. + description: "If set, generate a key and use it to lock data stored on the managers." type: "boolean" example: false TaskDefaults: @@ -2840,8 +2610,7 @@ definitions: TLSInfo: $ref: "#/definitions/TLSInfo" RootRotationInProgress: - description: | - Whether there is currently a root CA rotation in progress for the swarm + description: "Whether there is currently a root CA rotation in progress for the swarm" type: "boolean" example: false DataPathPort: @@ -2855,8 +2624,7 @@ definitions: example: 4789 DefaultAddrPool: description: | - Default Address Pool specifies default subnet pools for global scope - networks. + Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" @@ -2864,8 +2632,7 @@ definitions: example: ["10.10.0.0/16", "20.20.0.0/16"] SubnetSize: description: | - SubnetSize specifies the subnet size of the networks created from the - default subnet pool. + SubnetSize specifies the subnet size of the networks created from the default subnet pool type: "integer" format: "uint32" maximum: 29 @@ -2925,9 +2692,7 @@ definitions: PluginPrivilege: type: "array" items: - description: | - Describes a permission accepted by the user upon installing the - plugin. + description: "Describes a permission accepted by the user upon installing the plugin." type: "object" properties: Name: @@ -2969,13 +2734,10 @@ definitions: items: type: "string" Hostname: - description: | - The hostname to use for the container, as a valid - [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + description: "The hostname to use for the container, as a valid RFC 1123 hostname." type: "string" Env: - description: | - A list of environment variables in the form `VAR=value`. + description: "A list of environment variables in the form `VAR=value`." 
type: "array" items: type: "string" @@ -2987,8 +2749,7 @@ definitions: type: "string" Groups: type: "array" - description: | - A list of additional groups that the container process will run as. + description: "A list of additional groups that the container process will run as." items: type: "string" Privileges: @@ -3004,43 +2765,37 @@ definitions: example: "0bt9dmxjvjiqermk6xrop3ekq" description: | Load credential spec from a Swarm Config with the given ID. - The specified config must also be present in the Configs - field with the Runtime property set. + The specified config must also be present in the Configs field with the Runtime property set.


- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. File: type: "string" example: "spec.json" description: | - Load credential spec from this file. The file is read by - the daemon, and must be present in the `CredentialSpecs` - subdirectory in the docker data directory, which defaults - to `C:\ProgramData\Docker\` on Windows. + Load credential spec from this file. The file is read by the daemon, and must be present in the + `CredentialSpecs` subdirectory in the docker data directory, which defaults to + `C:\ProgramData\Docker\` on Windows. - For example, specifying `spec.json` loads - `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`.


- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. Registry: type: "string" description: | - Load credential spec from this value in the Windows - registry. The specified registry value must be located in: + Load credential spec from this value in the Windows registry. The specified registry value must be + located in: `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`


- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. SELinuxContext: type: "object" description: "SELinux labels of the container" @@ -3070,9 +2825,7 @@ definitions: description: "Mount the container's root filesystem as read only." type: "boolean" Mounts: - description: | - Specification for mounts to be added to containers created as part - of the service. + description: "Specification for mounts to be added to containers created as part of the service." type: "array" items: $ref: "#/definitions/Mount" @@ -3080,9 +2833,7 @@ definitions: description: "Signal to stop the container." type: "string" StopGracePeriod: - description: | - Amount of time to wait for the container to terminate before - forcefully killing it. + description: "Amount of time to wait for the container to terminate before forcefully killing it." type: "integer" format: "int64" HealthCheck: @@ -3099,9 +2850,7 @@ definitions: items: type: "string" DNSConfig: - description: | - Specification for DNS related configurations in resolver configuration - file (`resolv.conf`). + description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." type: "object" properties: Nameservers: @@ -3115,28 +2864,22 @@ definitions: items: type: "string" Options: - description: | - A list of internal resolver variables to be modified (e.g., - `debug`, `ndots:3`, etc.). + description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." type: "array" items: type: "string" Secrets: - description: | - Secrets contains references to zero or more secrets that will be - exposed to the service. + description: "Secrets contains references to zero or more secrets that will be exposed to the service." type: "array" items: type: "object" properties: File: - description: | - File represents a specific target that is backed by a file. + description: "File represents a specific target that is backed by a file." type: "object" properties: Name: - description: | - Name represents the final filename in the filesystem. + description: "Name represents the final filename in the filesystem." type: "string" UID: description: "UID represents the file UID." @@ -3149,20 +2892,15 @@ definitions: type: "integer" format: "uint32" SecretID: - description: | - SecretID represents the ID of the specific secret that we're - referencing. + description: "SecretID represents the ID of the specific secret that we're referencing." type: "string" SecretName: description: | - SecretName is the name of the secret that this references, - but this is just provided for lookup/display purposes. The - secret in the reference will be identified by its ID. + SecretName is the name of the secret that this references, but this is just provided for + lookup/display purposes. The secret in the reference will be identified by its ID. type: "string" Configs: - description: | - Configs contains references to zero or more configs that will be - exposed to the service. + description: "Configs contains references to zero or more configs that will be exposed to the service." type: "array" items: type: "object" @@ -3177,8 +2915,7 @@ definitions: type: "object" properties: Name: - description: | - Name represents the final filename in the filesystem. + description: "Name represents the final filename in the filesystem." 
type: "string" UID: description: "UID represents the file UID." @@ -3192,39 +2929,29 @@ definitions: format: "uint32" Runtime: description: | - Runtime represents a target that is not mounted into the - container but is used by the task + Runtime represents a target that is not mounted into the container but is used by the task


- > **Note**: `Configs.File` and `Configs.Runtime` are mutually - > exclusive + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive type: "object" ConfigID: - description: | - ConfigID represents the ID of the specific config that we're - referencing. + description: "ConfigID represents the ID of the specific config that we're referencing." type: "string" ConfigName: description: | - ConfigName is the name of the config that this references, - but this is just provided for lookup/display purposes. The - config in the reference will be identified by its ID. + ConfigName is the name of the config that this references, but this is just provided for + lookup/display purposes. The config in the reference will be identified by its ID. type: "string" Isolation: type: "string" - description: | - Isolation technology of the containers running the service. - (Windows only) + description: "Isolation technology of the containers running the service. (Windows only)" enum: - "default" - "process" - "hyperv" Init: - description: | - Run an init inside the container that forwards signals and reaps - processes. This field is omitted if empty, and the default (as - configured on the daemon) is used. + description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." type: "boolean" x-nullable: true Sysctls: @@ -3239,19 +2966,6 @@ definitions: type: "object" additionalProperties: type: "string" - # This option is not used by Windows containers - Capabilities: - type: "array" - description: | - A list of kernel capabilities to be available for container (this - overrides the default set). - items: - type: "string" - example: - - "CAP_NET_RAW" - - "CAP_SYS_ADMIN" - - "CAP_SYS_CHROOT" - - "CAP_SYSLOG" NetworkAttachmentSpec: description: | Read-only spec type for non-swarm containers attached to swarm overlay @@ -3269,21 +2983,17 @@ definitions: description: "ID of the container represented by this task" type: "string" Resources: - description: | - Resource requirements which apply to each individual container created - as part of the service. + description: "Resource requirements which apply to each individual container created as part of the service." type: "object" properties: Limits: description: "Define resources limits." - $ref: "#/definitions/Limit" + $ref: "#/definitions/ResourceObject" Reservation: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: - description: | - Specification for the restart policy which applies to containers - created as part of this service. + description: "Specification for the restart policy which applies to containers created as part of this service." type: "object" properties: Condition: @@ -3298,16 +3008,12 @@ definitions: type: "integer" format: "int64" MaxAttempts: - description: | - Maximum attempts to restart a given container before giving up - (default value is 0, which is ignored). + description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." type: "integer" format: "int64" default: 0 Window: - description: | - Windows is the time window used to evaluate the restart policy - (default value is 0, which is unbounded). + description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." 
type: "integer" format: "int64" default: 0 @@ -3346,10 +3052,7 @@ definitions: - "node.platform.os==linux" - "node.platform.arch==x86_64" Preferences: - description: | - Preferences provide a way to make the scheduler aware of factors - such as topology. They are provided in order from highest to - lowest precedence. + description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence." type: "array" items: type: "object" @@ -3358,8 +3061,7 @@ definitions: type: "object" properties: SpreadDescriptor: - description: | - label descriptor, such as `engine.labels.az`. + description: "label descriptor, such as engine.labels.az" type: "string" example: - Spread: @@ -3367,9 +3069,7 @@ definitions: - Spread: SpreadDescriptor: "node.labels.rack" MaxReplicas: - description: | - Maximum number of replicas for per node (default value is 0, which - is unlimited) + description: "Maximum number of replicas for per node (default value is 0, which is unlimited)" type: "integer" format: "int64" default: 0 @@ -3383,13 +3083,10 @@ definitions: items: $ref: "#/definitions/Platform" ForceUpdate: - description: | - A counter that triggers an update even if no relevant parameters have - been changed. + description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" Runtime: - description: | - Runtime is the type of runtime specified for the task executor. + description: "Runtime is the type of runtime specified for the task executor." type: "string" Networks: description: "Specifies which networks the service should attach to." @@ -3397,10 +3094,7 @@ definitions: items: $ref: "#/definitions/NetworkAttachmentConfig" LogDriver: - description: | - Specifies the log driver to use for tasks created from this spec. If - not present, the default one for the swarm will be used, finally - falling back to the engine default if not specified. + description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." type: "object" properties: Name: @@ -3486,12 +3180,6 @@ definitions: type: "integer" DesiredState: $ref: "#/definitions/TaskState" - JobIteration: - description: | - If the Service this Task belongs to is a job-mode service, contains - the JobIteration of the Service this Task was created for. Absent if - the Task was created for a Replicated or Global Service. - $ref: "#/definitions/ObjectVersion" example: ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: @@ -3584,37 +3272,12 @@ definitions: format: "int64" Global: type: "object" - ReplicatedJob: - description: | - The mode used for services with a finite number of tasks that run - to a completed state. - type: "object" - properties: - MaxConcurrent: - description: | - The maximum number of replicas to run simultaneously. - type: "integer" - format: "int64" - default: 1 - TotalCompletions: - description: | - The total number of replicas desired to reach the Completed - state. If unset, will default to the value of `MaxConcurrent` - type: "integer" - format: "int64" - GlobalJob: - description: | - The mode used for services which run a task to the completed state - on each valid node. - type: "object" UpdateConfig: description: "Specification for the update strategy of the service." 
type: "object" properties: Parallelism: - description: | - Maximum number of tasks to be updated in one iteration (0 means - unlimited parallelism). + description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." type: "integer" format: "int64" Delay: @@ -3622,32 +3285,22 @@ definitions: type: "integer" format: "int64" FailureAction: - description: | - Action to take if an updated task fails to run, or stops running - during the update. + description: "Action to take if an updated task fails to run, or stops running during the update." type: "string" enum: - "continue" - "pause" - "rollback" Monitor: - description: | - Amount of time to monitor each updated task for failures, in - nanoseconds. + description: "Amount of time to monitor each updated task for failures, in nanoseconds." type: "integer" format: "int64" MaxFailureRatio: - description: | - The fraction of tasks that may fail during an update before the - failure action is invoked, specified as a floating point number - between 0 and 1. + description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." type: "number" default: 0 Order: - description: | - The order of operations when rolling out an updated task. Either - the old task is shut down before the new task is started, or the - new task is started before the old task is shut down. + description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." type: "string" enum: - "stop-first" @@ -3657,42 +3310,29 @@ definitions: type: "object" properties: Parallelism: - description: | - Maximum number of tasks to be rolled back in one iteration (0 means - unlimited parallelism). + description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)." type: "integer" format: "int64" Delay: - description: | - Amount of time between rollback iterations, in nanoseconds. + description: "Amount of time between rollback iterations, in nanoseconds." type: "integer" format: "int64" FailureAction: - description: | - Action to take if an rolled back task fails to run, or stops - running during the rollback. + description: "Action to take if an rolled back task fails to run, or stops running during the rollback." type: "string" enum: - "continue" - "pause" Monitor: - description: | - Amount of time to monitor each rolled back task for failures, in - nanoseconds. + description: "Amount of time to monitor each rolled back task for failures, in nanoseconds." type: "integer" format: "int64" MaxFailureRatio: - description: | - The fraction of tasks that may fail during a rollback before the - failure action is invoked, specified as a floating point number - between 0 and 1. + description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1." type: "number" default: 0 Order: - description: | - The order of operations when rolling back a task. Either the old - task is shut down before the new task is started, or the new task - is started before the old task is shut down. + description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." 
type: "string" enum: - "stop-first" @@ -3755,9 +3395,7 @@ definitions: - "dnsrr" default: "vip" Ports: - description: | - List of exposed ports that this service is accessible on from the - outside. Ports can only be provided if `vip` resolution mode is used. + description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." type: "array" items: $ref: "#/definitions/EndpointPortConfig" @@ -3813,61 +3451,6 @@ definitions: format: "dateTime" Message: type: "string" - ServiceStatus: - description: | - The status of the service's tasks. Provided only when requested as - part of a ServiceList operation. - type: "object" - properties: - RunningTasks: - description: | - The number of tasks for the service currently in the Running state. - type: "integer" - format: "uint64" - example: 7 - DesiredTasks: - description: | - The number of tasks for the service desired to be running. - For replicated services, this is the replica count from the - service spec. For global services, this is computed by taking - count of all tasks for the service with a Desired State other - than Shutdown. - type: "integer" - format: "uint64" - example: 10 - CompletedTasks: - description: | - The number of tasks for a job that are in the Completed state. - This field must be cross-referenced with the service type, as the - value of 0 may mean the service is not in a job mode, or it may - mean the job-mode service has no tasks yet Completed. - type: "integer" - format: "uint64" - JobStatus: - description: | - The status of the service when it is in one of ReplicatedJob or - GlobalJob modes. Absent on Replicated and Global mode services. The - JobIteration is an ObjectVersion, but unlike the Service's version, - does not need to be sent with an update request. - type: "object" - properties: - JobIteration: - description: | - JobIteration is a value increased each time a Job is executed, - successfully or otherwise. "Executed", in this case, means the - job as a whole has been started, not that an individual Task has - been launched. A job is "Executed" when its ServiceSpec is - updated. JobIteration can be used to disambiguate Tasks belonging - to different executions of a job. Though JobIteration will - increase with each subsequent execution, it may not necessarily - increase by 1, and so JobIteration should not be used to - $ref: "#/definitions/ObjectVersion" - LastExecution: - description: | - The last time, as observed by the server, that this job was - started. - type: "string" - format: "dateTime" example: ID: "9mnpnzenvg8p8tdbtq4wvbkcz" Version: @@ -4064,9 +3647,7 @@ definitions: type: "string" example: "" Driver: - description: | - Name of the secrets driver used to fetch the secret's value from an - external secret store. + description: "Name of the secrets driver used to fetch the secret's value from an external secret store" $ref: "#/definitions/Driver" Templating: description: | @@ -4171,8 +3752,7 @@ definitions: type: "boolean" example: false OOMKilled: - description: | - Whether this container has been killed because it ran out of memory. + description: "Whether this container has been killed because it ran out of memory." type: "boolean" example: false Dead: @@ -4274,6 +3854,44 @@ definitions: on Windows. type: "string" example: "/var/lib/docker" + SystemStatus: + description: | + Status information about this node (standalone Swarm API). + +
+ + > **Note**: The information returned in this field is only propagated + > by the Swarm standalone API, and is empty (`null`) when using + > built-in swarm mode. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Role", "primary"] + - ["State", "Healthy"] + - ["Strategy", "spread"] + - ["Filters", "health, port, containerslots, dependency, affinity, constraint, whitelist"] + - ["Nodes", "2"] + - [" swarm-agent-00", "192.168.99.102:2376"] + - [" └ ID", "5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"] + - [" └ Status", "Healthy"] + - [" └ Containers", "1 (1 Running, 0 Paused, 0 Stopped)"] + - [" └ Reserved CPUs", "0 / 1"] + - [" └ Reserved Memory", "0 B / 1.021 GiB"] + - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] + - [" └ UpdatedAt", "2017-08-09T10:03:46Z"] + - [" └ ServerVersion", "17.06.0-ce"] + - [" swarm-manager", "192.168.99.101:2376"] + - [" └ ID", "TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"] + - [" └ Status", "Healthy"] + - [" └ Containers", "2 (2 Running, 0 Paused, 0 Stopped)"] + - [" └ Reserved CPUs", "0 / 1"] + - [" └ Reserved Memory", "0 B / 1.021 GiB"] + - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] + - [" └ UpdatedAt", "2017-08-09T10:04:11Z"] + - [" └ ServerVersion", "17.06.0-ce"] Plugins: $ref: "#/definitions/PluginsInfo" MemoryLimit: @@ -4289,20 +3907,15 @@ definitions: type: "boolean" example: true CpuCfsPeriod: - description: | - Indicates if CPU CFS(Completely Fair Scheduler) period is supported by - the host. + description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." type: "boolean" example: true CpuCfsQuota: - description: | - Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by - the host. + description: "Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host." type: "boolean" example: true CPUShares: - description: | - Indicates if CPU Shares limiting is supported by the host. + description: "Indicates if CPU Shares limiting is supported by the host." type: "boolean" example: true CPUSet: @@ -4332,9 +3945,7 @@ definitions: type: "boolean" example: true Debug: - description: | - Indicates if the daemon is running in debug-mode / with debug-level - logging enabled. + description: "Indicates if the daemon is running in debug-mode / with debug-level logging enabled." type: "boolean" example: true NFd: @@ -4368,13 +3979,6 @@ definitions: enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" - CgroupVersion: - description: | - The version of the cgroup. - type: "string" - enum: ["1", "2"] - default: "1" - example: "1" NEventsListener: description: "Number of event listeners subscribed." type: "integer" @@ -4394,17 +3998,6 @@ definitions: or "Windows Server 2016 Datacenter" type: "string" example: "Alpine Linux v3.5" - OSVersion: - description: | - Version of the host's operating system - -
- - > **Note**: The information returned in this field, including its - > very existence, and the formatting of values, should not be considered - > stable, and may change without notice. - type: "string" - example: "16.04" OSType: description: | Generic type of the operating system of the host, as returned by the @@ -4521,7 +4114,7 @@ definitions:
- > **Deprecated**: This field is only propagated when using standalone Swarm + > **Note**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. @@ -4535,7 +4128,7 @@ definitions:
- > **Deprecated**: This field is only propagated when using standalone Swarm + > **Note**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. @@ -4986,23 +4579,19 @@ definitions: type: "string" NetworkAttachmentConfig: - description: | - Specifies how a service should be attached to a particular network. + description: "Specifies how a service should be attached to a particular network." type: "object" properties: Target: - description: | - The target network for attachment. Must be a network name or ID. + description: "The target network for attachment. Must be a network name or ID." type: "string" Aliases: - description: | - Discoverable alternate names for the service on this network. + description: "Discoverable alternate names for the service on this network." type: "array" items: type: "string" DriverOpts: - description: | - Driver attachment options for the network target. + description: "Driver attachment options for the network target" type: "object" additionalProperties: type: "string" @@ -5012,42 +4601,32 @@ paths: get: summary: "List containers" description: | - Returns a list of containers. For details on the format, see the - [inspect endpoint](#operation/ContainerInspect). + Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect). - Note that it uses a different, smaller representation of a container - than inspecting a single container. For example, the list of linked - containers is not propagated . + Note that it uses a different, smaller representation of a container than inspecting a single container. For example, + the list of linked containers is not propagated . operationId: "ContainerList" produces: - "application/json" parameters: - name: "all" in: "query" - description: | - Return all containers. By default, only running containers are shown. + description: "Return all containers. By default, only running containers are shown" type: "boolean" default: false - name: "limit" in: "query" - description: | - Return this number of most recently created containers, including - non-running ones. + description: "Return this number of most recently created containers, including non-running ones." type: "integer" - name: "size" in: "query" - description: | - Return the size of container as fields `SizeRw` and `SizeRootFs`. + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." type: "boolean" default: false - name: "filters" in: "query" description: | - Filters to process on the container list, encoded as JSON (a - `map[string][]string`). For example, `{"status": ["paused"]}` will - only return paused containers. - - Available filters: + Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. Available filters: - `ancestor`=(`[:]`, ``, or ``) - `before`=(`` or ``) @@ -5218,9 +4797,7 @@ paths: parameters: - name: "name" in: "query" - description: | - Assign the specified name to the container. Must match - `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`." 
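Since the container `name` pattern above is easy to get wrong, a stdlib-only pre-flight check using the exact pattern from the spec can save a round trip. This is a sketch, not part of the API:

```go
package main

import (
	"fmt"
	"regexp"
)

// containerName mirrors the pattern from the spec above; validating before
// the API call gives a clearer error than a 400 from the daemon. Note the
// pattern requires at least two characters.
var containerName = regexp.MustCompile(`^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$`)

func main() {
	for _, name := range []string{"web-1", "/db_0", "-bad", "x"} {
		fmt.Println(name, containerName.MatchString(name))
	}
}
```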
type: "string" pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" @@ -5238,9 +4815,7 @@ paths: type: "object" properties: EndpointsConfig: - description: | - A mapping of network name to endpoint configuration - for that network. + description: "A mapping of network name to endpoint configuration for that network." type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" @@ -5439,7 +5014,7 @@ paths: x-nullable: true $ref: "#/definitions/ContainerState" Image: - description: "The container's image ID" + description: "The container's image" type: "string" ResolvConfPath: type: "string" @@ -5449,6 +5024,9 @@ paths: type: "string" LogPath: type: "string" + Node: + description: "TODO" + type: "object" Name: type: "string" RestartCount: @@ -5474,9 +5052,7 @@ paths: GraphDriver: $ref: "#/definitions/GraphDriverData" SizeRw: - description: | - The size of files that have been created or changed by this - container. + description: "The size of files that have been created or changed by this container." type: "integer" format: "int64" SizeRootFs: @@ -5671,9 +5247,7 @@ paths: /containers/{id}/top: get: summary: "List processes running inside a container" - description: | - On Unix systems, this is done by running the `ps` command. This endpoint - is not supported on Windows. + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." operationId: "ContainerTop" responses: 200: @@ -5689,9 +5263,7 @@ paths: items: type: "string" Processes: - description: | - Each process running in the container, where each is process - is an array of values corresponding to the titles. + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" type: "array" items: type: "array" @@ -5756,16 +5328,15 @@ paths: description: | Get `stdout` and `stderr` logs from a container. - Note: This endpoint works only for containers with the `json-file` or - `journald` logging driver. + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. operationId: "ContainerLogs" responses: 200: description: | - logs returned as a stream in response body. - For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). - Note that unlike the attach endpoint, the logs endpoint does not - upgrade the connection and does not set Content-Type. + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not + set Content-Type. schema: type: "string" format: "binary" @@ -5818,9 +5389,7 @@ paths: default: false - name: "tail" in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." type: "string" default: "all" tags: ["Container"] @@ -5926,12 +5495,6 @@ paths: If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is nil then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. 
- - On a cgroup v2 host, the following fields are not set - * `blkio_stats`: all fields other than `io_service_bytes_recursive` - * `cpu_stats`: `cpu_usage.percpu_usage` - * `memory_stats`: `max_usage` and `failcnt` - Also, `memory_stats.stats` fields are incompatible with cgroup v1. operationId: "ContainerStats" produces: ["application/json"] responses: @@ -6050,18 +5613,9 @@ paths: type: "string" - name: "stream" in: "query" - description: | - Stream the output. If false, the stats will be output once and then - it will disconnect. + description: "Stream the output. If false, the stats will be output once and then it will disconnect." type: "boolean" default: true - - name: "one-shot" - in: "query" - description: | - Only get a single stat instead of waiting for 2 cycles. Must be used - with `stream=false`. - type: "boolean" - default: false tags: ["Container"] /containers/{id}/resize: post: @@ -6094,11 +5648,11 @@ paths: type: "string" - name: "h" in: "query" - description: "Height of the TTY session in characters" + description: "Height of the tty session in characters" type: "integer" - name: "w" in: "query" - description: "Width of the TTY session in characters" + description: "Width of the tty session in characters" type: "integer" tags: ["Container"] /containers/{id}/start: @@ -6129,10 +5683,7 @@ paths: type: "string" - name: "detachKeys" in: "query" - description: | - Override the key sequence for detaching a container. Format is a - single character `[a-Z]` or `ctrl-` where `` is one - of: `a-z`, `@`, `^`, `[`, `,` or `_`. + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." type: "string" tags: ["Container"] /containers/{id}/stop: @@ -6198,9 +5749,7 @@ paths: /containers/{id}/kill: post: summary: "Kill a container" - description: | - Send a POSIX signal to a container, defaulting to killing to the - container. + description: "Send a POSIX signal to a container, defaulting to killing to the container." operationId: "ContainerKill" responses: 204: @@ -6238,9 +5787,7 @@ paths: /containers/{id}/update: post: summary: "Update a container" - description: | - Change various configuration options of a container without having to - recreate it. + description: "Change various configuration options of a container without having to recreate it." operationId: "ContainerUpdate" consumes: ["application/json"] produces: ["application/json"] @@ -6340,10 +5887,7 @@ paths: description: | Use the freezer cgroup to suspend all processes in a container. - Traditionally, when suspending a process the `SIGSTOP` signal is used, - which is observable by the process being suspended. With the freezer - cgroup the process is unaware, and unable to capture, that it is being - suspended, and subsequently resumed. + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. operationId: "ContainerPause" responses: 204: @@ -6396,20 +5940,15 @@ paths: post: summary: "Attach to a container" description: | - Attach to a container to read its output or send it input. You can attach - to the same container multiple times and you can reattach to containers - that have been detached. + Attach to a container to read its output or send it input. 
You can attach to the same container multiple times and you can reattach to containers that have been detached. - Either the `stream` or `logs` parameter must be `true` for this endpoint - to do anything. + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. - See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) - for more details. + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. ### Hijacking - This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, - and `stderr` on the same socket. + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. This is the response from the daemon for an attach request: @@ -6420,11 +5959,9 @@ paths: [STREAM] ``` - After the headers and two new lines, the TCP connection can now be used - for raw, bidirectional communication between the client and server. + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. - To hint potential proxies about connection hijacking, the Docker client - can also optionally send connection upgrade headers. + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. For example, the client sends this request to upgrade the connection: @@ -6434,8 +5971,7 @@ paths: Connection: Upgrade ``` - The Docker daemon will respond with a `101 UPGRADED` response, and will - similarly follow with the raw stream: + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: ``` HTTP/1.1 101 UPGRADED @@ -6448,14 +5984,9 @@ paths: ### Stream format - When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), - the stream over the hijacked connected is multiplexed to separate out - `stdout` and `stderr`. The stream consists of a series of frames, each - containing a header and a payload. + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. - The header contains the information which the stream writes (`stdout` or - `stderr`). It also contains the size of the associated frame encoded in - the last four bytes (`uint32`). + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: @@ -6469,11 +6000,9 @@ paths: - 1: `stdout` - 2: `stderr` - `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size - encoded as big endian. + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. - Following the header is the payload, which is the specified number of - bytes of `STREAM_TYPE`. + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. The simplest way to implement this protocol is the following: @@ -6485,10 +6014,7 @@ paths: ### Stream format when using a TTY - When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), - the stream is not multiplexed. 
The data exchanged over the hijacked - connection is simply the raw data from the process PTY and client's - `stdin`. + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. operationId: "ContainerAttach" produces: @@ -6521,28 +6047,21 @@ paths: type: "string" - name: "detachKeys" in: "query" - description: | - Override the key sequence for detaching a container.Format is a single - character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, - `@`, `^`, `[`, `,` or `_`. + description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." type: "string" - name: "logs" in: "query" description: | Replay previous logs from the container. - This is useful for attaching to a container that has started and you - want to output everything since the container started. + This is useful for attaching to a container that has started and you want to output everything since the container started. - If `stream` is also enabled, once all the previous output has been - returned, it will seamlessly transition into streaming current - output. + If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. type: "boolean" default: false - name: "stream" in: "query" - description: | - Stream attached streams from the time the request was made onwards. + description: "Stream attached streams from the time the request was made onwards" type: "boolean" default: false - name: "stdin" @@ -6593,10 +6112,7 @@ paths: type: "string" - name: "detachKeys" in: "query" - description: | - Override the key sequence for detaching a container.Format is a single - character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, - `@`, `^`, `[`, `,`, or `_`. + description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`." type: "string" - name: "logs" in: "query" @@ -6669,9 +6185,7 @@ paths: type: "string" - name: "condition" in: "query" - description: | - Wait until a container state reaches the given condition, either - 'not-running' (default), 'next-exit', or 'removed'. + description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'." type: "string" default: "not-running" tags: ["Container"] @@ -6699,9 +6213,7 @@ paths: $ref: "#/definitions/ErrorResponse" examples: application/json: - message: | - You cannot remove a running container: c2ada9df5af8. Stop the - container before attempting removal or force remove + message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove" 500: description: "server error" schema: @@ -6731,10 +6243,7 @@ paths: /containers/{id}/archive: head: summary: "Get information about files in a container" - description: | - A response header `X-Docker-Container-Path-Stat` is returned, containing - a base64 - encoded JSON object with some filesystem header information - about the path. + description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." 
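Stepping back to the attach stream format described a few hunks above (an 8-byte header of `STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4` with a big-endian size, followed by the payload): a minimal demultiplexer in stdlib Go follows. The Docker client library ships equivalent logic as `stdcopy.StdCopy`.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// demux reads multiplexed attach/logs frames from r and routes payloads to
// stdout/stderr according to the STREAM_TYPE byte, per the format above.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	var header [8]byte
	for {
		if _, err := io.ReadFull(r, header[:]); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		size := binary.BigEndian.Uint32(header[4:8])
		payload := make([]byte, size)
		if _, err := io.ReadFull(r, payload); err != nil {
			return err
		}
		switch header[0] {
		case 1: // stdout
			stdout.Write(payload)
		case 2: // stderr
			stderr.Write(payload)
		default: // 0 is stdin; ignore anything else
		}
	}
}

func main() {
	// One stdout frame carrying "hi\n", assembled by hand for the demo.
	frame := append([]byte{1, 0, 0, 0, 0, 0, 0, 3}, []byte("hi\n")...)
	var out, errOut bytes.Buffer
	if err := demux(bytes.NewReader(frame), &out, &errOut); err != nil {
		fmt.Println("demux:", err)
	}
	fmt.Print(out.String())
}
```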
operationId: "ContainerArchiveInfo" responses: 200: @@ -6742,9 +6251,7 @@ paths: headers: X-Docker-Container-Path-Stat: type: "string" - description: | - A base64 - encoded JSON object with some filesystem header - information about the path + description: "A base64 - encoded JSON object with some filesystem header information about the path" 400: description: "Bad parameter" schema: @@ -6753,10 +6260,7 @@ paths: - type: "object" properties: message: - description: | - The error message. Either "must specify path parameter" - (path cannot be empty) or "not a directory" (path was - asserted to be a directory but exists as a file). + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." type: "string" x-nullable: false 404: @@ -6798,10 +6302,7 @@ paths: - type: "object" properties: message: - description: | - The error message. Either "must specify path parameter" - (path cannot be empty) or "not a directory" (path was - asserted to be a directory but exists as a file). + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." type: "string" x-nullable: false 404: @@ -6867,24 +6368,16 @@ paths: type: "string" - name: "noOverwriteDirNonDir" in: "query" - description: | - If `1`, `true`, or `True` then it will be an error if unpacking the - given content would cause an existing directory to be replaced with - a non-directory and vice versa. + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." type: "string" - name: "copyUIDGID" in: "query" - description: | - If `1`, `true`, then it will copy UID/GID maps to the dest file or - dir + description: "If “1”, “true”, then it will copy UID/GID maps to the dest file or dir" type: "string" - name: "inputStream" in: "body" required: true - description: | - The input stream must be a tar archive compressed with one of the - following algorithms: `identity` (no compression), `gzip`, `bzip2`, - or `xz`. + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." schema: type: "string" format: "binary" @@ -6982,10 +6475,7 @@ paths: - name: "filters" in: "query" description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the images list. - - Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `before`=(`[:]`, `` or ``) - `dangling=true` @@ -7201,11 +6691,7 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the list of build cache objects. - - Available filters: - + A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: - `until=`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') - `id=` - `parent=` @@ -7285,11 +6771,7 @@ paths: required: false - name: "X-Registry-Auth" in: "header" - description: | - A base64url-encoded auth configuration. - - Refer to the [authentication section](#section/Authentication) for - details. 
+ description: "A base64url-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" type: "string" - name: "platform" in: "query" @@ -7488,9 +6970,7 @@ paths: description: | Push an image to a registry. - If you wish to push an image on to a private registry, that image must - already have a tag which references the registry. For example, - `registry.example.com/myimage:latest`. + If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. The push is cancelled if the HTTP connection is closed. operationId: "ImagePush" @@ -7519,11 +6999,7 @@ paths: type: "string" - name: "X-Registry-Auth" in: "header" - description: | - A base64url-encoded auth configuration. - - Refer to the [authentication section](#section/Authentication) for - details. + description: "A base64url-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" type: "string" required: true tags: ["Image"] @@ -7727,9 +7203,7 @@ paths: /auth: post: summary: "Check auth configuration" - description: | - Validate credentials for a registry and, if available, get an identity - token for accessing the registry without password. + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." operationId: "SystemAuth" consumes: ["application/json"] produces: ["application/json"] @@ -8240,16 +7714,11 @@ paths: get: summary: "Export several images" description: | - Get a tarball containing all images and metadata for several image - repositories. + Get a tarball containing all images and metadata for several image repositories. - For each value of the `names` parameter: if it is a specific name and - tag (e.g. `ubuntu:latest`), then only that image (and its parents) are - returned; if it is an image ID, similarly only that image (and its parents) - are returned and there would be no names referenced in the 'repositories' - file for this image ID. + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. - For details on the format, see the [export image endpoint](#operation/ImageGet). + For details on the format, see [the export image endpoint](#operation/ImageGet). operationId: "ImageGetAll" produces: - "application/x-tar" @@ -8277,7 +7746,7 @@ paths: description: | Load a set of images and tags into a repository. - For details on the format, see the [export image endpoint](#operation/ImageGet). + For details on the format, see [the export image endpoint](#operation/ImageGet). operationId: "ImageLoad" consumes: - "application/x-tar" @@ -8350,16 +7819,12 @@ paths: description: "Attach to `stderr` of the exec command." DetachKeys: type: "string" - description: | - Override the key sequence for detaching a container. Format is - a single character `[a-Z]` or `ctrl-` where `` - is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." Tty: type: "boolean" description: "Allocate a pseudo-TTY." 
Env: - description: | - A list of environment variables in the form `["VAR=value", ...]`. + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." type: "array" items: type: "string" @@ -8374,14 +7839,10 @@ paths: default: false User: type: "string" - description: | - The user, and optionally, group to run the exec process inside - the container. Format is one of: `user`, `user:group`, `uid`, - or `uid:gid`. + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." WorkingDir: type: "string" - description: | - The working directory for the exec process inside the container. + description: "The working directory for the exec process inside the container." example: AttachStdin: false AttachStdout: true @@ -8403,10 +7864,7 @@ paths: /exec/{id}/start: post: summary: "Start an exec instance" - description: | - Starts a previously set up exec instance. If detach is true, this endpoint - returns immediately after starting the command. Otherwise, it sets up an - interactive session with the command. + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." operationId: "ExecStart" consumes: - "application/json" @@ -8447,9 +7905,7 @@ paths: /exec/{id}/resize: post: summary: "Resize an exec instance" - description: | - Resize the TTY session used by an exec instance. This endpoint only works - if `tty` was specified as part of creating and starting the exec instance. + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." operationId: "ExecResize" responses: 201: @@ -8569,8 +8025,7 @@ paths: Warnings: type: "array" x-nullable: false - description: | - Warnings that occurred when fetching the list of volumes. + description: "Warnings that occurred when fetching the list of volumes" items: type: "string" @@ -8639,8 +8094,7 @@ paths: title: "VolumeConfig" properties: Name: - description: | - The new volume's name. If not specified, Docker generates a name. + description: "The new volume's name. If not specified, Docker generates a name." type: "string" x-nullable: false Driver: @@ -8649,9 +8103,7 @@ paths: default: "local" x-nullable: false DriverOpts: - description: | - A mapping of driver options and values. These options are - passed directly to the driver and are driver specific. + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." type: "object" additionalProperties: type: "string" @@ -8765,12 +8217,10 @@ paths: get: summary: "List networks" description: | - Returns a list of networks. For details on the format, see the - [network inspect endpoint](#operation/NetworkInspect). + Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect). - Note that it uses a different, smaller representation of a network than - inspecting a single network. For example, the list of containers attached - to the network is not propagated in API versions 1.28 and up. + Note that it uses a different, smaller representation of a network than inspecting a single network. For example, + the list of containers attached to the network is not propagated in API versions 1.28 and up. 
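The list endpoints in the hunks above (volumes, networks) share a shape: a GET returning an array of summary objects, slimmer than the inspect form. A stdlib-only sketch against the daemon's Unix socket; the socket path is the common default and an assumption here:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Talk HTTP over the daemon's Unix socket; the "unix" host is a dummy.
	tr := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}
	httpc := &http.Client{Transport: tr}

	resp, err := httpc.Get("http://unix/networks")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the summary fields we care about.
	var nets []struct {
		Name   string `json:"Name"`
		Driver string `json:"Driver"`
		Scope  string `json:"Scope"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&nets); err != nil {
		panic(err)
	}
	for _, n := range nets {
		fmt.Printf("%-20s %-10s %s\n", n.Name, n.Driver, n.Scope)
	}
}
```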
operationId: "NetworkList" produces: - "application/json" @@ -8840,10 +8290,7 @@ paths: - name: "filters" in: "query" description: | - JSON encoded value of the filters (a `map[string][]string`) to process - on the networks list. - - Available filters: + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - `dangling=` When set to `true` (or `1`), returns all networks that are not in use by a container. When set to `false` @@ -8968,14 +8415,7 @@ paths: description: "The network's name." type: "string" CheckDuplicate: - description: | - Check for networks with duplicate names. Since Network is - primarily keyed based on a random ID and not on the name, and - network name is strictly a user-friendly alias to the network - which is uniquely identified using ID, there is no guaranteed - way to check for duplicates. CheckDuplicate is there to provide - a best effort checking of any networks which has the same name - but it is not guaranteed to catch all name collisions. + description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions." type: "boolean" Driver: description: "Name of the network driver plugin to use." @@ -8985,14 +8425,10 @@ paths: description: "Restrict external access to the network." type: "boolean" Attachable: - description: | - Globally scoped network is manually attachable by regular - containers from workers in swarm mode. + description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode." type: "boolean" Ingress: - description: | - Ingress network is the network which provides the routing-mesh - in swarm mode. + description: "Ingress network is the network which provides the routing-mesh in swarm mode." type: "boolean" IPAM: description: "Optional custom IP scheme for the network." @@ -9121,12 +8557,10 @@ paths: properties: Container: type: "string" - description: | - The ID or name of the container to disconnect from the network. + description: "The ID or name of the container to disconnect from the network." Force: type: "boolean" - description: | - Force the container to disconnect from the network. + description: "Force the container to disconnect from the network." tags: ["Network"] /networks/prune: post: @@ -9183,10 +8617,7 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the plugin list. - - Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: - `capability=` - `enable=|` @@ -9202,9 +8633,7 @@ paths: schema: type: "array" items: - description: | - Describes a permission the user has to accept upon installing - the plugin. + description: "Describes a permission the user has to accept upon installing the plugin." type: "object" title: "PluginPrivilegeItem" properties: @@ -9236,9 +8665,7 @@ paths: parameters: - name: "remote" in: "query" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. + description: "The name of the plugin. 
The `:latest` tag is optional, and is the default if omitted." required: true type: "string" tags: @@ -9249,8 +8676,7 @@ paths: summary: "Install a plugin" operationId: "PluginPull" description: | - Pulls and installs a plugin. After the plugin is installed, it can be - enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). produces: - "application/json" responses: @@ -9279,21 +8705,14 @@ paths: type: "string" - name: "X-Registry-Auth" in: "header" - description: | - A base64url-encoded auth configuration to use when pulling a plugin - from a registry. - - Refer to the [authentication section](#section/Authentication) for - details. + description: "A base64url-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" type: "string" - name: "body" in: "body" schema: type: "array" items: - description: | - Describes a permission accepted by the user upon installing the - plugin. + description: "Describes a permission accepted by the user upon installing the plugin." type: "object" properties: Name: @@ -9338,9 +8757,7 @@ paths: parameters: - name: "name" in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." required: true type: "string" tags: ["Plugin"] @@ -9364,16 +8781,12 @@ paths: parameters: - name: "name" in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." required: true type: "string" - name: "force" in: "query" - description: | - Disable the plugin before removing. This may result in issues if the - plugin is in use by a container. + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." type: "boolean" default: false tags: ["Plugin"] @@ -9395,9 +8808,7 @@ paths: parameters: - name: "name" in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." required: true type: "string" - name: "timeout" @@ -9424,9 +8835,7 @@ paths: parameters: - name: "name" in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." required: true type: "string" tags: ["Plugin"] @@ -9448,9 +8857,7 @@ paths: parameters: - name: "name" in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." required: true type: "string" - name: "remote" @@ -9463,21 +8870,14 @@ paths: type: "string" - name: "X-Registry-Auth" in: "header" - description: | - A base64url-encoded auth configuration to use when pulling a plugin - from a registry. - - Refer to the [authentication section](#section/Authentication) for - details. 
+ description: "A base64url-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" type: "string" - name: "body" in: "body" schema: type: "array" items: - description: | - Describes a permission accepted by the user upon installing the - plugin. + description: "Describes a permission accepted by the user upon installing the plugin." type: "object" properties: Name: @@ -9518,9 +8918,7 @@ paths: parameters: - name: "name" in: "query" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." required: true type: "string" - name: "tarContext" @@ -9539,9 +8937,7 @@ paths: parameters: - name: "name" in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." required: true type: "string" responses: @@ -9565,9 +8961,7 @@ paths: parameters: - name: "name" in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." required: true type: "string" - name: "body" @@ -9716,9 +9110,7 @@ paths: $ref: "#/definitions/NodeSpec" - name: "version" in: "query" - description: | - The version number of the node object being updated. This is required - to avoid conflicting writes. + description: "The version number of the node object being updated. This is required to avoid conflicting writes." type: "integer" format: "int64" required: true @@ -9779,35 +9171,20 @@ paths: type: "object" properties: ListenAddr: - description: | - Listen address used for inter-manager communication, as well - as determining the networking interface used for the VXLAN - Tunnel Endpoint (VTEP). This can either be an address/port - combination in the form `192.168.1.1:4567`, or an interface - followed by a port number, like `eth0:4567`. If the port number - is omitted, the default swarm listening port is used. + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." type: "string" AdvertiseAddr: - description: | - Externally reachable address advertised to other nodes. This - can either be an address/port combination in the form - `192.168.1.1:4567`, or an interface followed by a port number, - like `eth0:4567`. If the port number is omitted, the port - number from the listen address is used. If `AdvertiseAddr` is - not specified, it will be automatically detected when possible. + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." 
type: "string" DataPathAddr: description: | - Address or interface to use for data path traffic (format: - ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same address - as `AdvertiseAddr` is used. - - The `DataPathAddr` specifies the address that global scope - network drivers will publish towards other nodes in order to - reach the containers running on this node. Using this parameter - it is possible to separate the container data traffic from the - management traffic of the cluster. + Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, + or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` + is used. + + The `DataPathAddr` specifies the address that global scope network drivers will publish towards other + nodes in order to reach the containers running on this node. Using this parameter it is possible to + separate the container data traffic from the management traffic of the cluster. type: "string" DataPathPort: description: | @@ -9818,8 +9195,7 @@ paths: format: "uint32" DefaultAddrPool: description: | - Default Address Pool specifies default subnet pools for global - scope networks. + Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" @@ -9829,8 +9205,7 @@ paths: type: "boolean" SubnetSize: description: | - SubnetSize specifies the subnet size of the networks created - from the default subnet pool. + SubnetSize specifies the subnet size of the networks created from the default subnet pool type: "integer" format: "uint32" Spec: @@ -9877,37 +9252,24 @@ paths: type: "object" properties: ListenAddr: - description: | - Listen address used for inter-manager communication if the node - gets promoted to manager, as well as determining the networking - interface used for the VXLAN Tunnel Endpoint (VTEP). + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." type: "string" AdvertiseAddr: - description: | - Externally reachable address advertised to other nodes. This - can either be an address/port combination in the form - `192.168.1.1:4567`, or an interface followed by a port number, - like `eth0:4567`. If the port number is omitted, the port - number from the listen address is used. If `AdvertiseAddr` is - not specified, it will be automatically detected when possible. + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." type: "string" DataPathAddr: description: | - Address or interface to use for data path traffic (format: - ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same addres - as `AdvertiseAddr` is used. + Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, + or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` + is used. 
- The `DataPathAddr` specifies the address that global scope - network drivers will publish towards other nodes in order to - reach the containers running on this node. Using this parameter - it is possible to separate the container data traffic from the - management traffic of the cluster. + The `DataPathAddr` specifies the address that global scope network drivers will publish towards other + nodes in order to reach the containers running on this node. Using this parameter it is possible to + separate the container data traffic from the management traffic of the cluster. type: "string" RemoteAddrs: - description: | - Addresses of manager nodes already participating in the swarm. + description: "Addresses of manager nodes already participating in the swarm." type: "array" items: type: "string" @@ -9938,9 +9300,7 @@ paths: $ref: "#/definitions/ErrorResponse" parameters: - name: "force" - description: | - Force leave swarm, even if this is the last manager or that it will - break the cluster. + description: "Force leave swarm, even if this is the last manager or that it will break the cluster." in: "query" type: "boolean" default: false @@ -9972,9 +9332,7 @@ paths: $ref: "#/definitions/SwarmSpec" - name: "version" in: "query" - description: | - The version number of the swarm object being updated. This is - required to avoid conflicting writes. + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." type: "integer" format: "int64" required: true @@ -10077,20 +9435,12 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the services list. - - Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: - `id=` - `label=` - `mode=["replicated"|"global"]` - `name=` - - name: "status" - in: "query" - type: "boolean" - description: | - Include service status, with count of running and desired tasks. tags: ["Service"] /services/create: post: @@ -10213,12 +9563,7 @@ paths: foo: "bar" - name: "X-Registry-Auth" in: "header" - description: | - A base64url-encoded auth configuration for pulling from private - registries. - - Refer to the [authentication section](#section/Authentication) for - details. + description: "A base64url-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" type: "string" tags: ["Service"] /services/{id}: @@ -10354,12 +9699,10 @@ paths: - name: "version" in: "query" - description: | - The version number of the service object being updated. This is - required to avoid conflicting writes. - This version number should be the value as currently set on the - service *before* the update. You can find the current version by - calling `GET /services/{id}` + description: "The version number of the service object being updated. + This is required to avoid conflicting writes. + This version number should be the value as currently set on the service *before* the update. + You can find the current version by calling `GET /services/{id}`" required: true type: "integer" - name: "registryAuthFrom" @@ -10379,12 +9722,7 @@ paths: type: "string" - name: "X-Registry-Auth" in: "header" - description: | - A base64url-encoded auth configuration for pulling from private - registries. - - Refer to the [authentication section](#section/Authentication) for - details. 
+ description: "A base64url-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" type: "string" tags: ["Service"] @@ -10392,11 +9730,9 @@ paths: get: summary: "Get service logs" description: | - Get `stdout` and `stderr` logs from a service. See also - [`/containers/{id}/logs`](#operation/ContainerLogs). + Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). - **Note**: This endpoint works only for services with the `local`, - `json-file` or `journald` logging drivers. + **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "ServiceLogs" responses: 200: @@ -10457,9 +9793,7 @@ paths: default: false - name: "tail" in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." type: "string" default: "all" tags: ["Service"] @@ -10600,10 +9934,7 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the tasks list. - - Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters: - `desired-state=(running | shutdown | accepted)` - `id=` @@ -10646,11 +9977,9 @@ paths: get: summary: "Get task logs" description: | - Get `stdout` and `stderr` logs from a task. - See also [`/containers/{id}/logs`](#operation/ContainerLogs). + Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). - **Note**: This endpoint works only for services with the `local`, - `json-file` or `journald` logging drivers. + **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" responses: 200: @@ -10711,9 +10040,7 @@ paths: default: false - name: "tail" in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." type: "string" default: "all" tags: ["Task"] @@ -10767,10 +10094,7 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the secrets list. - - Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: - `id=` - `label= or label==value` @@ -10927,15 +10251,10 @@ paths: in: "body" schema: $ref: "#/definitions/SecretSpec" - description: | - The spec of the secret to update. Currently, only the Labels field - can be updated. All other fields must remain unchanged from the - [SecretInspect endpoint](#operation/SecretInspect) response values. + description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." - name: "version" in: "query" - description: | - The version number of the secret object being updated. This is - required to avoid conflicting writes. 
+ description: "The version number of the secret object being updated. This is required to avoid conflicting writes." type: "integer" format: "int64" required: true @@ -10974,10 +10293,7 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the configs list. - - Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: - `id=` - `label= or label==value` @@ -11121,15 +10437,10 @@ paths: in: "body" schema: $ref: "#/definitions/ConfigSpec" - description: | - The spec of the config to update. Currently, only the Labels field - can be updated. All other fields must remain unchanged from the - [ConfigInspect endpoint](#operation/ConfigInspect) response values. + description: "The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values." - name: "version" in: "query" - description: | - The version number of the config object being updated. This is - required to avoid conflicting writes. + description: "The version number of the config object being updated. This is required to avoid conflicting writes." type: "integer" format: "int64" required: true @@ -11137,8 +10448,7 @@ paths: /distribution/{name}/json: get: summary: "Get image information from the registry" - description: | - Return image digest and platform information by contacting the registry. + description: "Return image digest and platform information by contacting the registry." operationId: "DistributionInspect" produces: - "application/json" @@ -11153,8 +10463,7 @@ paths: properties: Descriptor: type: "object" - description: | - A descriptor struct containing digest, media type, and size. + description: "A descriptor struct containing digest, media type, and size" properties: MediaType: type: "string" @@ -11169,8 +10478,7 @@ paths: type: "string" Platforms: type: "array" - description: | - An array containing all platforms supported by the image. + description: "An array containing all platforms supported by the image" items: type: "object" properties: @@ -11229,13 +10537,11 @@ paths: post: summary: "Initialize interactive session" description: | - Start a new interactive session with a server. Session allows server to - call back to the client for advanced capabilities. + Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. ### Hijacking - This endpoint hijacks the HTTP connection to HTTP2 transport that allows - the client to expose gPRC services on that connection. + This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection. 
        For example, the client sends this request to upgrade the connection:
@@ -11245,8 +10551,7 @@ paths:
          Connection: Upgrade
          ```

-        The Docker daemon responds with a `101 UPGRADED` response follow with
-        the raw stream:
+        The Docker daemon will respond with a `101 UPGRADED` response followed by the raw stream:

          ```
          HTTP/1.1 101 UPGRADED
diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go
index 9880c632bdd4b..ef1e669c396f1 100644
--- a/vendor/github.com/docker/docker/api/types/backend/backend.go
+++ b/vendor/github.com/docker/docker/api/types/backend/backend.go
@@ -30,7 +30,7 @@ type ContainerAttachConfig struct {
 // expectation is for the logger endpoints to assemble the chunks using this
 // metadata.
 type PartialLogMetaData struct {
-	Last    bool   // true if this message is last of a partial
+	Last    bool   //true if this message is last of a partial
 	ID      string // identifies group of messages comprising a single record
 	Ordinal int    // ordering of message in partial group
 }
@@ -73,7 +73,6 @@ type LogSelector struct {
 // behavior of a backend.ContainerStats() call.
 type ContainerStatsConfig struct {
 	Stream    bool
-	OneShot   bool
 	OutStream io.Writer
 	Version   string
 }
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
index 9c464b73e25d6..fe90617eec352 100644
--- a/vendor/github.com/docker/docker/api/types/client.go
+++ b/vendor/github.com/docker/docker/api/types/client.go
@@ -205,7 +205,7 @@ const (
 	// BuilderV1 is the first generation builder in docker daemon
 	BuilderV1 BuilderVersion = "1"
 	// BuilderBuildKit is builder based on moby/buildkit project
-	BuilderBuildKit BuilderVersion = "2"
+	BuilderBuildKit = "2"
 )

 // ImageBuildResponse holds information
@@ -265,7 +265,7 @@ type ImagePullOptions struct {
 // if the privilege request fails.
 type RequestPrivilegeFunc func() (string, error)

-// ImagePushOptions holds information to push images.
+//ImagePushOptions holds information to push images.
 type ImagePushOptions ImagePullOptions

 // ImageRemoveOptions holds parameters to remove images.
@@ -363,10 +363,6 @@ type ServiceUpdateOptions struct {

 // ServiceListOptions holds parameters to list services with.
 type ServiceListOptions struct {
 	Filters filters.Args
-
-	// Status indicates whether the server should include the service task
-	// count of running and desired tasks.
- Status bool } // ServiceInspectOptions holds parameters related to the "service inspect" diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 3dd133a3a58a4..178e911a7afc2 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -3,7 +3,6 @@ package types // import "github.com/docker/docker/api/types" import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" - specs "github.com/opencontainers/image-spec/specs-go/v1" ) // configs holds structs used for internal communication between the @@ -16,7 +15,6 @@ type ContainerCreateConfig struct { Config *container.Config HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig - Platform *specs.Platform AdjustCPUShares bool } diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go index 16dd5019eef88..222d141007ec5 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -1,7 +1,8 @@ package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` // // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go index d0c852f84d5c2..1ec9c3728ba8e 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -1,7 +1,8 @@ package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` // // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go index 63381da36749a..f8a606687cb2b 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ b/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -1,7 +1,8 @@ package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` // // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- @@ -10,9 +11,7 @@ package container // import "github.com/docker/docker/api/types/container" // swagger:model ContainerTopOKBody type ContainerTopOKBody struct { - // Each process running in the container, where each is process - // is an array of values corresponding to the titles. 
-	//
+	// Each process running in the container, where each process is an array of values corresponding to the titles
 	// Required: true
 	Processes [][]string `json:"Processes"`
diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go
index c10f175ea82f7..33addedf7791d 100644
--- a/vendor/github.com/docker/docker/api/types/container/container_update.go
+++ b/vendor/github.com/docker/docker/api/types/container/container_update.go
@@ -1,7 +1,8 @@
 package container // import "github.com/docker/docker/api/types/container"

 // ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
 //
 // See hack/generate-swagger-api.sh
 // ----------------------------------------------------------------------------
diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go
index 49e05ae669449..94b6a20e159b6 100644
--- a/vendor/github.com/docker/docker/api/types/container/container_wait.go
+++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go
@@ -1,7 +1,8 @@
 package container // import "github.com/docker/docker/api/types/container"

 // ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
 //
 // See hack/generate-swagger-api.sh
 // ----------------------------------------------------------------------------
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
index b8a4b3aa62bc7..c3de3d976a57b 100644
--- a/vendor/github.com/docker/docker/api/types/container/host_config.go
+++ b/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -7,32 +7,9 @@ import (
 	"github.com/docker/docker/api/types/mount"
 	"github.com/docker/docker/api/types/strslice"
 	"github.com/docker/go-connections/nat"
-	units "github.com/docker/go-units"
+	"github.com/docker/go-units"
 )

-// CgroupnsMode represents the cgroup namespace mode of the container
-type CgroupnsMode string
-
-// IsPrivate indicates whether the container uses its own private cgroup namespace
-func (c CgroupnsMode) IsPrivate() bool {
-	return c == "private"
-}
-
-// IsHost indicates whether the container shares the host's cgroup namespace
-func (c CgroupnsMode) IsHost() bool {
-	return c == "host"
-}
-
-// IsEmpty indicates whether the container cgroup namespace mode is unset
-func (c CgroupnsMode) IsEmpty() bool {
-	return c == ""
-}
-
-// Valid indicates whether the cgroup namespace mode is valid
-func (c CgroupnsMode) Valid() bool {
-	return c.IsEmpty() || c.IsPrivate() || c.IsHost()
-}
-
 // Isolation represents the isolation technology of a container.
The supported // values are platform specific type Isolation string @@ -145,7 +122,7 @@ func (n NetworkMode) ConnectedContainer() string { return "" } -// UserDefined indicates user-created network +//UserDefined indicates user-created network func (n NetworkMode) UserDefined() string { if n.IsUserDefined() { return string(n) @@ -404,10 +381,9 @@ type HostConfig struct { CapAdd strslice.StrSlice // List of kernel capabilities to add to the container CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set) - CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for ExtraHosts []string // List of extra hosts GroupAdd []string // List of additional groups that the container process will run as IpcMode IpcMode // IPC namespace to use for the container diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go deleted file mode 100644 index f84f034cd545c..0000000000000 --- a/vendor/github.com/docker/docker/api/types/error_response_ext.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// Error returns the error message -func (e ErrorResponse) Error() string { - return e.Message -} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 4bc91cffd6e5c..0bd2e1e1853b9 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -66,7 +66,7 @@ func ToJSON(a Args) (string, error) { // then the encoded format will use an older legacy format where the values are a // list of strings, instead of a set. // -// Deprecated: do not use in any new code; use ToJSON instead +// Deprecated: Use ToJSON func ToParamWithVersion(version string, a Args) (string, error) { if a.Len() == 0 { return "", nil @@ -154,7 +154,7 @@ func (args Args) Len() int { func (args Args) MatchKVList(key string, sources map[string]string) bool { fieldValues := args.fields[key] - // do not filter if there is no filter set or cannot determine filter + //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { return true } @@ -200,7 +200,7 @@ func (args Args) Match(field, source string) bool { // ExactMatch returns true if the source matches exactly one of the values. func (args Args) ExactMatch(key, source string) bool { fieldValues, ok := args.fields[key] - // do not filter if there is no filter set or cannot determine filter + //do not filter if there is no filter set or cannot determine filter if !ok || len(fieldValues) == 0 { return true } @@ -213,7 +213,7 @@ func (args Args) ExactMatch(key, source string) bool { // matches exactly the value. 
func (args Args) UniqueExactMatch(key, source string) bool { fieldValues := args.fields[key] - // do not filter if there is no filter set or cannot determine filter + //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { return true } diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go index e302bb0aebbe7..b5a7a0c4901bc 100644 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -1,7 +1,8 @@ package image // import "github.com/docker/docker/api/types/image" // ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` // // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index 437b184c67b5b..71e97338fdc8a 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -1,6 +1,7 @@ package network // import "github.com/docker/docker/api/types/network" import ( "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" ) // Address represents an IP address @@ -12,7 +13,7 @@ type Address struct { // IPAM represents IP Address Management type IPAM struct { Driver string - Options map[string]string // Per network IPAM driver options + Options map[string]string //Per network IPAM driver options Config []IPAMConfig } @@ -122,5 +123,5 @@ var acceptedFilters = map[string]bool{ // ValidateFilters validates the list of filter args with the available filters. func ValidateFilters(filter filters.Args) error { - return filter.Validate(acceptedFilters) + return errdefs.InvalidParameter(filter.Validate(acceptedFilters)) } diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 53e47084c8d5e..8789ad3b32101 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -4,7 +4,7 @@ import ( "encoding/json" "net" - v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/image-spec/specs-go/v1" ) // ServiceConfig stores daemon registry services configuration. diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 5bbedfcf682ac..48190c1762f11 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -67,11 +67,10 @@ type ContainerSpec struct { // The format of extra hosts on swarmkit is specified in: // http://man7.org/linux/man-pages/man5/hosts.5.html // IP_address canonical_hostname [aliases...] 
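The `filters` hunks above keep the deprecated `ToParamWithVersion` around only for old API versions, while `ToJSON` remains the primary encoder. A minimal sketch of the two encoders side by side (the filter keys here are arbitrary examples):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Arbitrary example filters; any key/value pairs work the same way.
	args := filters.NewArgs()
	args.Add("label", "env=prod")
	args.Add("dangling", "true")

	// ToJSON is the current encoder (values are encoded as sets).
	j, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(j)

	// ToParamWithVersion emits the legacy list-based format when the
	// given API version is old enough to require it.
	legacy, err := filters.ToParamWithVersion("1.21", args)
	if err != nil {
		panic(err)
	}
	fmt.Println(legacy)
}
```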
- Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` - Capabilities []string `json:",omitempty"` + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go index e45045866a6ea..1fdc9b0436135 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -1,5 +1,6 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: plugin.proto +// DO NOT EDIT! /* Package runtime is a generated protocol buffer package. @@ -37,7 +38,6 @@ type PluginSpec struct { Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` } func (m *PluginSpec) Reset() { *m = PluginSpec{} } @@ -73,13 +73,6 @@ func (m *PluginSpec) GetDisabled() bool { return false } -func (m *PluginSpec) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - // PluginPrivilege describes a permission the user has to accept // upon installing a plugin. 
type PluginPrivilege struct { @@ -167,21 +160,6 @@ func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { } i++ } - if len(m.Env) > 0 { - for _, s := range m.Env { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } return i, nil } @@ -230,6 +208,24 @@ func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -259,12 +255,6 @@ func (m *PluginSpec) Size() (n int) { if m.Disabled { n += 2 } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } return n } @@ -439,35 +429,6 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } } m.Disabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPlugin(dAtA[iNdEx:]) @@ -734,21 +695,18 @@ var ( func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } var fileDescriptorPlugin = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, - 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, - 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, - 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, - 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, - 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, - 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, - 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, - 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, - 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, - 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, - 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, - 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 
0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, - 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, - 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, - 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, + 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, + 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, + 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, + 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, + 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, + 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, + 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, + 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, + 0x0c, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto index 9ef169046b4fa..6d63b7783fd9f 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -9,7 +9,6 @@ message PluginSpec { string remote = 2; repeated PluginPrivilege privileges = 3; bool disabled = 4; - repeated string env = 5; } // PluginPrivilege describes a permission the user has to accept diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go index 6eb452d24d122..abf192e759414 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -10,17 +10,6 @@ type Service struct { PreviousSpec *ServiceSpec `json:",omitempty"` Endpoint Endpoint `json:",omitempty"` UpdateStatus *UpdateStatus `json:",omitempty"` - - // ServiceStatus is an optional, extra field indicating the number of - // desired and running tasks. It is provided primarily as a shortcut to - // calculating these values client-side, which otherwise would require - // listing all tasks for a service, an operation that could be - // computation and network expensive. - ServiceStatus *ServiceStatus `json:",omitempty"` - - // JobStatus is the status of a Service which is in one of ReplicatedJob or - // GlobalJob modes. It is absent on Replicated and Global services. - JobStatus *JobStatus `json:",omitempty"` } // ServiceSpec represents the spec of a service. @@ -43,10 +32,8 @@ type ServiceSpec struct { // ServiceMode represents the mode of a service. 
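For context on the generated marshalling code above: `encodeVarintPlugin` and the removed `Env` decode loop both implement standard protobuf varints, 7 bits per byte with the high bit set while more bytes follow. A stand-alone sketch that mirrors the generated helper against the stdlib:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeVarint mirrors the shape of the generated encodeVarintPlugin
// helper: emit 7 bits per byte, setting the continuation bit (0x80)
// while more bytes remain.
func encodeVarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02

	// The stdlib encoder produces the same wire bytes.
	std := make([]byte, binary.MaxVarintLen64)
	m := binary.PutUvarint(std, 300)
	fmt.Printf("% x\n", std[:m]) // ac 02

	// And decoding round-trips the value.
	v, _ := binary.Uvarint(buf[:n])
	fmt.Println(v) // 300
}
```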
type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` - ReplicatedJob *ReplicatedJob `json:",omitempty"` - GlobalJob *GlobalJob `json:",omitempty"` + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` } // UpdateState is the state of a service update. @@ -83,32 +70,6 @@ type ReplicatedService struct { // GlobalService is a kind of ServiceMode. type GlobalService struct{} -// ReplicatedJob is the a type of Service which executes a defined Tasks -// in parallel until the specified number of Tasks have succeeded. -type ReplicatedJob struct { - // MaxConcurrent indicates the maximum number of Tasks that should be - // executing simultaneously for this job at any given time. There may be - // fewer Tasks that MaxConcurrent executing simultaneously; for example, if - // there are fewer than MaxConcurrent tasks needed to reach - // TotalCompletions. - // - // If this field is empty, it will default to a max concurrency of 1. - MaxConcurrent *uint64 `json:",omitempty"` - - // TotalCompletions is the total number of Tasks desired to run to - // completion. - // - // If this field is empty, the value of MaxConcurrent will be used. - TotalCompletions *uint64 `json:",omitempty"` -} - -// GlobalJob is the type of a Service which executes a Task on every Node -// matching the Service's placement constraints. These tasks run to completion -// and then exit. -// -// This type is deliberately empty. -type GlobalJob struct{} - const ( // UpdateFailureActionPause PAUSE UpdateFailureActionPause = "pause" @@ -161,42 +122,3 @@ type UpdateConfig struct { // started, or the new task is started before the old task is shut down. Order string } - -// ServiceStatus represents the number of running tasks in a service and the -// number of tasks desired to be running. -type ServiceStatus struct { - // RunningTasks is the number of tasks for the service actually in the - // Running state - RunningTasks uint64 - - // DesiredTasks is the number of tasks desired to be running by the - // service. For replicated services, this is the replica count. For global - // services, this is computed by taking the number of tasks with desired - // state of not-Shutdown. - DesiredTasks uint64 - - // CompletedTasks is the number of tasks in the state Completed, if this - // service is in ReplicatedJob or GlobalJob mode. This field must be - // cross-referenced with the service type, because the default value of 0 - // may mean that a service is not in a job mode, or it may mean that the - // job has yet to complete any tasks. - CompletedTasks uint64 -} - -// JobStatus is the status of a job-type service. -type JobStatus struct { - // JobIteration is a value increased each time a Job is executed, - // successfully or otherwise. "Executed", in this case, means the job as a - // whole has been started, not that an individual Task has been launched. A - // job is "Executed" when its ServiceSpec is updated. JobIteration can be - // used to disambiguate Tasks belonging to different executions of a job. - // - // Though JobIteration will increase with each subsequent execution, it may - // not necessarily increase by 1, and so JobIteration should not be used to - // keep track of the number of times a job has been executed. - JobIteration Version - - // LastExecution is the time that the job was last executed, as observed by - // Swarm manager. 
- LastExecution time.Time `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index a6f7ab7b5c790..d5a57df5db5a7 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -56,12 +56,6 @@ type Task struct { DesiredState TaskState `json:",omitempty"` NetworksAttachments []NetworkAttachment `json:",omitempty"` GenericResources []GenericResource `json:",omitempty"` - - // JobIteration is the JobIteration of the Service that this Task was - // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is - // used to determine which Tasks belong to which run of the job. This field - // is absent if the Service mode is Replicated or Global. - JobIteration *Version `json:",omitempty"` } // TaskSpec represents the spec of a task. @@ -91,21 +85,13 @@ type TaskSpec struct { Runtime RuntimeType `json:",omitempty"` } -// Resources represents resources (CPU/Memory) which can be advertised by a -// node and requested to be reserved for a task. +// Resources represents resources (CPU/Memory). type Resources struct { NanoCPUs int64 `json:",omitempty"` MemoryBytes int64 `json:",omitempty"` GenericResources []GenericResource `json:",omitempty"` } -// Limit describes limits on resources which can be requested by a task. -type Limit struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - Pids int64 `json:",omitempty"` -} - // GenericResource represents a "user defined" resource which can // be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) type GenericResource struct { @@ -133,7 +119,7 @@ type DiscreteGenericResource struct { // ResourceRequirements represents resources requirements. 
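With `ServiceStatus` removed above, a caller that wants running/desired counts has to derive them by listing the service's tasks, which is exactly the expense the deleted comment warned about. A hedged sketch of that fallback, assuming a placeholder service ID ("myservice") and a reachable daemon:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

// countTasks derives the counts client-side: list the service's tasks
// and bucket them by state, per the removed ServiceStatus field docs.
func countTasks(ctx context.Context, cli client.APIClient, serviceID string) (running, desired int, err error) {
	f := filters.NewArgs()
	f.Add("service", serviceID)
	tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filters: f})
	if err != nil {
		return 0, 0, err
	}
	for _, t := range tasks {
		if t.Status.State == swarm.TaskStateRunning {
			running++
		}
		// Tasks whose desired state is not Shutdown count as desired.
		if t.DesiredState != swarm.TaskStateShutdown {
			desired++
		}
	}
	return running, desired, nil
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	// "myservice" is a placeholder service ID.
	r, d, err := countTasks(context.Background(), cli, "myservice")
	if err != nil {
		panic(err)
	}
	fmt.Println(r, d)
}
```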
type ResourceRequirements struct { - Limits *Limit `json:",omitempty"` + Limits *Resources `json:",omitempty"` Reservations *Resources `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index a6ed75de3e689..a39ffcb7be2d0 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -39,7 +39,6 @@ type ImageInspect struct { Author string Config *container.Config Architecture string - Variant string `json:",omitempty"` Os string OsVersion string `json:",omitempty"` Size int64 @@ -154,7 +153,7 @@ type Info struct { Images int Driver string DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + SystemStatus [][2]string Plugins PluginsInfo MemoryLimit bool SwapLimit bool @@ -175,11 +174,9 @@ type Info struct { SystemTime string LoggingDriver string CgroupDriver string - CgroupVersion string `json:",omitempty"` NEventsListener int KernelVersion string OperatingSystem string - OSVersion string OSType string Architecture string IndexServerAddress string @@ -195,8 +192,8 @@ type Info struct { Labels []string ExperimentalBuild bool ServerVersion string - ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated - ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated + ClusterStore string + ClusterAdvertise string Runtimes map[string]Runtime DefaultRuntime string Swarm swarm.Info @@ -319,7 +316,7 @@ type ContainerState struct { } // ContainerNode stores information about the node that a container -// is running on. It's only used by the Docker Swarm standalone API +// is running on. It's only available in Docker Swarm type ContainerNode struct { ID string IPAddress string `json:"IP"` @@ -343,7 +340,7 @@ type ContainerJSONBase struct { HostnamePath string HostsPath string LogPath string - Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API + Node *ContainerNode `json:",omitempty"` Name string RestartCount int Driver string diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go index c69b08448df4c..b5ee96a500586 100644 --- a/vendor/github.com/docker/docker/api/types/volume.go +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -27,13 +27,10 @@ type Volume struct { Name string `json:"Name"` // The driver specific options used when creating the volume. - // // Required: true Options map[string]string `json:"Options"` - // The level at which the volume exists. Either `global` for cluster-wide, - // or `local` for machine level. - // + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. 
// Required: true Scope string `json:"Scope"` diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go index 8538078dd663c..0c3772d3adfd6 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -1,7 +1,8 @@ package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` // // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- @@ -14,9 +15,7 @@ type VolumeCreateBody struct { // Required: true Driver string `json:"Driver"` - // A mapping of driver options and values. These options are - // passed directly to the driver and are driver specific. - // + // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. // Required: true DriverOpts map[string]string `json:"DriverOpts"` @@ -25,7 +24,6 @@ type VolumeCreateBody struct { Labels map[string]string `json:"Labels"` // The new volume's name. If not specified, Docker generates a name. - // // Required: true Name string `json:"Name"` } diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go index be06179bf488d..45c3c1c9aecb2 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -1,7 +1,8 @@ package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` // // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- @@ -16,8 +17,7 @@ type VolumeListOKBody struct { // Required: true Volumes []*types.Volume `json:"Volumes"` - // Warnings that occurred when fetching the list of volumes. 
- // + // Warnings that occurred when fetching the list of volumes // Required: true Warnings []string `json:"Warnings"` } diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 0649a69cc7863..b63d4d6d4986e 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -252,8 +252,7 @@ func (cli *Client) DaemonHost() string { // HTTPClient returns a copy of the HTTP client bound to the server func (cli *Client) HTTPClient() *http.Client { - c := *cli.client - return &c + return &*cli.client } // ParseHostURL parses a url string, validates the string is a host url, and diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 9d0f0dcbf0bac..178ff67409a1e 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly +// +build linux freebsd openbsd netbsd darwin dragonfly package client // import "github.com/docker/docker/client" diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index b1d5fea5bd507..5b795e0c17cef 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -5,23 +5,20 @@ import ( "encoding/json" "net/url" - "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/versions" - specs "github.com/opencontainers/image-spec/specs-go/v1" ) type configWrapper struct { *container.Config HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig - Platform *specs.Platform } // ContainerCreate creates a new container based in the given configuration. // It can be associated with a name, but it's not mandatory. 
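One subtlety in the `HTTPClient` hunk above: `&*cli.client` is not a copy in Go. For a pointer `p`, `&*p` is simply `p`, so the reverted code hands callers the client's own `http.Client` rather than a snapshot, while the dereference-assign-address form makes a real copy. A small demonstration:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	inner := &http.Client{Timeout: 5 * time.Second}

	// &*p is the same pointer: no copy is made.
	alias := &*inner
	fmt.Println(alias == inner) // true

	// Dereference, assign, take the address: a distinct copy.
	c := *inner
	copyPtr := &c
	fmt.Println(copyPtr == inner) // false

	// Mutating through the alias is visible to the original...
	alias.Timeout = time.Minute
	fmt.Println(inner.Timeout) // 1m0s

	// ...while the copy stays independent.
	copyPtr.Timeout = time.Hour
	fmt.Println(inner.Timeout) // still 1m0s
}
```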
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) { +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { var response container.ContainerCreateCreatedBody if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { @@ -33,15 +30,7 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config hostConfig.AutoRemove = false } - if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { - return response, err - } - query := url.Values{} - if platform != nil { - query.Set("platform", platforms.Format(*platform)) - } - if containerName != "" { query.Set("name", containerName) } diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index a973de597fdf3..1e7a63a9c0665 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -35,7 +35,6 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis } if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) if err != nil { diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 0a6488dde8266..6ef44c77480c1 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -24,19 +24,3 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea osType := getDockerOS(resp.header.Get("Server")) return types.ContainerStats{Body: resp.body, OSType: osType}, err } - -// ContainerStatsOneShot gets a single stat entry from a container. -// It differs from `ContainerStats` in that the API should not wait to prime the stats -func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { - query := url.Values{} - query.Set("stream", "0") - query.Set("one-shot", "1") - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return types.ContainerStats{}, err - } - - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err -} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 041bc8d49c44c..001c10288141c 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -24,7 +24,8 @@ func (err errConnectionFailed) Error() string { // IsErrConnectionFailed returns true if the error is caused by connection failed. func IsErrConnectionFailed(err error) bool { - return errors.As(err, &errConnectionFailed{}) + _, ok := errors.Cause(err).(errConnectionFailed) + return ok } // ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. 
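The `NewVersionError` checks in `ContainerCreate` gate features on the negotiated API version. Roughly, the pattern reduces to a version comparison like the following; `versionGate` is an illustrative helper, not part of the client:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

// versionGate fails early with a descriptive error when the client's
// negotiated API version predates the feature's minimum version.
func versionGate(clientVersion, required, feature string) error {
	if versions.LessThan(clientVersion, required) {
		return fmt.Errorf("%q requires API version %s, but the client is pinned to %s",
			feature, required, clientVersion)
	}
	return nil
}

func main() {
	fmt.Println(versionGate("1.24", "1.25", "stop timeout")) // error
	fmt.Println(versionGate("1.40", "1.25", "stop timeout")) // <nil>
}
```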
@@ -41,9 +42,8 @@ type notFound interface { // IsErrNotFound returns true if the error is a NotFound error, which is returned // by the API when some object is not found. func IsErrNotFound(err error) bool { - var e notFound - if errors.As(err, &e) { - return true + if _, ok := err.(notFound); ok { + return ok } return errdefs.IsNotFound(err) } diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index f0dc9d9e12f32..6e56538955ee6 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -90,7 +90,6 @@ func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url } if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index e1dc49ef0f66a..e9c9a752f83fe 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -24,7 +24,7 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu } apiPath := cli.getAPIPath(ctx, path, query) - req, err := http.NewRequest(http.MethodPost, apiPath, bodyEncoded) + req, err := http.NewRequest("POST", apiPath, bodyEncoded) if err != nil { return types.HijackedResponse{}, err } @@ -40,7 +40,7 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu // DialHijack returns a hijacked connection with negotiated protocol proto. func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequest(http.MethodPost, url, nil) + req, err := http.NewRequest("POST", url, nil) if err != nil { return nil, err } @@ -87,8 +87,6 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto // Server hijacks the connection, error 'connection closed' expected resp, err := clientconn.Do(req) - - //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons if err != httputil.ErrPersistEOF { if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index d3336d4106a9b..c2972ea950eb8 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -14,7 +14,7 @@ import ( // It returns the JSON content in the response body. 
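The two error-matching styles in these hunks differ in which wrap chains they can see: `errors.As` follows the stdlib `Unwrap` chain, while a type assertion on `errors.Cause` only follows the `github.com/pkg/errors` causer chain. A sketch, assuming pkg/errors v0.9+ (which implements both interfaces):

```go
package main

import (
	"errors"
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

type notFoundErr struct{ msg string }

func (e notFoundErr) Error() string { return e.msg }

func main() {
	// Wrapped with pkg/errors: both strategies find the cause, because
	// pkg/errors implements Unwrap as well as Cause (since v0.9.0).
	err := pkgerrors.Wrap(notFoundErr{"missing"}, "lookup failed")

	var target notFoundErr
	fmt.Println(errors.As(err, &target)) // true: walks the Unwrap chain

	_, ok := pkgerrors.Cause(err).(notFoundErr)
	fmt.Println(ok) // true: walks the Cause chain

	// Wrapped with fmt.Errorf %w: only errors.As still matches, since
	// Cause does not understand stdlib wrapping.
	err2 := fmt.Errorf("lookup failed: %w", notFoundErr{"missing"})
	fmt.Println(errors.As(err2, &target)) // true
	_, ok = pkgerrors.Cause(err2).(notFoundErr)
	fmt.Println(ok) // false
}
```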
func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { if ref != "" { - // Check if the given image name can be resolved + //Check if the given image name can be resolved if _, err := reference.ParseNormalizedNamed(ref); err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index a4d7505094cd5..4fa8c006b2ea9 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -24,7 +24,6 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions } } if optionFilters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) if err != nil { return images, err diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index 845580d4a4cdb..49d412ee375c5 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -25,15 +25,16 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options types.Im return nil, errors.New("cannot push a digest reference") } + tag := "" name := reference.FamiliarName(ref) - query := url.Values{} - if !options.All { - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } + + if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged { + tag = nameTaggedRef.Tag() } + query := url.Values{} + query.Set("tag", tag) + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index aabad4a911050..cde64be4b56bc 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -16,7 +16,6 @@ import ( "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" volumetypes "github.com/docker/docker/api/types/volume" - specs "github.com/opencontainers/image-spec/specs-go/v1" ) // CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
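The reverted `ImagePush` logic extracts a tag only when the reference is already tagged, and otherwise sends an empty `tag=` (push all tags) instead of first defaulting to `latest` via `TagNameOnly`. The reference handling looks roughly like:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Without an explicit tag, the reverted code sends tag="" (push all
	// tags); the newer code applied the "latest" default first.
	for _, name := range []string{"alpine", "alpine:3.12"} {
		ref, err := reference.ParseNormalizedNamed(name)
		if err != nil {
			panic(err)
		}

		tag := ""
		if t, ok := ref.(reference.NamedTagged); ok {
			tag = t.Tag()
		}
		fmt.Printf("%s -> name=%s tag=%q\n", name, reference.FamiliarName(ref), tag)
	}
}
```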
@@ -48,7 +47,7 @@ type CommonAPIClient interface { type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, platform *specs.Platform, containerName string) (containertypes.ContainerCreateCreatedBody, error) + ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error) ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) @@ -68,7 +67,6 @@ type ContainerAPIClient interface { ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) - ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error ContainerStop(ctx context.Context, container string, timeout *time.Duration) error ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index ed2acb55711dd..7130c1364eb28 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -13,7 +13,6 @@ import ( func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { query := url.Values{} if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index a9af001ef46b5..90f39ec14f92c 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -17,9 +17,9 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() - // because ping requests are used during API version negotiation, so we want + // because ping requests are used during API version negotiation, so we want // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping - req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + req, err := cli.buildRequest("HEAD", path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } @@ -35,7 +35,7 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { return ping, err } - req, err = cli.buildRequest(http.MethodGet, 
path.Join(cli.basePath, "/_ping"), nil, nil) + req, err = cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index cf1935e2f5ee5..8285cecd6e176 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -15,7 +15,6 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P query := url.Values{} if filter.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, filter) if err != nil { return plugins, err diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index ee15a46ed09b7..2610338da616f 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -29,12 +29,12 @@ type serverResponse struct { // head sends an http request to the docker API using the method HEAD. func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) + return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) } // get sends an http request to the docker API using the method GET with a specific Go context. func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) + return cli.sendRequest(ctx, "GET", path, query, nil, headers) } // post sends an http request to the docker API using the method POST with a specific Go context. @@ -43,21 +43,30 @@ func (cli *Client) post(ctx context.Context, path string, query url.Values, obj if err != nil { return serverResponse{}, err } - return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) + return cli.sendRequest(ctx, "POST", path, query, body, headers) } func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "PUT", path, query, body, headers) } // putRaw sends an http request to the docker API using the method PUT. func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) + return cli.sendRequest(ctx, "PUT", path, query, body, headers) } // delete sends an http request to the docker API using the method DELETE. 
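The `Ping` hunk above keeps the HEAD-then-GET probe of the unversioned `/_ping` endpoint. A self-contained sketch of that fallback shape, using a stub server rather than a real daemon:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// ping tries HEAD against /_ping first and falls back to GET for
// servers that do not support HEAD, mirroring the hunk above.
func ping(base string) (*http.Response, error) {
	resp, err := http.Head(base + "/_ping")
	if err == nil && resp.StatusCode == http.StatusOK {
		return resp, nil
	}
	return http.Get(base + "/_ping")
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodHead {
			// Simulate an older daemon that rejects HEAD.
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		w.Header().Set("API-Version", "1.40")
		fmt.Fprint(w, "OK")
	}))
	defer srv.Close()

	resp, err := ping(srv.URL)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, resp.Header.Get("API-Version"))
}
```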
func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) } type headers map[string][]string @@ -79,7 +88,7 @@ func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { } func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { - expectedPayload := (method == http.MethodPost || method == http.MethodPut) + expectedPayload := (method == "POST" || method == "PUT") if expectedPayload && body == nil { body = bytes.NewReader([]byte{}) } diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index 56bfe55b71018..620fc6cff7579 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -9,7 +9,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go index f97ec75a5cb76..64d35e7159826 100644 --- a/vendor/github.com/docker/docker/client/service_list.go +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -23,10 +23,6 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt query.Set("filters", filterJSON) } - if options.Status { - query.Set("status", "true") - } - resp, err := cli.get(ctx, "/services", query, nil) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index 942498dde2c74..2380d56382150 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -15,7 +15,6 @@ func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumet query := url.Values{} if filter.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, filter) if err != nil { return volumes, err diff --git a/vendor/github.com/docker/docker/daemon/logger/factory.go b/vendor/github.com/docker/docker/daemon/logger/factory.go index d0bedac4ad47b..84b54b2794cb6 100644 --- a/vendor/github.com/docker/docker/daemon/logger/factory.go +++ b/vendor/github.com/docker/docker/daemon/logger/factory.go @@ -7,7 +7,7 @@ import ( containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/plugingetter" - units "github.com/docker/go-units" + "github.com/docker/go-units" "github.com/pkg/errors" ) @@ -143,10 +143,6 @@ func ValidateLogOpts(name string, cfg map[string]string) error { } } - if err := validateExternal(cfg); err != nil { - return err - } - if !factory.driverRegistered(name) { return fmt.Errorf("logger: no log driver named '%s' is registered", name) } diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go index e66a6b8266f43..bbb8eeb7ec3a9 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go +++ 
b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -13,7 +13,7 @@ import ( "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" "github.com/docker/docker/daemon/logger/loggerutils" - units "github.com/docker/go-units" + "github.com/docker/go-units" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -156,7 +156,6 @@ func ValidateLogOpt(cfg map[string]string) error { case "max-size": case "compress": case "labels": - case "labels-regex": case "env": case "env-regex": case "tag": diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go index 050900197f04f..74be8e7da0a19 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go @@ -21,7 +21,5 @@ func (jl *JSONLog) Reset() { jl.Log = "" jl.Stream = "" jl.Created = time.Time{} - for k := range jl.Attrs { - delete(jl.Attrs, k) - } + jl.Attrs = make(map[string]string) } diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go index cc4649903adcd..12f676bb1a31c 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -60,65 +60,35 @@ func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, erro return msg, nil } -type decoder struct { - rdr io.Reader - dec *json.Decoder - jl *jsonlog.JSONLog -} - -func (d *decoder) Reset(rdr io.Reader) { - d.rdr = rdr - d.dec = nil - if d.jl != nil { - d.jl.Reset() - } -} - -func (d *decoder) Close() { - d.dec = nil - d.rdr = nil - d.jl = nil -} - -func (d *decoder) Decode() (msg *logger.Message, err error) { - if d.dec == nil { - d.dec = json.NewDecoder(d.rdr) - } - if d.jl == nil { - d.jl = &jsonlog.JSONLog{} - } - for retries := 0; retries < maxJSONDecodeRetry; retries++ { - msg, err = decodeLogLine(d.dec, d.jl) - if err == nil || err == io.EOF { - break - } - - logrus.WithError(err).WithField("retries", retries).Warn("got error while decoding json") - // try again, could be due to a an incomplete json object as we read - if _, ok := err.(*json.SyntaxError); ok { - d.dec = json.NewDecoder(d.rdr) - continue - } - - // io.ErrUnexpectedEOF is returned from json.Decoder when there is - // remaining data in the parser's buffer while an io.EOF occurs. - // If the json logger writes a partial json log entry to the disk - // while at the same time the decoder tries to decode it, the race condition happens. 
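On the `Reset` hunk in `jsonlog.go` above: the two ways of emptying `Attrs` are logically equivalent but differ in allocation. Deleting keys in place reuses the map's existing storage, while `make` allocates a fresh map on every call. For example:

```go
package main

import "fmt"

func main() {
	attrs := map[string]string{"a": "1", "b": "2"}

	// Variant 1: delete every key in place. The map header and its
	// backing buckets are reused, so no new allocation occurs.
	for k := range attrs {
		delete(attrs, k)
	}
	fmt.Println(len(attrs)) // 0

	// Variant 2: drop the old map and allocate a fresh one. Simpler,
	// but the old buckets become garbage and a new map is allocated
	// on every Reset.
	attrs = make(map[string]string)
	fmt.Println(len(attrs)) // 0
}
```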
- if err == io.ErrUnexpectedEOF { - d.rdr = io.MultiReader(d.dec.Buffered(), d.rdr) - d.dec = json.NewDecoder(d.rdr) - continue - } - } - return msg, err -} - // decodeFunc is used to create a decoder for the log file reader -func decodeFunc(rdr io.Reader) loggerutils.Decoder { - return &decoder{ - rdr: rdr, - dec: nil, - jl: nil, +func decodeFunc(rdr io.Reader) func() (*logger.Message, error) { + l := &jsonlog.JSONLog{} + dec := json.NewDecoder(rdr) + return func() (msg *logger.Message, err error) { + for retries := 0; retries < maxJSONDecodeRetry; retries++ { + msg, err = decodeLogLine(dec, l) + if err == nil || err == io.EOF { + break + } + + logrus.WithError(err).WithField("retries", retries).Warn("got error while decoding json") + // try again, could be due to a an incomplete json object as we read + if _, ok := err.(*json.SyntaxError); ok { + dec = json.NewDecoder(rdr) + continue + } + + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. + if err == io.ErrUnexpectedEOF { + reader := io.MultiReader(dec.Buffered(), rdr) + dec = json.NewDecoder(reader) + continue + } + } + return msg, err } } diff --git a/vendor/github.com/docker/docker/daemon/logger/log_cache_opts.go b/vendor/github.com/docker/docker/daemon/logger/log_cache_opts.go deleted file mode 100644 index 8d09c489edfb0..0000000000000 --- a/vendor/github.com/docker/docker/daemon/logger/log_cache_opts.go +++ /dev/null @@ -1,29 +0,0 @@ -package logger - -var externalValidators []LogOptValidator - -// RegisterExternalValidator adds the validator to the list of external validators. -// External validators are used by packages outside this package that need to add their own validation logic. -// This should only be called on package initialization. -func RegisterExternalValidator(v LogOptValidator) { - externalValidators = append(externalValidators, v) -} - -// AddBuiltinLogOpts updates the list of built-in log opts. This allows other packages to supplement additional log options -// without having to register an actual log driver. This is used by things that are more proxy log drivers and should -// not be exposed as a usable log driver to the API. -// This should only be called on package initialization. 
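The `decodeFunc` hunk above restores the closure-based decoder together with its bounded retry loop: on a `*json.SyntaxError` the `json.Decoder` is simply rebuilt from the raw reader, while on `io.ErrUnexpectedEOF` the bytes the decoder has already buffered are stitched back in front of the stream with `io.MultiReader`, because a concurrent writer may have flushed only part of a JSON entry. A minimal standalone sketch of that recovery pattern (the `decodeAll` helper and its retry bound are illustrative, not part of the vendored API):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

const maxRetries = 3

// decodeAll reads consecutive JSON objects from rdr, retrying when the
// decoder trips over a partially written trailing entry.
func decodeAll(rdr io.Reader) ([]map[string]string, error) {
	dec := json.NewDecoder(rdr)
	var msgs []map[string]string
	retries := 0
	for {
		var m map[string]string
		err := dec.Decode(&m)
		switch {
		case err == nil:
			retries = 0
			msgs = append(msgs, m)
		case err == io.EOF:
			return msgs, nil
		case err == io.ErrUnexpectedEOF && retries < maxRetries:
			// Keep what the decoder has buffered: together with bytes a
			// concurrent writer appends later it may form a complete object.
			rdr = io.MultiReader(dec.Buffered(), rdr)
			dec = json.NewDecoder(rdr)
			retries++
		default:
			return msgs, err
		}
	}
}

func main() {
	msgs, err := decodeAll(strings.NewReader(`{"log":"a"}{"log":"b"}`))
	fmt.Println(msgs, err)
}
```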
-func AddBuiltinLogOpts(opts map[string]bool) { - for k, v := range opts { - builtInLogOpts[k] = v - } -} - -func validateExternal(cfg map[string]string) error { - for _, v := range externalValidators { - if err := v(cfg); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go index 2b0119210231c..0f0e8f7bbea74 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go @@ -89,25 +89,12 @@ type LogFile struct { filesRefCounter refCounter // keep reference-counted of decompressed files notifyRotate *pubsub.Publisher marshal logger.MarshalFunc - createDecoder MakeDecoderFn + createDecoder makeDecoderFunc getTailReader GetTailReaderFunc perms os.FileMode } -// MakeDecoderFn creates a decoder -type MakeDecoderFn func(rdr io.Reader) Decoder - -// Decoder is for reading logs -// It is created by the log reader by calling the `MakeDecoderFunc` -type Decoder interface { - // Reset resets the decoder - // Reset is called for certain events, such as log rotations - Reset(io.Reader) - // Decode decodes the next log messeage from the stream - Decode() (*logger.Message, error) - // Close signals to the decoder that it can release whatever resources it was using. - Close() -} +type makeDecoderFunc func(rdr io.Reader) func() (*logger.Message, error) // SizeReaderAt defines a ReaderAt that also reports its size. // This is used for tailing log files. @@ -123,13 +110,13 @@ type SizeReaderAt interface { type GetTailReaderFunc func(ctx context.Context, f SizeReaderAt, nLogLines int) (rdr io.Reader, nLines int, err error) // NewLogFile creates new LogFile -func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, marshaller logger.MarshalFunc, decodeFunc MakeDecoderFn, perms os.FileMode, getTailReader GetTailReaderFunc) (*LogFile, error) { +func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode, getTailReader GetTailReaderFunc) (*LogFile, error) { log, err := openFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms) if err != nil { return nil, err } - size, err := log.Seek(0, io.SeekEnd) + size, err := log.Seek(0, os.SEEK_END) if err != nil { return nil, err } @@ -330,9 +317,6 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) } defer currentFile.Close() - dec := w.createDecoder(nil) - defer dec.Close() - currentChunk, err := newSectionReader(currentFile) if err != nil { w.mu.RUnlock() @@ -378,7 +362,7 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) readers = append(readers, currentChunk) } - tailFiles(readers, watcher, dec, w.getTailReader, config) + tailFiles(readers, watcher, w.createDecoder, w.getTailReader, config) closeFiles() w.mu.RLock() @@ -392,7 +376,7 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) notifyRotate := w.notifyRotate.Subscribe() defer w.notifyRotate.Evict(notifyRotate) - followLogs(currentFile, watcher, notifyRotate, dec, config.Since, config.Until) + followLogs(currentFile, watcher, notifyRotate, w.createDecoder, config.Since, config.Until) } func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []*os.File, err error) { @@ -431,7 +415,7 @@ func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files 
[]*os.File, }) if err != nil { - if !errors.Is(err, os.ErrNotExist) { + if !os.IsNotExist(errors.Cause(err)) { return nil, errors.Wrap(err, "error getting reference to decompressed log file") } continue @@ -491,14 +475,14 @@ func decompressfile(fileName, destFileName string, since time.Time) (*os.File, e func newSectionReader(f *os.File) (*io.SectionReader, error) { // seek to the end to get the size // we'll leave this at the end of the file since section reader does not advance the reader - size, err := f.Seek(0, io.SeekEnd) + size, err := f.Seek(0, os.SEEK_END) if err != nil { return nil, errors.Wrap(err, "error getting current file size") } return io.NewSectionReader(f, 0, size), nil } -func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, getTailReader GetTailReaderFunc, config logger.ReadConfig) { +func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, createDecoder makeDecoderFunc, getTailReader GetTailReaderFunc, config logger.ReadConfig) { nLines := config.Tail ctx, cancel := context.WithCancel(context.Background()) @@ -531,12 +515,11 @@ func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, ge } rdr := io.MultiReader(readers...) - dec.Reset(rdr) - + decodeLogLine := createDecoder(rdr) for { - msg, err := dec.Decode() + msg, err := decodeLogLine() if err != nil { - if !errors.Is(err, io.EOF) { + if errors.Cause(err) != io.EOF { watcher.Err <- err } return @@ -555,8 +538,8 @@ func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, ge } } -func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, dec Decoder, since, until time.Time) { - dec.Reset(f) +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, createDecoder makeDecoderFunc, since, until time.Time) { + decodeLogLine := createDecoder(f) name := f.Name() fileWatcher, err := watchFile(name) @@ -587,7 +570,7 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int if err := fileWatcher.Add(name); err != nil { return err } - dec.Reset(f) + decodeLogLine = createDecoder(f) return nil } @@ -598,7 +581,7 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int case e := <-fileWatcher.Events(): switch e.Op { case fsnotify.Write: - dec.Reset(f) + decodeLogLine = createDecoder(f) return nil case fsnotify.Rename, fsnotify.Remove: select { @@ -636,7 +619,7 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int oldSize := int64(-1) handleDecodeErr := func(err error) error { - if !errors.Is(err, io.EOF) { + if errors.Cause(err) != io.EOF { return err } @@ -668,7 +651,7 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int // main loop for { - msg, err := dec.Decode() + msg, err := decodeLogLine() if err != nil { if err := handleDecodeErr(err); err != nil { if err == errDone { diff --git a/vendor/github.com/docker/docker/daemon/logger/loginfo.go b/vendor/github.com/docker/docker/daemon/logger/loginfo.go index 947abd97af475..4c48235f5c73b 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loginfo.go +++ b/vendor/github.com/docker/docker/daemon/logger/loginfo.go @@ -41,22 +41,6 @@ func (info *Info) ExtraAttributes(keyMod func(string) string) (map[string]string } } - labelsRegex, ok := info.Config["labels-regex"] - if ok && len(labels) > 0 { - re, err := regexp.Compile(labelsRegex) - if err != nil { - return nil, err - } - for k, v := range info.ContainerLabels { - if 
re.MatchString(k) { - if keyMod != nil { - k = keyMod(k) - } - extra[k] = v - } - } - } - envMapping := make(map[string]string) for _, e := range info.ContainerEnv { if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { diff --git a/vendor/github.com/docker/docker/daemon/logger/metrics.go b/vendor/github.com/docker/docker/daemon/logger/metrics.go index 05b557f8625ed..b7dfd38ec2061 100644 --- a/vendor/github.com/docker/docker/daemon/logger/metrics.go +++ b/vendor/github.com/docker/docker/daemon/logger/metrics.go @@ -1,7 +1,7 @@ package logger // import "github.com/docker/docker/daemon/logger" import ( - metrics "github.com/docker/go-metrics" + "github.com/docker/go-metrics" ) var ( diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/vendor/github.com/docker/docker/pkg/filenotify/poller.go index 01ef057981f02..6161d4ab73a89 100644 --- a/vendor/github.com/docker/docker/pkg/filenotify/poller.go +++ b/vendor/github.com/docker/docker/pkg/filenotify/poller.go @@ -146,18 +146,9 @@ func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { // upon finding changes to a file or errors, sendEvent/sendErr is called func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { defer f.Close() - - timer := time.NewTimer(watchWaitTime) - if !timer.Stop() { - <-timer.C - } - defer timer.Stop() - for { - timer.Reset(watchWaitTime) - select { - case <-timer.C: + case <-time.After(watchWaitTime): case <-chClose: logrus.Debugf("watch for %s closed", f.Name()) return diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go index 87514b643d745..d4bbf3c9dcaf6 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -128,9 +128,8 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.mu.Lock() if bp.bufLen == 0 { if bp.closeErr != nil { - err := bp.closeErr bp.mu.Unlock() - return 0, err + return 0, bp.closeErr } bp.wait.Wait() if bp.bufLen == 0 && bp.closeErr != nil { diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index cf8d04b1b2019..a68b566cea2c8 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -7,8 +7,8 @@ import ( "strings" "time" - units "github.com/docker/go-units" - "github.com/moby/term" + "github.com/docker/docker/pkg/term" + "github.com/docker/go-units" "github.com/morikuni/aec" ) @@ -139,13 +139,13 @@ type JSONMessage struct { Stream string `json:"stream,omitempty"` Status string `json:"status,omitempty"` Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` // deprecated + ProgressMessage string `json:"progress,omitempty"` //deprecated ID string `json:"id,omitempty"` From string `json:"from,omitempty"` Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` // deprecated + ErrorMessage string `json:"error,omitempty"` //deprecated // Aux contains out-of-band data, such as digests for push signing and image id after building. 
Aux *json.RawMessage `json:"aux,omitempty"` } @@ -177,8 +177,8 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if isTerminal && jm.Stream == "" && jm.Progress != nil { clearLine(out) endl = "\r" - fmt.Fprint(out, endl) - } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal + fmt.Fprintf(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal return nil } if jm.TimeNano != 0 { @@ -194,7 +194,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { } if jm.Progress != nil && isTerminal { fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { // deprecated + } else if jm.ProgressMessage != "" { //deprecated fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) } else if jm.Stream != "" { fmt.Fprintf(out, "%s%s", jm.Stream, endl) diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go index 2371e92101014..28c06ff6930e2 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins.go +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -254,9 +254,6 @@ func get(name string) (*Plugin, error) { // Get returns the plugin given the specified name and requested implementation. func Get(name, imp string) (*Plugin, error) { - if name == "" { - return nil, errors.New("Unable to find plugin without name") - } pl, err := get(name) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go index 6c66cad66221d..9cb13335a8b80 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go @@ -27,7 +27,7 @@ func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { if !strings.HasPrefix(path, "/") { path = "/" + path } - req, err := http.NewRequest(http.MethodPost, path, data) + req, err := http.NewRequest("POST", path, data) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go index 3792c67a9e454..46339c282f115 100644 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -62,23 +62,23 @@ type bufferPool struct { func newBufferPoolWithSize(size int) *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { s := make([]byte, size); return &s }, + New: func() interface{} { return make([]byte, size) }, }, } } -func (bp *bufferPool) Get() *[]byte { - return bp.pool.Get().(*[]byte) +func (bp *bufferPool) Get() []byte { + return bp.pool.Get().([]byte) } -func (bp *bufferPool) Put(b *[]byte) { +func (bp *bufferPool) Put(b []byte) { bp.pool.Put(b) } // Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. 
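The `pools.go` hunk swaps the pool's element type between `*[]byte` and plain `[]byte`. The distinction is purely about allocations: putting a slice value into a `sync.Pool` copies its header into a freshly allocated interface value on every `Put` (staticcheck flags this as SA6002), whereas storing a pointer to the slice keeps `Get`/`Put` allocation-free. A small sketch of the pointer-based variant (the `bufPool` name and the 32 KB size are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// bufPool hands out *[]byte so Put does not heap-allocate a copy of the
// slice header each time a buffer is returned.
var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

// copyBuffered is an io.Copy wrapper that reuses pooled buffers.
func copyBuffered(dst io.Writer, src io.Reader) (int64, error) {
	buf := bufPool.Get().(*[]byte)
	defer bufPool.Put(buf)
	return io.CopyBuffer(dst, src, *buf)
}

func main() {
	var sb strings.Builder
	n, err := copyBuffered(&sb, strings.NewReader("hello"))
	fmt.Println(n, err, sb.String())
}
```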
func Copy(dst io.Writer, src io.Reader) (written int64, err error) { buf := buffer32KPool.Get() - written, err = io.CopyBuffer(dst, src, *buf) + written, err = io.CopyBuffer(dst, src, buf) buffer32KPool.Put(buf) return } diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go index 07450a2d7084b..7ca07dc640af3 100644 --- a/vendor/github.com/docker/docker/pkg/progress/progressreader.go +++ b/vendor/github.com/docker/docker/pkg/progress/progressreader.go @@ -34,7 +34,7 @@ func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action stri func (p *Reader) Read(buf []byte) (n int, err error) { read, err := p.in.Read(buf) p.current += int64(read) - updateEvery := int64(1024 * 512) // 512kB + updateEvery := int64(1024 * 512) //512kB if p.size > 0 { // Update progress for every 1% read if 1% < 512kB if increment := int64(0.01 * float64(p.size)); increment < updateEvery { diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go index 32b2f18925900..76033ed9e47a9 100644 --- a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go @@ -107,12 +107,9 @@ func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg // send under a select as to not block if the receiver is unavailable if p.timeout > 0 { - timeout := time.NewTimer(p.timeout) - defer timeout.Stop() - select { case sub <- v: - case <-timeout.C: + case <-time.After(p.timeout): } return } diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go index 90f5a7f360f22..c82fe603f663c 100644 --- a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go @@ -18,7 +18,7 @@ var eol = []byte("\n") // ErrNonPositiveLinesNumber is an error returned if the lines number was negative. var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") -// TailFile returns last n lines of the passed in file. +//TailFile returns last n lines of the passed in file. func TailFile(f *os.File, n int) ([][]byte, error) { size, err := f.Seek(0, io.SeekEnd) if err != nil { @@ -127,6 +127,7 @@ type scanner struct { delim []byte err error idx int + done bool } func (s *scanner) Start(ctx context.Context) int64 { diff --git a/vendor/github.com/moby/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go similarity index 94% rename from vendor/github.com/moby/term/ascii.go rename to vendor/github.com/docker/docker/pkg/term/ascii.go index 55873c0556c90..87bca8d4acdb1 100644 --- a/vendor/github.com/moby/term/ascii.go +++ b/vendor/github.com/docker/docker/pkg/term/ascii.go @@ -1,4 +1,4 @@ -package term +package term // import "github.com/docker/docker/pkg/term" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/term/proxy.go b/vendor/github.com/docker/docker/pkg/term/proxy.go new file mode 100644 index 0000000000000..da733e58484c3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/proxy.go @@ -0,0 +1,78 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "io" +) + +// EscapeError is special error which returned by a TTY proxy reader's Read() +// method in case its detach escape sequence is read. 
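The `pubsub` hunk above trades a reusable `time.NewTimer` for `time.After` in the timed send. Both give the same non-blocking publish semantics, but `time.After` allocates a new timer on every call and, at least before the Go 1.23 timer changes, that timer is not reclaimed until it fires, which adds up on hot publish paths; the explicit timer with a deferred `Stop` avoids that cost. A condensed sketch of the timed-send pattern (standalone; `publish` is an illustrative stand-in for `sendTopic`):

```go
package main

import (
	"fmt"
	"time"
)

// publish tries to hand v to sub, giving up after timeout so a single
// slow subscriber cannot block the publisher.
func publish(sub chan<- int, v int, timeout time.Duration) bool {
	t := time.NewTimer(timeout)
	defer t.Stop() // release the timer even when the send wins the race
	select {
	case sub <- v:
		return true
	case <-t.C:
		return false
	}
}

func main() {
	slow := make(chan int) // unbuffered and never drained: always times out
	fmt.Println(publish(slow, 42, 10*time.Millisecond))
}
```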
+type EscapeError struct{} + +func (EscapeError) Error() string { + return "read escape sequence" +} + +// escapeProxy is used only for attaches with a TTY. It is used to proxy +// stdin keypresses from the underlying reader and look for the passed in +// escape key sequence to signal a detach. +type escapeProxy struct { + escapeKeys []byte + escapeKeyPos int + r io.Reader +} + +// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader +// and detects when the specified escape keys are read, in which case the Read +// method will return an error of type EscapeError. +func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { + return &escapeProxy{ + escapeKeys: escapeKeys, + r: r, + } +} + +func (r *escapeProxy) Read(buf []byte) (int, error) { + nr, err := r.r.Read(buf) + + if len(r.escapeKeys) == 0 { + return nr, err + } + + preserve := func() { + // this preserves the original key presses in the passed in buffer + nr += r.escapeKeyPos + preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf...) + r.escapeKeyPos = 0 + copy(buf[0:nr], preserve) + } + + if nr != 1 || err != nil { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, err + } + + if buf[0] != r.escapeKeys[r.escapeKeyPos] { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, nil + } + + if r.escapeKeyPos == len(r.escapeKeys)-1 { + return 0, EscapeError{} + } + + // Looks like we've got an escape key, but we need to match again on the next + // read. + // Store the current escape key we found so we can look for the next one on + // the next read. + // Since this is an escape key, make sure we don't let the caller read it + // If later on we find that this is not the escape sequence, we'll add the + // keys back + r.escapeKeyPos++ + return nr - r.escapeKeyPos, nil +} diff --git a/vendor/github.com/moby/term/tc.go b/vendor/github.com/docker/docker/pkg/term/tc.go similarity index 82% rename from vendor/github.com/moby/term/tc.go rename to vendor/github.com/docker/docker/pkg/term/tc.go index 87d263256f177..01bcaa8abb187 100644 --- a/vendor/github.com/moby/term/tc.go +++ b/vendor/github.com/docker/docker/pkg/term/tc.go @@ -1,6 +1,6 @@ -// +build !windows,!illumos,!solaris +// +build !windows -package term +package term // import "github.com/docker/docker/pkg/term" import ( "syscall" diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go similarity index 97% rename from vendor/github.com/moby/term/term.go rename to vendor/github.com/docker/docker/pkg/term/term.go index c3b83635db96a..0589a955194bd 100644 --- a/vendor/github.com/moby/term/term.go +++ b/vendor/github.com/docker/docker/pkg/term/term.go @@ -1,8 +1,8 @@ -// +build !windows,!illumos,!solaris +// +build !windows // Package term provides structures and helper functions to work with // terminal (state, sizes). 
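The new `proxy.go` above implements detach detection for TTY attaches: the proxy withholds bytes that extend a partial match of the escape sequence, replays them (`preserve`) as soon as the match breaks, and surfaces an `EscapeError` once the whole sequence has been read. From the caller's side it is just another reader; a hedged usage sketch, assuming the package is importable at its vendored path and using Docker's conventional ctrl-p, ctrl-q detach keys (byte values 16 and 17):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	escapeKeys := []byte{16, 17} // ctrl-p then ctrl-q signals a detach
	stdin := term.NewEscapeProxy(os.Stdin, escapeKeys)

	buf := make([]byte, 1024)
	for {
		n, err := stdin.Read(buf)
		if err != nil {
			var esc term.EscapeError
			if errors.As(err, &esc) {
				fmt.Println("detach requested")
				return
			}
			if err != io.EOF {
				fmt.Fprintln(os.Stderr, err)
			}
			return
		}
		os.Stdout.Write(buf[:n])
	}
}
```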
-package term +package term // import "github.com/docker/docker/pkg/term" import ( "errors" diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go similarity index 67% rename from vendor/github.com/moby/term/term_windows.go rename to vendor/github.com/docker/docker/pkg/term/term_windows.go index 2e512759e5c7c..a3c3db1315740 100644 --- a/vendor/github.com/moby/term/term_windows.go +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -1,12 +1,13 @@ -package term +package term // import "github.com/docker/docker/pkg/term" import ( "io" "os" "os/signal" + "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE - windowsconsole "github.com/moby/term/windows" - "golang.org/x/sys/windows" + "github.com/Azure/go-ansiterm/winterm" + "github.com/docker/docker/pkg/term/windows" ) // State holds the console mode for the terminal. @@ -27,42 +28,37 @@ var vtInputSupported bool func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // Turn on VT handling on all std handles, if possible. This might // fail, in which case we will fall back to terminal emulation. - var ( - emulateStdin, emulateStdout, emulateStderr bool - - mode uint32 - ) - - fd := windows.Handle(os.Stdin.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { + var emulateStdin, emulateStdout, emulateStderr bool + fd := os.Stdin.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { emulateStdin = true } else { vtInputSupported = true } // Unconditionally set the console mode back even on failure because SetConsoleMode // remembers invalid bits on input handles. - _ = windows.SetConsoleMode(fd, mode) + winterm.SetConsoleMode(fd, mode) } - fd = windows.Handle(os.Stdout.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { + fd = os.Stdout.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { emulateStdout = true } else { - _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) } } - fd = windows.Handle(os.Stderr.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { + fd = os.Stderr.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. 
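The probe-then-commit sequence in `StdStreams` above exists because `SetConsoleMode` fails outright when any requested bit is unsupported, so each VT flag is first validated with a trial set (and on stdin the original mode is always written back, since input handles remember invalid bits). The same check in isolation, written against `golang.org/x/sys/windows` as in the removed moby/term variant (a Windows-only sketch):

```go
// +build windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/windows"
)

// enableVT turns on ANSI/VT processing for an output handle, falling
// back gracefully when the console does not support it.
func enableVT(f *os.File) bool {
	h := windows.Handle(f.Fd())
	var mode uint32
	if err := windows.GetConsoleMode(h, &mode); err != nil {
		return false // not a console at all (redirected pipe or file)
	}
	err := windows.SetConsoleMode(h, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
	return err == nil // on failure the caller should emulate ANSI instead
}

func main() {
	fmt.Println("VT output supported:", enableVT(os.Stdout))
}
```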
- if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { emulateStderr = true } else { - _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) } } @@ -71,19 +67,19 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // go-ansiterm hasn't switch to x/sys/windows. // TODO: switch back to x/sys/windows once go-ansiterm has switched if emulateStdin { - stdIn = windowsconsole.NewAnsiReader(windows.STD_INPUT_HANDLE) + stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE) } else { stdIn = os.Stdin } if emulateStdout { - stdOut = windowsconsole.NewAnsiWriter(windows.STD_OUTPUT_HANDLE) + stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) } else { stdOut = os.Stdout } if emulateStderr { - stdErr = windowsconsole.NewAnsiWriter(windows.STD_ERROR_HANDLE) + stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE) } else { stdErr = os.Stderr } @@ -98,8 +94,8 @@ func GetFdInfo(in interface{}) (uintptr, bool) { // GetWinsize returns the window size based on the specified file descriptor. func GetWinsize(fd uintptr) (*Winsize, error) { - var info windows.ConsoleScreenBufferInfo - if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { return nil, err } @@ -113,23 +109,20 @@ func GetWinsize(fd uintptr) (*Winsize, error) { // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { - var mode uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &mode) - return err == nil + return windowsconsole.IsConsole(fd) } // RestoreTerminal restores the terminal connected to the given file descriptor // to a previous state. func RestoreTerminal(fd uintptr, state *State) error { - return windows.SetConsoleMode(windows.Handle(fd), state.mode) + return winterm.SetConsoleMode(fd, state.mode) } // SaveState saves the state of the terminal connected to the given file descriptor. func SaveState(fd uintptr) (*State, error) { - var mode uint32 - - if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { - return nil, err + mode, e := winterm.GetConsoleMode(fd) + if e != nil { + return nil, e } return &State{mode: mode}, nil @@ -139,9 +132,9 @@ func SaveState(fd uintptr) (*State, error) { // -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx func DisableEcho(fd uintptr, state *State) error { mode := state.mode - mode &^= windows.ENABLE_ECHO_INPUT - mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT - err := windows.SetConsoleMode(windows.Handle(fd), mode) + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + err := winterm.SetConsoleMode(fd, mode) if err != nil { return err } @@ -176,7 +169,7 @@ func SetRawTerminalOutput(fd uintptr) (*State, error) { // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this // version of Windows. 
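Both sides of the `IsTerminal` hunk above reduce "is this a terminal" to "does `GetConsoleMode` succeed for the handle"; only the backing package differs. Code outside this vendored tree can ask the same question portably, for example through `golang.org/x/term` (a sketch; the x/term dependency is an assumption, not something this tree imports):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	// On Windows this is a GetConsoleMode probe; on Unix it is a termios
	// ioctl probe. Redirected pipes and files report false.
	fmt.Println("stdout is a terminal:", term.IsTerminal(int(os.Stdout.Fd())))
}
```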
- _ = windows.SetConsoleMode(windows.Handle(fd), state.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) + winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN) return state, err } @@ -195,21 +188,21 @@ func MakeRaw(fd uintptr) (*State, error) { // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx // Disable these modes - mode &^= windows.ENABLE_ECHO_INPUT - mode &^= windows.ENABLE_LINE_INPUT - mode &^= windows.ENABLE_MOUSE_INPUT - mode &^= windows.ENABLE_WINDOW_INPUT - mode &^= windows.ENABLE_PROCESSED_INPUT + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT // Enable these modes - mode |= windows.ENABLE_EXTENDED_FLAGS - mode |= windows.ENABLE_INSERT_MODE - mode |= windows.ENABLE_QUICK_EDIT_MODE + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE if vtInputSupported { - mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT } - err = windows.SetConsoleMode(windows.Handle(fd), mode) + err = winterm.SetConsoleMode(fd, mode) if err != nil { return nil, err } @@ -222,7 +215,7 @@ func restoreAtInterrupt(fd uintptr, state *State) { go func() { _ = <-sigchan - _ = RestoreTerminal(fd, state) + RestoreTerminal(fd, state) os.Exit(0) }() } diff --git a/vendor/github.com/moby/term/termios_bsd.go b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go similarity index 95% rename from vendor/github.com/moby/term/termios_bsd.go rename to vendor/github.com/docker/docker/pkg/term/termios_bsd.go index acdddb2e9f392..48b16f52039ca 100644 --- a/vendor/github.com/moby/term/termios_bsd.go +++ b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go @@ -1,6 +1,6 @@ // +build darwin freebsd openbsd netbsd -package term +package term // import "github.com/docker/docker/pkg/term" import ( "unsafe" diff --git a/vendor/github.com/moby/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go similarity index 94% rename from vendor/github.com/moby/term/termios_linux.go rename to vendor/github.com/docker/docker/pkg/term/termios_linux.go index 0f21abcc2fa13..6d4c63fdb75e5 100644 --- a/vendor/github.com/moby/term/termios_linux.go +++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go @@ -1,4 +1,4 @@ -package term +package term // import "github.com/docker/docker/pkg/term" import ( "golang.org/x/sys/unix" diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go similarity index 87% rename from vendor/github.com/moby/term/windows/ansi_reader.go rename to vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go index 155251521b09a..1d7c452cc845b 100644 --- a/vendor/github.com/moby/term/windows/ansi_reader.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go @@ -1,6 +1,6 @@ // +build windows -package windowsconsole +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" import ( "bytes" @@ -31,6 +31,7 @@ type ansiReader struct { // NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a // Windows console input handle. 
func NewAnsiReader(nFile int) io.ReadCloser { + initLogger() file, fd := winterm.GetStdFile(nFile) return &ansiReader{ file: file, @@ -58,6 +59,8 @@ func (ar *ansiReader) Read(p []byte) (int, error) { // Previously read bytes exist, read as much as we can and return if len(ar.buffer) > 0 { + logger.Debugf("Reading previously cached bytes") + originalLength := len(ar.buffer) copiedLength := copy(p, ar.buffer) @@ -67,14 +70,16 @@ func (ar *ansiReader) Read(p []byte) (int, error) { ar.buffer = ar.buffer[copiedLength:] } + logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) return copiedLength, nil } // Read and translate key events - events, err := readInputEvents(ar, len(p)) + events, err := readInputEvents(ar.fd, len(p)) if err != nil { return 0, err } else if len(events) == 0 { + logger.Debug("No input events detected") return 0, nil } @@ -82,9 +87,11 @@ func (ar *ansiReader) Read(p []byte) (int, error) { // Save excess bytes and right-size keyBytes if len(keyBytes) > len(p) { + logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) ar.buffer = keyBytes[len(p):] keyBytes = keyBytes[:len(p)] } else if len(keyBytes) == 0 { + logger.Debug("No key bytes returned from the translator") return 0, nil } @@ -93,11 +100,13 @@ func (ar *ansiReader) Read(p []byte) (int, error) { return 0, errors.New("unexpected copy length encountered") } + logger.Debugf("Read p[%d]: % x", copiedLength, p) + logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) return copiedLength, nil } // readInputEvents polls until at least one event is available. -func readInputEvents(ar *ansiReader, maxBytes int) ([]winterm.INPUT_RECORD, error) { +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { // Determine the maximum number of records to retrieve // -- Cast around the type system to obtain the size of a single INPUT_RECORD. // unsafe.Sizeof requires an expression vs. 
a type-reference; the casting @@ -109,23 +118,25 @@ func readInputEvents(ar *ansiReader, maxBytes int) ([]winterm.INPUT_RECORD, erro } else if countRecords == 0 { countRecords = 1 } + logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) // Wait for and read input events events := make([]winterm.INPUT_RECORD, countRecords) nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(ar.fd, winterm.WAIT_INFINITE) + eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) if err != nil { return nil, err } if eventsExist { - err = winterm.ReadConsoleInput(ar.fd, events, &nEvents) + err = winterm.ReadConsoleInput(fd, events, &nEvents) if err != nil { return nil, err } } // Return a slice restricted to the number of returned records + logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) return events[:nEvents], nil } diff --git a/vendor/github.com/moby/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go similarity index 79% rename from vendor/github.com/moby/term/windows/ansi_writer.go rename to vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go index ccb5ef07757f3..7799a03fc59ed 100644 --- a/vendor/github.com/moby/term/windows/ansi_writer.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go @@ -1,6 +1,6 @@ // +build windows -package windowsconsole +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" import ( "io" @@ -24,6 +24,7 @@ type ansiWriter struct { // NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a // Windows console output handle. func NewAnsiWriter(nFile int) io.Writer { + initLogger() file, fd := winterm.GetStdFile(nFile) info, err := winterm.GetConsoleScreenBufferInfo(fd) if err != nil { @@ -31,8 +32,9 @@ func NewAnsiWriter(nFile int) io.Writer { } parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) - return &ansiWriter{ + aw := &ansiWriter{ file: file, fd: fd, infoReset: info, @@ -40,6 +42,10 @@ func NewAnsiWriter(nFile int) io.Writer { escapeSequence: []byte(ansiterm.KEY_ESC_CSI), parser: parser, } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw } func (aw *ansiWriter) Fd() uintptr { @@ -52,5 +58,7 @@ func (aw *ansiWriter) Write(p []byte) (total int, err error) { return 0, nil } + logger.Infof("Write: % x", p) + logger.Infof("Write: %s", string(p)) return aw.parser.Parse(p) } diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go similarity index 63% rename from vendor/github.com/moby/term/windows/console.go rename to vendor/github.com/docker/docker/pkg/term/windows/console.go index 01fdc0f2a1d7a..5274019758050 100644 --- a/vendor/github.com/moby/term/windows/console.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go @@ -1,11 +1,11 @@ // +build windows -package windowsconsole +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" import ( "os" - "golang.org/x/sys/windows" + "github.com/Azure/go-ansiterm/winterm" ) // GetHandleInfo returns file descriptor and bool indicating whether the file is a console. 
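The restored debug statements in `ansi_reader.go` and `ansi_writer.go` lean on the `% x` verb, which renders a byte slice as space-separated hex, a convenient form for tracing raw key bytes. For reference, plain `fmt` is all that is involved:

```go
package main

import "fmt"

func main() {
	keys := []byte{0x1b, '[', 'A'} // an up-arrow escape sequence
	fmt.Printf("read keyBytes[%d]: % x\n", len(keys), keys)
	// prints: read keyBytes[3]: 1b 5b 41
}
```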
@@ -22,18 +22,14 @@ func GetHandleInfo(in interface{}) (uintptr, bool) { if file, ok := in.(*os.File); ok { inFd = file.Fd() - isTerminal = isConsole(inFd) + isTerminal = IsConsole(inFd) } return inFd, isTerminal } // IsConsole returns true if the given file descriptor is a Windows Console. // The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -// Deprecated: use golang.org/x/sys/windows.GetConsoleMode() or golang.org/x/crypto/ssh/terminal.IsTerminal() -var IsConsole = isConsole - -func isConsole(fd uintptr) bool { - var mode uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &mode) - return err == nil +func IsConsole(fd uintptr) bool { + _, e := winterm.GetConsoleMode(fd) + return e == nil } diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go new file mode 100644 index 0000000000000..3e5593ca6a683 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go @@ -0,0 +1,33 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" + +import ( + "io/ioutil" + "os" + "sync" + + "github.com/Azure/go-ansiterm" + "github.com/sirupsen/logrus" +) + +var logger *logrus.Logger +var initOnce sync.Once + +func initLogger() { + initOnce.Do(func() { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiReaderWriter.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + }) +} diff --git a/vendor/github.com/moby/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go similarity index 91% rename from vendor/github.com/moby/term/winsize.go rename to vendor/github.com/docker/docker/pkg/term/winsize.go index 1ef98d59961e0..a19663ad834bf 100644 --- a/vendor/github.com/moby/term/winsize.go +++ b/vendor/github.com/docker/docker/pkg/term/winsize.go @@ -1,6 +1,6 @@ // +build !windows -package term +package term // import "github.com/docker/docker/pkg/term" import ( "golang.org/x/sys/unix" diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 50d56ffbf07e5..52b111d5f36ef 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -1,5 +1,5 @@ [![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) [![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) [![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) [![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) @@ -18,16 +18,16 @@ Source code: 
https://github.com/json-iterator/go-benchmark/blob/master/src/githu Raw Result (easyjson requires static code generation) -| | ns/op | allocation bytes | allocation times | -| --- | --- | --- | --- | -| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | -| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | -| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | -| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | -| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | -| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | +| | ns/op | allocation bytes | allocation times | +| --------------- | ----------- | ---------------- | ---------------- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | -Always benchmark with your own workload. +Always benchmark with your own workload. The result depends heavily on the data input. # Usage @@ -41,10 +41,10 @@ import "encoding/json" json.Marshal(&data) ``` -with +with ```go -import "github.com/json-iterator/go" +import jsoniter "github.com/json-iterator/go" var json = jsoniter.ConfigCompatibleWithStandardLibrary json.Marshal(&data) @@ -60,7 +60,7 @@ json.Unmarshal(input, &data) with ```go -import "github.com/json-iterator/go" +import jsoniter "github.com/json-iterator/go" var json = jsoniter.ConfigCompatibleWithStandardLibrary json.Unmarshal(input, &data) @@ -78,10 +78,10 @@ go get github.com/json-iterator/go Contributors -* [thockin](https://github.com/thockin) -* [mattn](https://github.com/mattn) -* [cch123](https://github.com/cch123) -* [Oleg Shaldybin](https://github.com/olegshaldybin) -* [Jason Toffaletti](https://github.com/toffaletti) +- [thockin](https://github.com/thockin) +- [mattn](https://github.com/mattn) +- [cch123](https://github.com/cch123) +- [Oleg Shaldybin](https://github.com/olegshaldybin) +- [Jason Toffaletti](https://github.com/toffaletti) Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go index a4b93c78c8221..1f12f6612de98 100644 --- a/vendor/github.com/json-iterator/go/any_str.go +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -64,7 +64,6 @@ func (any *stringAny) ToInt64() int64 { flag := 1 startPos := 0 - endPos := 0 if any.val[0] == '+' || any.val[0] == '-' { startPos = 1 } @@ -73,6 +72,7 @@ func (any *stringAny) ToInt64() int64 { flag = -1 } + endPos := startPos for i := startPos; i < len(any.val); i++ { if any.val[i] >= '0' && any.val[i] <= '9' { endPos = i + 1 @@ -98,7 +98,6 @@ func (any *stringAny) ToUint64() uint64 { } startPos := 0 - endPos := 0 if any.val[0] == '-' { return 0 @@ -107,6 +106,7 @@ func (any *stringAny) ToUint64() uint64 { startPos = 1 } + endPos := startPos for i := startPos; i < len(any.val); i++ { if any.val[i] >= '0' && any.val[i] <= '9' { endPos = i + 1 diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go index 8c58fcba5922f..2adcdc3b790e5 100644 --- a/vendor/github.com/json-iterator/go/config.go +++ b/vendor/github.com/json-iterator/go/config.go @@ -183,11 +183,11 @@ func (cfg *frozenConfig) 
validateJsonRawMessage(extension EncoderExtension) { encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { rawMessage := *(*json.RawMessage)(ptr) iter := cfg.BorrowIterator([]byte(rawMessage)) + defer cfg.ReturnIterator(iter) iter.Read() - if iter.Error != nil { + if iter.Error != nil && iter.Error != io.EOF { stream.WriteRaw("null") } else { - cfg.ReturnIterator(iter) stream.WriteRaw(string(rawMessage)) } }, func(ptr unsafe.Pointer) bool { diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go index b65137114f65b..58ee89c849e7b 100644 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -150,7 +150,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { if c == '}' { return iter.decrementDepth() } - iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) iter.decrementDepth() return false } @@ -206,7 +206,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { if c == '}' { return iter.decrementDepth() } - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) iter.decrementDepth() return false } diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 80320cd64341f..74a97bfe5abfb 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -475,7 +475,7 @@ func calcFieldNames(originalFieldName string, tagProvidedFieldName string, whole fieldNames = []string{tagProvidedFieldName} } // private? 
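The README hunks for json-iterator earlier in this diff show its headline feature: `ConfigCompatibleWithStandardLibrary` is a frozen configuration exposing the familiar `Marshal`/`Unmarshal` surface, so migrating from `encoding/json` is an import swap plus one variable. A runnable round-trip using exactly the API the README quotes:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

var json = jsoniter.ConfigCompatibleWithStandardLibrary

type point struct {
	X int `json:"x"`
	Y int `json:"y"`
}

func main() {
	out, err := json.Marshal(point{X: 1, Y: 2})
	fmt.Println(string(out), err) // {"x":1,"y":2} <nil>

	var p point
	err = json.Unmarshal([]byte(`{"x":3,"y":4}`), &p)
	fmt.Println(p, err) // {3 4} <nil>
}
```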
- isNotExported := unicode.IsLower(rune(originalFieldName[0])) + isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' if isNotExported { fieldNames = []string{} } diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 9e2b623fe8d61..5829671301353 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -49,6 +49,33 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { return decoder } } + + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + switch typ.Kind() { case reflect.String: return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) @@ -63,31 +90,6 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { typ = reflect2.DefaultTypeOfKind(typ.Kind()) return &numericMapKeyDecoder{decoderOfType(ctx, typ)} default: - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(unmarshalerType) { - return &unmarshalerDecoder{ - valType: typ, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(textUnmarshalerType) { - return &textUnmarshalerDecoder{ - valType: typ, - } - } return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} } } @@ -103,6 +105,19 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { return encoder } } + + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + switch typ.Kind() { case reflect.String: return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) @@ -117,17 +132,6 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { typ = reflect2.DefaultTypeOfKind(typ.Kind()) return &numericMapKeyEncoder{encoderOfType(ctx, typ)} default: - if typ == textMarshalerType { - return &directTextMarshalerEncoder{ - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - if typ.Implements(textMarshalerType) { - return &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } if typ.Kind() == reflect.Interface { return &dynamicMapKeyEncoder{ctx, typ} } @@ -163,10 +167,6 @@ func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { if c == '}' { return } - if c != '"' { - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) - return - } iter.unreadByte() key := decoder.keyType.UnsafeNew() decoder.keyDecoder.Decode(key, iter) diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go index 43ec71d6dadf3..fa71f47489121 100644 --- a/vendor/github.com/json-iterator/go/reflect_optional.go +++ 
b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -2,7 +2,6 @@ package jsoniter import ( "github.com/modern-go/reflect2" - "reflect" "unsafe" ) @@ -10,9 +9,6 @@ func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { ptrType := typ.(*reflect2.UnsafePtrType) elemType := ptrType.Elem() decoder := decoderOfType(ctx, elemType) - if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { - return &dereferenceDecoder{elemType, decoder} - } return &OptionalDecoder{elemType, decoder} } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 5ad5cc561af2c..d7eb0eb5caa85 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -507,7 +507,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } if c != '}' { @@ -588,7 +588,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -622,7 +622,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -660,7 +660,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -702,7 +702,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -748,7 +748,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -798,7 +798,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -852,7 +852,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, 
iter.Error.Error()) } iter.decrementDepth() @@ -910,7 +910,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -972,7 +972,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -1038,7 +1038,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go index 17662fdedcb55..23d8a3ad6b126 100644 --- a/vendor/github.com/json-iterator/go/stream.go +++ b/vendor/github.com/json-iterator/go/stream.go @@ -103,14 +103,14 @@ func (stream *Stream) Flush() error { if stream.Error != nil { return stream.Error } - n, err := stream.out.Write(stream.buf) + _, err := stream.out.Write(stream.buf) if err != nil { if stream.Error == nil { stream.Error = err } return err } - stream.buf = stream.buf[n:] + stream.buf = stream.buf[:0] return nil } @@ -177,7 +177,6 @@ func (stream *Stream) WriteEmptyObject() { func (stream *Stream) WriteMore() { stream.writeByte(',') stream.writeIndention(0) - stream.Flush() } // WriteArrayStart write [ with possible indention diff --git a/vendor/github.com/moby/term/.gitignore b/vendor/github.com/moby/term/.gitignore deleted file mode 100644 index b0747ff010a7f..0000000000000 --- a/vendor/github.com/moby/term/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -# if you want to ignore files created by your editor/tools, consider using a -# global .gitignore or .git/info/exclude see https://help.github.com/articles/ignoring-files -.* -!.github -!.gitignore -profile.out -# support running go modules in vendor mode for local development -vendor/ diff --git a/vendor/github.com/moby/term/LICENSE b/vendor/github.com/moby/term/LICENSE deleted file mode 100644 index 6d8d58fb676bb..0000000000000 --- a/vendor/github.com/moby/term/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/moby/term/README.md b/vendor/github.com/moby/term/README.md deleted file mode 100644 index 0ce92cc339800..0000000000000 --- a/vendor/github.com/moby/term/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# term - utilities for dealing with terminals - -![Test](https://github.com/moby/term/workflows/Test/badge.svg) [![GoDoc](https://godoc.org/github.com/moby/term?status.svg)](https://godoc.org/github.com/moby/term) [![Go Report Card](https://goreportcard.com/badge/github.com/moby/term)](https://goreportcard.com/report/github.com/moby/term) - -term provides structures and helper functions to work with terminal (state, sizes). 
- -#### Using term - -```go -package main - -import ( - "log" - "os" - - "github.com/moby/term" -) - -func main() { - fd := os.Stdin.Fd() - if term.IsTerminal(fd) { - ws, err := term.GetWinsize(fd) - if err != nil { - log.Fatalf("term.GetWinsize: %s", err) - } - log.Printf("%d:%d\n", ws.Height, ws.Width) - } -} -``` - -## Contributing - -Want to hack on term? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. - -## Copyright and license -Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. diff --git a/vendor/github.com/moby/term/go.mod b/vendor/github.com/moby/term/go.mod deleted file mode 100644 index bf55ebee10a9d..0000000000000 --- a/vendor/github.com/moby/term/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/moby/term - -go 1.13 - -require ( - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 - github.com/creack/pty v1.1.9 - github.com/google/go-cmp v0.4.0 - github.com/pkg/errors v0.9.1 // indirect - golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd - gotest.tools/v3 v3.0.2 -) diff --git a/vendor/github.com/moby/term/go.sum b/vendor/github.com/moby/term/go.sum deleted file mode 100644 index 0d34962a0a84b..0000000000000 --- a/vendor/github.com/moby/term/go.sum +++ /dev/null @@ -1,23 +0,0 @@ -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= -gotest.tools/v3 v3.0.2/go.mod 
h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git a/vendor/github.com/moby/term/proxy.go b/vendor/github.com/moby/term/proxy.go deleted file mode 100644 index c47756b89a9cf..0000000000000 --- a/vendor/github.com/moby/term/proxy.go +++ /dev/null @@ -1,88 +0,0 @@ -package term - -import ( - "io" -) - -// EscapeError is special error which returned by a TTY proxy reader's Read() -// method in case its detach escape sequence is read. -type EscapeError struct{} - -func (EscapeError) Error() string { - return "read escape sequence" -} - -// escapeProxy is used only for attaches with a TTY. It is used to proxy -// stdin keypresses from the underlying reader and look for the passed in -// escape key sequence to signal a detach. -type escapeProxy struct { - escapeKeys []byte - escapeKeyPos int - r io.Reader - buf []byte -} - -// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader -// and detects when the specified escape keys are read, in which case the Read -// method will return an error of type EscapeError. -func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { - return &escapeProxy{ - escapeKeys: escapeKeys, - r: r, - } -} - -func (r *escapeProxy) Read(buf []byte) (n int, err error) { - if len(r.escapeKeys) > 0 && r.escapeKeyPos == len(r.escapeKeys) { - return 0, EscapeError{} - } - - if len(r.buf) > 0 { - n = copy(buf, r.buf) - r.buf = r.buf[n:] - } - - nr, err := r.r.Read(buf[n:]) - n += nr - if len(r.escapeKeys) == 0 { - return n, err - } - - for i := 0; i < n; i++ { - if buf[i] == r.escapeKeys[r.escapeKeyPos] { - r.escapeKeyPos++ - - // Check if the full escape sequence is matched. - if r.escapeKeyPos == len(r.escapeKeys) { - n = i + 1 - r.escapeKeyPos - if n < 0 { - n = 0 - } - return n, EscapeError{} - } - continue - } - - // If we need to prepend a partial escape sequence from the previous - // read, make sure the new buffer size doesn't exceed len(buf). - // Otherwise, preserve any extra data in a buffer for the next read. - if i < r.escapeKeyPos { - preserve := make([]byte, 0, r.escapeKeyPos+n) - preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) - preserve = append(preserve, buf[:n]...) - n = copy(buf, preserve) - i += r.escapeKeyPos - r.buf = append(r.buf, preserve[n:]...) - } - r.escapeKeyPos = 0 - } - - // If we're in the middle of reading an escape sequence, make sure we don't - // let the caller read it. If later on we find that this is not the escape - // sequence, we'll prepend it back to buf. 
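Editor's note on the `proxy.go` file being dropped from vendor: `NewEscapeProxy` wraps a reader and surfaces an `EscapeError` once the configured detach sequence has been read. A hedged sketch of how a caller consumes the removed API (the input bytes and key sequence are made up for illustration):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/moby/term"
)

func main() {
	// Ctrl-P Ctrl-Q (0x10, 0x11) is the conventional Docker detach sequence.
	escapeKeys := []byte{16, 17}
	input := bytes.NewReader([]byte("interactive input\x10\x11"))
	proxy := term.NewEscapeProxy(input, escapeKeys)

	buf := make([]byte, 32)
	for {
		n, err := proxy.Read(buf)
		fmt.Printf("read %q\n", buf[:n])
		if _, ok := err.(term.EscapeError); ok {
			// The proxy withholds the escape bytes from the caller and
			// reports the detach via the error value.
			fmt.Println("detach sequence seen, stopping proxy")
			return
		}
		if err != nil {
			return
		}
	}
}
```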
- n -= r.escapeKeyPos - if n < 0 { - n = 0 - } - return n, err -} diff --git a/vendor/github.com/moby/term/tc_illumos.go b/vendor/github.com/moby/term/tc_illumos.go deleted file mode 100644 index a12c35cc60933..0000000000000 --- a/vendor/github.com/moby/term/tc_illumos.go +++ /dev/null @@ -1,25 +0,0 @@ -//+build solaris illumos - -package term - -import ( - "golang.org/x/sys/unix" - "syscall" -) - -func tcget(fd uintptr, p *Termios) syscall.Errno { - - termios, err := unix.IoctlGetTermios(int(fd), getTermios) - if err != nil { - return syscall.EINVAL - } - p = (*Termios)(termios) - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - if err := unix.IoctlSetTermios(int(fd), setTermios, (*unix.Termios)(p)); err != nil { - return syscall.EINVAL - } - return 0 -} diff --git a/vendor/github.com/moby/term/term_illumos.go b/vendor/github.com/moby/term/term_illumos.go deleted file mode 100644 index b230b31d961f3..0000000000000 --- a/vendor/github.com/moby/term/term_illumos.go +++ /dev/null @@ -1,124 +0,0 @@ -//+build solaris illumos - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). -package term - -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - - "golang.org/x/sys/unix" -) - -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) - -// State represents the state of the terminal. -type State struct { - termios Termios -} - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - var termios Termios - return tcget(fd, &termios) == 0 -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - if err := tcset(fd, &state.termios); err != 0 { - return err - } - return nil -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - return &oldState, nil -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= unix.ECHO - - if err := tcset(fd, &newState); err != 0 { - return err - } - handleInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. 
-func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - go func() { - for range sigchan { - // quit cleanly and the new terminal item is on a new line - fmt.Println() - signal.Stop(sigchan) - close(sigchan) - RestoreTerminal(fd, state) - os.Exit(1) - } - }() -} diff --git a/vendor/github.com/moby/term/termios_illumos.go b/vendor/github.com/moby/term/termios_illumos.go deleted file mode 100644 index a79a56ade3239..0000000000000 --- a/vendor/github.com/moby/term/termios_illumos.go +++ /dev/null @@ -1,41 +0,0 @@ -//+build solaris illumos - -package term - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TCGETS - setTermios = unix.TCSETS -) - -// Termios is the Unix API for terminal I/O. -type Termios unix.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - termios, err := unix.IoctlGetTermios(int(fd), getTermios) - if err != nil { - return nil, err - } - - var oldState State - oldState.termios = Termios(*termios) - - termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - termios.Oflag &^= unix.OPOST - termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - termios.Cflag &^= (unix.CSIZE | unix.PARENB) - termios.Cflag |= unix.CS8 - termios.Cc[unix.VMIN] = 1 - termios.Cc[unix.VTIME] = 0 - - if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil { - return nil, err - } - return &oldState, nil -} diff --git a/vendor/github.com/moby/term/windows/doc.go b/vendor/github.com/moby/term/windows/doc.go deleted file mode 100644 index 54265fffaffd3..0000000000000 --- a/vendor/github.com/moby/term/windows/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windowsconsole diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go index a18d2c7b8412c..13b36464d976c 100644 --- a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -232,9 +232,9 @@ type API interface { // Flags returns the flag values that Prometheus was launched with. Flags(ctx context.Context) (FlagsResult, error) // LabelNames returns all the unique label names present in the block in sorted order. - LabelNames(ctx context.Context) ([]string, Warnings, error) + LabelNames(ctx context.Context, startTime time.Time, endTime time.Time) ([]string, Warnings, error) // LabelValues performs a query for the values of the given label. 
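Editor's note on the `api.go` hunk below: `LabelNames` and `LabelValues` are widened to take an explicit time range, which the HTTP client sends as `start`/`end` query parameters. A minimal caller sketch against the new signatures (the Prometheus address is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}
	promAPI := v1.NewAPI(client)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	end := time.Now()
	start := end.Add(-1 * time.Hour)

	// Both calls now scope the lookup to [start, end].
	names, warnings, err := promAPI.LabelNames(ctx, start, end)
	if err != nil {
		panic(err)
	}
	fmt.Println(warnings, names)

	values, _, err := promAPI.LabelValues(ctx, "job", start, end)
	if err != nil {
		panic(err)
	}
	fmt.Println(values)
}
```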
- LabelValues(ctx context.Context, label string) (model.LabelValues, Warnings, error) + LabelValues(ctx context.Context, label string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error) // Query performs a query for the given time. Query(ctx context.Context, query string, ts time.Time) (model.Value, Warnings, error) // QueryRange performs a query for the given range. @@ -676,8 +676,14 @@ func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) { return res, json.Unmarshal(body, &res) } -func (h *httpAPI) LabelNames(ctx context.Context) ([]string, Warnings, error) { +func (h *httpAPI) LabelNames(ctx context.Context, startTime time.Time, endTime time.Time) ([]string, Warnings, error) { u := h.client.URL(epLabels, nil) + q := u.Query() + q.Set("start", formatTime(startTime)) + q.Set("end", formatTime(endTime)) + + u.RawQuery = q.Encode() + req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, nil, err @@ -690,8 +696,14 @@ func (h *httpAPI) LabelNames(ctx context.Context) ([]string, Warnings, error) { return labelNames, w, json.Unmarshal(body, &labelNames) } -func (h *httpAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, Warnings, error) { +func (h *httpAPI) LabelValues(ctx context.Context, label string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error) { u := h.client.URL(epLabelValues, map[string]string{"name": label}) + q := u.Query() + q.Set("start", formatTime(startTime)) + q.Set("end", formatTime(endTime)) + + u.RawQuery = q.Encode() + req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, nil, err diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 3a5aac700e794..d4ea301a33ce1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -607,7 +607,7 @@ func NewConstHistogram( } // MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstMetric would have returned an error. +// NewConstHistogram would have returned an error. func MustNewConstHistogram( desc *Desc, count uint64, diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index b978dfc50d2f3..9320176ca24f4 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -150,6 +150,17 @@ else $(GO) get $(GOOPTS) -t ./... endif +.PHONY: update-go-deps +update-go-deps: + @echo ">> updating Go dependencies" + @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get $$m; \ + done + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifneq (,$(wildcard vendor)) + GO111MODULE=$(GO111MODULE) $(GO) mod vendor +endif + .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 2e02215528f09..31d42f7124c36 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -11,11 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+// +build linux + package procfs import ( "bufio" "bytes" + "errors" + "regexp" "strconv" "strings" @@ -52,6 +56,11 @@ type CPUInfo struct { PowerManagement string } +var ( + cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) + cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) +) + // CPUInfo returns information about current system CPUs. // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt func (fs FS) CPUInfo() ([]CPUInfo, error) { @@ -62,14 +71,26 @@ func (fs FS) CPUInfo() ([]CPUInfo, error) { return parseCPUInfo(data) } -// parseCPUInfo parses data from /proc/cpuinfo -func parseCPUInfo(info []byte) ([]CPUInfo, error) { - cpuinfo := []CPUInfo{} - i := -1 +func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + for scanner.Scan() { line := scanner.Text() - if strings.TrimSpace(line) == "" { + if !strings.Contains(line, ":") { continue } field := strings.SplitN(line, ": ", 2) @@ -82,7 +103,7 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) { return nil, err } cpuinfo[i].Processor = uint(v) - case "vendor_id": + case "vendor", "vendor_id": cpuinfo[i].VendorID = field[1] case "cpu family": cpuinfo[i].CPUFamily = field[1] @@ -163,5 +184,237 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) { } } return cpuinfo, nil +} + +func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + if !match || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + featuresLine := "" + commonCPUInfo := CPUInfo{} + i := 0 + if strings.TrimSpace(field[0]) == "Processor" { + commonCPUInfo = CPUInfo{ModelName: field[1]} + i = -1 + } else { + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo = []CPUInfo{firstcpu} + } + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "BogoMIPS": + if i == -1 { + cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor + i++ + cpuinfo[i].Processor = 0 + } + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "Features": + featuresLine = line + case "model name": + cpuinfo[i].ModelName = field[1] + } + } + fields := strings.SplitN(featuresLine, ": ", 2) + for i := range cpuinfo { + cpuinfo[i].Flags = strings.Fields(fields[1]) + } + return cpuinfo, nil + +} + +func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { + scanner := 
bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + commonCPUInfo := CPUInfo{VendorID: field[1]} + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "bogomips per cpu": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + commonCPUInfo.BogoMips = v + case "features": + commonCPUInfo.Flags = strings.Fields(field[1]) + } + if strings.HasPrefix(line, "processor") { + match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) + if len(match) < 2 { + return nil, errors.New("Invalid line found in cpuinfo: " + line) + } + cpu := commonCPUInfo + v, err := strconv.ParseUint(match[1], 0, 32) + if err != nil { + return nil, err + } + cpu.Processor = uint(v) + cpuinfo = append(cpuinfo, cpu) + } + if strings.HasPrefix(line, "cpu number") { + break + } + } + + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "cpu number": + i++ + case "cpu MHz dynamic": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + } + } + + return cpuinfo, nil +} + +func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "cpu model": + cpuinfo[i].ModelName = field[1] + case "BogoMIPS": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ 
+ v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "cpu": + cpuinfo[i].VendorID = field[1] + case "clock": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + } + } + return cpuinfo, nil +} +// firstNonEmptyLine advances the scanner to the first non-empty line +// and returns the contents of that line +func firstNonEmptyLine(scanner *bufio.Scanner) string { + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) != "" { + return line + } + } + return "" } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go b/vendor/github.com/prometheus/procfs/cpuinfo_arm.go new file mode 100644 index 0000000000000..83555077069a0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_arm.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go b/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go new file mode 100644 index 0000000000000..4f5d172a35659 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build arm64 + +package procfs + +var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_default.go b/vendor/github.com/prometheus/procfs/cpuinfo_default.go new file mode 100644 index 0000000000000..d5bedf97f31c1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_default.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
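Editor's note on the new per-architecture files that follow: each one binds the package-level `parseCPUInfo` variable to the matching parser (`parseCPUInfoX86`, `parseCPUInfoARM`, `parseCPUInfoMips`, and so on), selected by build tags and file-name suffixes, so `FS.CPUInfo` stays architecture-agnostic. A hedged usage sketch, assuming the default `/proc` mount point:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	// CPUInfo reads /proc/cpuinfo and hands the bytes to whichever
	// parseCPUInfo implementation the build selected for this GOARCH.
	cpus, err := fs.CPUInfo()
	if err != nil {
		panic(err)
	}
	for _, cpu := range cpus {
		fmt.Printf("cpu%d: vendor=%s model=%q bogomips=%.2f\n",
			cpu.Processor, cpu.VendorID, cpu.ModelName, cpu.BogoMips)
	}
}
```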
+ +// +build linux +// +build 386 amd64 + +package procfs + +var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips.go new file mode 100644 index 0000000000000..22d93f8ef0c24 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mips.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go new file mode 100644 index 0000000000000..22d93f8ef0c24 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go new file mode 100644 index 0000000000000..22d93f8ef0c24 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go new file mode 100644 index 0000000000000..22d93f8ef0c24 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go new file mode 100644 index 0000000000000..64aee9c63c078 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go new file mode 100644 index 0000000000000..64aee9c63c078 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go new file mode 100644 index 0000000000000..26814eebaaf3b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 45a732155828c..868c8573d9254 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -173,6 +173,283 @@ Lines: 1 411605849 93680043 79 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/smaps +Lines: 252 +00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager +Size: 8900 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 2952 kB +Pss: 2952 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 2952 kB +Private_Dirty: 0 kB +Referenced: 2864 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex mr mw me dw sd +00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager +Size: 10236 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 6152 kB +Pss: 6152 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 6152 kB +Private_Dirty: 0 kB +Referenced: 5308 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd mr mw me dw sd +016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager +Size: 424 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 176 kB +Pss: 176 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 84 kB +Private_Dirty: 92 kB +Referenced: 176 kB +Anonymous: 92 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 12 kB +SwapPss: 12 kB +Locked: 0 kB +VmFlags: rd wr mr mw me dw ac sd +0171a000-0173f000 rw-p 00000000 00:00 0 +Size: 148 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 76 kB +Pss: 76 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 76 kB +Referenced: 76 kB +Anonymous: 76 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +c000000000-c000400000 rw-p 00000000 00:00 0 +Size: 4096 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 2564 kB +Pss: 2564 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 20 kB +Private_Dirty: 2544 kB +Referenced: 2544 kB +Anonymous: 2564 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 1100 kB +SwapPss: 1100 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +c000400000-c001600000 rw-p 00000000 00:00 0 +Size: 18432 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 16024 kB +Pss: 16024 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 5864 kB +Private_Dirty: 10160 kB +Referenced: 11944 kB +Anonymous: 16024 kB +LazyFree: 5848 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 440 kB +SwapPss: 440 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd nh +c001600000-c004000000 rw-p 00000000 00:00 0 +Size: 43008 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB 
+Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 +Size: 38596 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 1992 kB +Pss: 1992 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 476 kB +Private_Dirty: 1516 kB +Referenced: 1828 kB +Anonymous: 1992 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 384 kB +SwapPss: 384 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] +Size: 132 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 8 kB +Pss: 8 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 8 kB +Referenced: 8 kB +Anonymous: 8 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 4 kB +SwapPss: 4 kB +Locked: 0 kB +VmFlags: rd wr mr mw me gd ac +7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] +Size: 12 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd mr pf io de dd sd +7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso] +Size: 8 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 4 kB +Pss: 0 kB +Shared_Clean: 4 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 4 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex mr mw me de sd +ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] +Size: 4 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/smaps_rollup +Lines: 17 +00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] +Rss: 29948 kB +Pss: 29944 kB +Shared_Clean: 4 kB +Shared_Dirty: 0 kB +Private_Clean: 15548 kB +Private_Dirty: 14396 kB +Referenced: 24752 kB +Anonymous: 20756 kB +LazyFree: 5848 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 1940 kB +SwapPss: 1940 kB +Locked: 0 kB +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/26231/stat Lines: 1 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 @@ -235,6 +512,11 @@ voluntary_ctxt_switches: 4742839 nonvoluntary_ctxt_switches: 1727500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/wchan +Lines: 1 +poll_schedule_timeoutEOF +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/26232 Mode: 755 # ttar - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -310,6 +592,11 @@ Lines: 1 33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/wchan +Lines: 1 +0EOF +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/26233 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1554,7 +1841,7 @@ max keysize : 32 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/diskstats -Lines: 49 +Lines: 52 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 @@ -1604,11 +1891,45 @@ Lines: 49 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 + 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 + 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 + 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/fs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/fs/fscache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/fs/fscache/stats +Lines: 24 +FS-Cache statistics +Cookies: idx=3 dat=67877 spc=0 +Objects: alc=67473 nal=0 avl=67473 ded=388 +ChkAux : non=12 ok=33 upd=44 obs=55 +Pages : mrk=547164 unc=364577 +Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 +Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 +Invals : n=14 run=13 +Updates: n=7 nul=3 run=8 +Relinqs: n=394 nul=1 wcr=2 rtr=3 +AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 +Allocs : n=20 ok=19 wt=18 nbf=17 int=16 +Allocs : ops=15 owt=14 abt=13 +Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 +Retrvls: ops=151959 owt=42747 abt=44 +Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 +Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 +VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 +Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 +Ops : ini=377538 dfr=27 rel=377538 gc=37 +CacheOp: alo=1 luo=2 luc=3 gro=4 +CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 +CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 +CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/fs/xfs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2025,6 +2346,32 @@ Mode: 644 Directory: fixtures/proc/sys Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/kernel +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/kernel/random +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/entropy_avail +Lines: 1 +3943 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - +Path: fixtures/proc/sys/kernel/random/poolsize +Lines: 1 +4096 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs +Lines: 1 +60 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold +Lines: 1 +3072 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/sys/vm Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2526,6 +2873,237 @@ Mode: 664 Directory: fixtures/sys/block/sda Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda/queue +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/add_random +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/chunk_sectors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/dax +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_granularity +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_max_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_zeroes_data +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/fua +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/hw_sector_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_poll +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_poll_delay +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_timeout +Lines: 1 +30000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda/queue/iosched +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/back_seek_max +Lines: 1 +16384 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async +Lines: 1 +250 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync +Lines: 1 +125 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - +Path: fixtures/sys/block/sda/queue/iosched/low_latency +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/max_budget +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/slice_idle +Lines: 1 +8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us +Lines: 1 +8000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/timeout_sync +Lines: 1 +125 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iostats +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/logical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_discard_segments +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb +Lines: 1 +32767 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_integrity_segments +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_sectors_kb +Lines: 1 +1280 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_segment_size +Lines: 1 +65536 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_segments +Lines: 1 +168 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/minimum_io_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nomerges +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nr_requests +Lines: 1 +64 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nr_zones +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/optimal_io_size +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/physical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/read_ahead_kb +Lines: 1 +128 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/rotational +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/rq_affinity +Lines: 1 +1 +Mode: 
644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/scheduler +Lines: 1 +mq-deadline kyber [bfq] none +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/wbt_lat_usec +Lines: 1 +75000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_cache +Lines: 1 +write back +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_same_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/zoned +Lines: 1 +none +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/block/sda/stat Lines: 1 9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 @@ -2534,6 +3112,140 @@ Mode: 664 Directory: fixtures/sys/class Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host/host0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo +Lines: 1 +30 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/fabric_name +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/node_name +Lines: 1 +0x2000e0071bce95f2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_id +Lines: 1 +0x000002 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_name +Lines: 1 +0x1000e0071bce95f2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_state +Lines: 1 +Online +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_type +Lines: 1 +Point-To-Point (direct nport connection) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/speed +Lines: 1 +16 Gbit +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host/host0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames +Lines: 1 +0xffffffffffffffff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/error_frames +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts +Lines: 1 +0x13 +Mode: 644 +# ttar - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count +Lines: 1 +0x2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count +Lines: 1 +0x8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count +Lines: 1 +0x9 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count +Lines: 1 +0x11 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count +Lines: 1 +0x10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/nos_count +Lines: 1 +0x12 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames +Lines: 1 +0x3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/rx_words +Lines: 1 +0x4 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset +Lines: 1 +0x7 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames +Lines: 1 +0x5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/tx_words +Lines: 1 +0x6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/supported_classes +Lines: 1 +Class 3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/supported_speeds +Lines: 1 +4 Gbit, 8 Gbit, 16 Gbit +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/symbolic_name +Lines: 1 +Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. 
OS:Linux +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/class/infiniband Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2564,6 +3276,11 @@ Mode: 755 Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors Lines: 1 0 @@ -2665,6 +3382,11 @@ Mode: 755 Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors Lines: 1 0 @@ -3109,7 +3831,7 @@ Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/thermal/thermal_zone1/temp Lines: 1 -44000 +-44000 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/thermal/thermal_zone1/type @@ -4287,6 +5009,17 @@ Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug +Lines: 7 +rate: 1.1M/sec +dirty: 20.4G +target: 20.4G +proportional: 427.5k +integral: 790.0k +change: 321.5k/sec +next io: 17ms +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size Lines: 1 0 diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go new file mode 100644 index 0000000000000..8783cf3cc18c6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -0,0 +1,422 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Fscacheinfo represents fscache statistics. 
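+//
+// A minimal usage sketch (illustrative only, not part of this change; it
+// assumes procfs is mounted at /proc and that fscache is enabled):
+//
+//	fs, err := procfs.NewFS("/proc")
+//	if err != nil {
+//		// handle error
+//	}
+//	info, err := fs.Fscacheinfo()
+//	if err != nil {
+//		// /proc/fs/fscache/stats is absent when fscache is not in use
+//	}
+//	fmt.Println(info.IndexCookiesAllocated, info.ObjectsAllocated)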
+type Fscacheinfo struct { + // Number of index cookies allocated + IndexCookiesAllocated uint64 + // data storage cookies allocated + DataStorageCookiesAllocated uint64 + // Number of special cookies allocated + SpecialCookiesAllocated uint64 + // Number of objects allocated + ObjectsAllocated uint64 + // Number of object allocation failures + ObjectAllocationsFailure uint64 + // Number of objects that reached the available state + ObjectsAvailable uint64 + // Number of objects that reached the dead state + ObjectsDead uint64 + // Number of objects that didn't have a coherency check + ObjectsWithoutCoherencyCheck uint64 + // Number of objects that passed a coherency check + ObjectsWithCoherencyCheck uint64 + // Number of objects that needed a coherency data update + ObjectsNeedCoherencyCheckUpdate uint64 + // Number of objects that were declared obsolete + ObjectsDeclaredObsolete uint64 + // Number of pages marked as being cached + PagesMarkedAsBeingCached uint64 + // Number of uncache page requests seen + UncachePagesRequestSeen uint64 + // Number of acquire cookie requests seen + AcquireCookiesRequestSeen uint64 + // Number of acq reqs given a NULL parent + AcquireRequestsWithNullParent uint64 + // Number of acq reqs rejected due to no cache available + AcquireRequestsRejectedNoCacheAvailable uint64 + // Number of acq reqs succeeded + AcquireRequestsSucceeded uint64 + // Number of acq reqs rejected due to error + AcquireRequestsRejectedDueToError uint64 + // Number of acq reqs failed on ENOMEM + AcquireRequestsFailedDueToEnomem uint64 + // Number of lookup calls made on cache backends + LookupsNumber uint64 + // Number of negative lookups made + LookupsNegative uint64 + // Number of positive lookups made + LookupsPositive uint64 + // Number of objects created by lookup + ObjectsCreatedByLookup uint64 + // Number of lookups timed out and requeued + LookupsTimedOutAndRequed uint64 + InvalidationsNumber uint64 + InvalidationsRunning uint64 + // Number of update cookie requests seen + UpdateCookieRequestSeen uint64 + // Number of upd reqs given a NULL parent + UpdateRequestsWithNullParent uint64 + // Number of upd reqs granted CPU time + UpdateRequestsRunning uint64 + // Number of relinquish cookie requests seen + RelinquishCookiesRequestSeen uint64 + // Number of rlq reqs given a NULL parent + RelinquishCookiesWithNullParent uint64 + // Number of rlq reqs waited on completion of creation + RelinquishRequestsWaitingCompleteCreation uint64 + // Relinqs rtr + RelinquishRetries uint64 + // Number of attribute changed requests seen + AttributeChangedRequestsSeen uint64 + // Number of attr changed requests queued + AttributeChangedRequestsQueued uint64 + // Number of attr changed rejected -ENOBUFS + AttributeChangedRejectDueToEnobufs uint64 + // Number of attr changed failed -ENOMEM + AttributeChangedFailedDueToEnomem uint64 + // Number of attr changed ops given CPU time + AttributeChangedOps uint64 + // Number of allocation requests seen + AllocationRequestsSeen uint64 + // Number of successful alloc reqs + AllocationOkRequests uint64 + // Number of alloc reqs that waited on lookup completion + AllocationWaitingOnLookup uint64 + // Number of alloc reqs rejected -ENOBUFS + AllocationsRejectedDueToEnobufs uint64 + // Number of alloc reqs aborted -ERESTARTSYS + AllocationsAbortedDueToErestartsys uint64 + // Number of alloc reqs submitted + AllocationOperationsSubmitted uint64 + // Number of alloc reqs waited for CPU time + AllocationsWaitedForCPU uint64 + // Number of alloc reqs aborted due to 
object death + AllocationsAbortedDueToObjectDeath uint64 + // Number of retrieval (read) requests seen + RetrievalsReadRequests uint64 + // Number of successful retr reqs + RetrievalsOk uint64 + // Number of retr reqs that waited on lookup completion + RetrievalsWaitingLookupCompletion uint64 + // Number of retr reqs returned -ENODATA + RetrievalsReturnedEnodata uint64 + // Number of retr reqs rejected -ENOBUFS + RetrievalsRejectedDueToEnobufs uint64 + // Number of retr reqs aborted -ERESTARTSYS + RetrievalsAbortedDueToErestartsys uint64 + // Number of retr reqs failed -ENOMEM + RetrievalsFailedDueToEnomem uint64 + // Number of retr reqs submitted + RetrievalsRequests uint64 + // Number of retr reqs waited for CPU time + RetrievalsWaitingCPU uint64 + // Number of retr reqs aborted due to object death + RetrievalsAbortedDueToObjectDeath uint64 + // Number of storage (write) requests seen + StoreWriteRequests uint64 + // Number of successful store reqs + StoreSuccessfulRequests uint64 + // Number of store reqs on a page already pending storage + StoreRequestsOnPendingStorage uint64 + // Number of store reqs rejected -ENOBUFS + StoreRequestsRejectedDueToEnobufs uint64 + // Number of store reqs failed -ENOMEM + StoreRequestsFailedDueToEnomem uint64 + // Number of store reqs submitted + StoreRequestsSubmitted uint64 + // Number of store reqs granted CPU time + StoreRequestsRunning uint64 + // Number of pages given store req processing time + StorePagesWithRequestsProcessing uint64 + // Number of store reqs deleted from tracking tree + StoreRequestsDeleted uint64 + // Number of store reqs over store limit + StoreRequestsOverStoreLimit uint64 + // Number of release reqs against pages with no pending store + ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 + // Number of release reqs against pages stored by time lock granted + ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 + // Number of release reqs ignored due to in-progress store + ReleaseRequestsIgnoredDueToInProgressStore uint64 + // Number of page stores cancelled due to release req + PageStoresCancelledByReleaseRequests uint64 + VmscanWaiting uint64 + // Number of times async ops added to pending queues + OpsPending uint64 + // Number of times async ops given CPU time + OpsRunning uint64 + // Number of times async ops queued for processing + OpsEnqueued uint64 + // Number of async ops cancelled + OpsCancelled uint64 + // Number of async ops rejected due to object lookup/create failure + OpsRejected uint64 + // Number of async ops initialised + OpsInitialised uint64 + // Number of async ops queued for deferred release + OpsDeferred uint64 + // Number of async ops released (should equal ini=N when idle) + OpsReleased uint64 + // Number of deferred-release async ops garbage collected + OpsGarbageCollected uint64 + // Number of in-progress alloc_object() cache ops + CacheopAllocationsinProgress uint64 + // Number of in-progress lookup_object() cache ops + CacheopLookupObjectInProgress uint64 + // Number of in-progress lookup_complete() cache ops + CacheopLookupCompleteInPorgress uint64 + // Number of in-progress grab_object() cache ops + CacheopGrabObjectInProgress uint64 + CacheopInvalidations uint64 + // Number of in-progress update_object() cache ops + CacheopUpdateObjectInProgress uint64 + // Number of in-progress drop_object() cache ops + CacheopDropObjectInProgress uint64 + // Number of in-progress put_object() cache ops + CacheopPutObjectInProgress uint64 + // Number of in-progress attr_changed() cache ops + 
CacheopAttributeChangeInProgress uint64 + // Number of in-progress sync_cache() cache ops + CacheopSyncCacheInProgress uint64 + // Number of in-progress read_or_alloc_page() cache ops + CacheopReadOrAllocPageInProgress uint64 + // Number of in-progress read_or_alloc_pages() cache ops + CacheopReadOrAllocPagesInProgress uint64 + // Number of in-progress allocate_page() cache ops + CacheopAllocatePageInProgress uint64 + // Number of in-progress allocate_pages() cache ops + CacheopAllocatePagesInProgress uint64 + // Number of in-progress write_page() cache ops + CacheopWritePagesInProgress uint64 + // Number of in-progress uncache_page() cache ops + CacheopUncachePagesInProgress uint64 + // Number of in-progress dissociate_pages() cache ops + CacheopDissociatePagesInProgress uint64 + // Number of object lookups/creations rejected due to lack of space + CacheevLookupsAndCreationsRejectedLackSpace uint64 + // Number of stale objects deleted + CacheevStaleObjectsDeleted uint64 + // Number of objects retired when relinquished + CacheevRetiredWhenReliquished uint64 + // Number of objects culled + CacheevObjectsCulled uint64 +} + +// Fscacheinfo returns information about current fscache statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt +func (fs FS) Fscacheinfo() (Fscacheinfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) + if err != nil { + return Fscacheinfo{}, err + } + + m, err := parseFscacheinfo(bytes.NewReader(b)) + if err != nil { + return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err) + } + + return *m, nil +} + +func setFSCacheFields(fields []string, setFields ...*uint64) error { + var err error + if len(fields) < len(setFields) { + return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + } + + for i := range setFields { + *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) + if err != nil { + return err + } + } + return nil +} + +func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { + var m Fscacheinfo + s := bufio.NewScanner(r) + for s.Scan() { + fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + } + + switch fields[0] { + case "Cookies:": + err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, + &m.SpecialCookiesAllocated) + if err != nil { + return &m, err + } + case "Objects:": + err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, + &m.ObjectsAvailable, &m.ObjectsDead) + if err != nil { + return &m, err + } + case "ChkAux": + err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, + &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) + if err != nil { + return &m, err + } + case "Pages": + err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) + if err != nil { + return &m, err + } + case "Acquire:": + err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, + &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, + &m.AcquireRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + case "Lookups:": + err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, + &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) + if err != nil { + 
return &m, err + } + case "Invals": + err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) + if err != nil { + return &m, err + } + case "Updates:": + err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, + &m.UpdateRequestsRunning) + if err != nil { + return &m, err + } + case "Relinqs:": + err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, + &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) + if err != nil { + return &m, err + } + case "AttrChg:": + err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, + &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) + if err != nil { + return &m, err + } + case "Allocs": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, + &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, + &m.AllocationsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Retrvls:": + if strings.Split(fields[1], "=")[0] == "n" { + err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, + &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, + &m.RetrievalsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Stores": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, + &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, + &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) + if err != nil { + return &m, err + } + } + case "VmScan": + err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, + &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, + &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) + if err != nil { + return &m, err + } + case "Ops": + if strings.Split(fields[2], "=")[0] == "pend" { + err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) + if err != nil { + return &m, err + } + } + case "CacheOp:": + if strings.Split(fields[1], "=")[0] == "alo" { + err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, + &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) + if err != nil { + return &m, err + } + } else if strings.Split(fields[1], "=")[0] == "inv" { + err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, 
&m.CacheopUpdateObjectInProgress,
+					&m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
+					&m.CacheopSyncCacheInProgress)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
+					&m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
+					&m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "CacheEv:":
+			err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
+				&m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
+			if err != nil {
+				return &m, err
+			}
+		}
+	}
+
+	return &m, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 755591d9a5e96..22cb07a6bbb5f 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -73,6 +73,15 @@ func ReadUintFromFile(path string) (uint64, error) {
 	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
 }
 
+// ReadIntFromFile reads a file and attempts to parse an int64 from it.
+func ReadIntFromFile(path string) (int64, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
+
 // ParseBool parses a string into a boolean pointer.
 func ParseBool(b string) *bool {
 	var truth bool
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
new file mode 100644
index 0000000000000..beefdf02e9342
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/kernel_random.go
@@ -0,0 +1,62 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package procfs
+
+import (
+	"os"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// KernelRandom contains information about the kernel's random number generator.
+type KernelRandom struct {
+	// EntropyAvaliable gives the available entropy, in bits.
+	EntropyAvaliable *uint64
+	// PoolSize gives the size of the entropy pool, in bytes.
+	PoolSize *uint64
+	// URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
+	URandomMinReseedSeconds *uint64
+	// WriteWakeupThreshold is the number of bits of entropy below which we wake up processes
+	// that do a select(2) or poll(2) for write access to /dev/random.
+	WriteWakeupThreshold *uint64
+	// ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
+	// waiting for entropy from /dev/random.
+	ReadWakeupThreshold *uint64
+}
+
+// KernelRandom returns values from /proc/sys/kernel/random.
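+//
+// All fields are pointers so that a file missing on the running kernel is
+// left nil rather than reported as zero. A usage sketch (illustrative only,
+// not part of this change):
+//
+//	fs, err := procfs.NewFS("/proc")
+//	if err != nil {
+//		// handle error
+//	}
+//	kr, err := fs.KernelRandom()
+//	if err == nil && kr.EntropyAvaliable != nil {
+//		fmt.Println("available entropy (bits):", *kr.EntropyAvaliable)
+//	}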
+func (fs FS) KernelRandom() (KernelRandom, error) {
+	random := KernelRandom{}
+
+	for file, p := range map[string]**uint64{
+		"entropy_avail":           &random.EntropyAvaliable,
+		"poolsize":                &random.PoolSize,
+		"urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
+		"write_wakeup_threshold":  &random.WriteWakeupThreshold,
+		"read_wakeup_threshold":   &random.ReadWakeupThreshold,
+	} {
+		val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
+		if os.IsNotExist(err) {
+			continue
+		}
+		if err != nil {
+			return random, err
+		}
+		*p = &val
+	}
+
+	return random, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index 2af3ada180453..3e9362a94d930 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -52,7 +52,7 @@ type MDStat struct {
 func (fs FS) MDStat() ([]MDStat, error) {
 	data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
 	if err != nil {
-		return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
+		return nil, err
 	}
 	mdstat, err := parseMDStat(data)
 	if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
index 9471136101313..59f4d50558362 100644
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -77,7 +77,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
 	mountInfo := strings.Split(mountString, " ")
 	mountInfoLength := len(mountInfo)
-	if mountInfoLength < 11 {
+	if mountInfoLength < 10 {
 		return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
 	}
@@ -144,7 +144,7 @@ func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
 	return optionalFields, nil
 }
 
-// Parses the mount options, superblock options.
+// mountOptionsParser parses the mount options, superblock options.
 func mountOptionsParser(mountOptions string) map[string]string {
 	opts := make(map[string]string)
 	options := strings.Split(mountOptions, ",")
@@ -161,7 +161,7 @@ func mountOptionsParser(mountOptions string) map[string]string {
 	return opts
 }
 
-// Retrieves mountinfo information from `/proc/self/mountinfo`.
+// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
 func GetMounts() ([]*MountInfo, error) {
 	data, err := util.ReadFileNoStat("/proc/self/mountinfo")
 	if err != nil {
@@ -170,7 +170,7 @@ func GetMounts() ([]*MountInfo, error) {
 	return parseMountInfo(data)
 }
 
-// Retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
+// GetProcMounts retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
 func GetProcMounts(pid int) ([]*MountInfo, error) {
 	data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
 	if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 35b2ef3513f91..861ced9da0304 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -186,6 +186,8 @@ type NFSOperationStats struct {
 	CumulativeTotalResponseMilliseconds uint64
 	// Duration from when a request was enqueued to when it was completely handled.
 	CumulativeTotalRequestMilliseconds uint64
+	// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
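+	// In /proc/self/mountstats this is the optional ninth numeric value of a
+	// per-operation line; for a hypothetical line "READ: 23 24 0 4096 8192 10 20 30 1"
+	// the parser below sets Errors = 1. Older kernels omit the column, in
+	// which case Errors stays 0.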
+ Errors uint64 } // A NFSTransportStats contains statistics for the NFS mount RPC requests and @@ -494,8 +496,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { // line is reached. func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { const ( - // Number of expected fields in each per-operation statistics set - numFields = 9 + // Minimum number of expected fields in each per-operation statistics set + minFields = 9 ) var ops []NFSOperationStats @@ -508,12 +510,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { break } - if len(ss) != numFields { + if len(ss) < minFields { return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) } // Skip string operation name for integers - ns := make([]uint64, 0, numFields-1) + ns := make([]uint64, 0, minFields-1) for _, st := range ss[1:] { n, err := strconv.ParseUint(st, 10, 64) if err != nil { @@ -523,7 +525,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { ns = append(ns, n) } - ops = append(ops, NFSOperationStats{ + opStats := NFSOperationStats{ Operation: strings.TrimSuffix(ss[0], ":"), Requests: ns[0], Transmissions: ns[1], @@ -533,7 +535,13 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeQueueMilliseconds: ns[5], CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], - }) + } + + if len(ns) > 8 { + opStats.Errors = ns[8] + } + + ops = append(ops, opStats) } return ops, s.Err() diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 1e27c83d50e51..b637be98458f4 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -38,7 +38,7 @@ type ConntrackStatEntry struct { SearchRestart uint64 } -// Retrieves netfilter's conntrack statistics, split by CPU cores +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) } diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 330e472c70fd2..9f97b6e5236a4 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -134,6 +134,27 @@ func (p Proc) CmdLine() ([]string, error) { return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil } +// Wchan returns the wchan (wait channel) of a process. +func (p Proc) Wchan() (string, error) { + f, err := os.Open(p.path("wchan")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + wchan := string(data) + if wchan == "" || wchan == "0" { + return "", nil + } + + return wchan, nil +} + // Comm returns the command name of a process. func (p Proc) Comm() (string, error) { data, err := util.ReadFileNoStat(p.path("comm")) diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go new file mode 100644 index 0000000000000..4abd46451c690 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -0,0 +1,98 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
+// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
+// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
+// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
+// in this hierarchy.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type Cgroup struct {
+	// HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
+	// hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
+	HierarchyID int
+	// Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
+	// Cgroups V2 this may be empty, as all active controllers use the same hierarchy
+	Controllers []string
+	// Path of this control group, relative to the mount point of the cgroupfs representing this specific
+	// hierarchy
+	Path string
+}
+
+// parseCgroupString parses each line of the /proc/[pid]/cgroup file
+// Line format is hierarchyID:[controller1,controller2]:path
+func parseCgroupString(cgroupStr string) (*Cgroup, error) {
+	var err error
+
+	fields := strings.Split(cgroupStr, ":")
+	if len(fields) < 3 {
+		return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
+	}
+
+	cgroup := &Cgroup{
+		Path:        fields[2],
+		Controllers: nil,
+	}
+	cgroup.HierarchyID, err = strconv.Atoi(fields[0])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse hierarchy ID")
+	}
+	if fields[1] != "" {
+		ssNames := strings.Split(fields[1], ",")
+		cgroup.Controllers = append(cgroup.Controllers, ssNames...)
+	}
+	return cgroup, nil
+}
+
+// parseCgroups reads each line of the /proc/[pid]/cgroup file
+func parseCgroups(data []byte) ([]Cgroup, error) {
+	var cgroups []Cgroup
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		mountString := scanner.Text()
+		parsedMounts, err := parseCgroupString(mountString)
+		if err != nil {
+			return nil, err
+		}
+		cgroups = append(cgroups, *parsedMounts)
+	}
+
+	err := scanner.Err()
+	return cgroups, err
+}
+
+// Cgroups reads from /proc/[pid]/cgroup and returns a []Cgroup locating this PID in each process
+// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain
+// all processes, so the length of the returned slice equals the number of active hierarchies on
+// this system.
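+//
+// A short usage sketch (illustrative only, not part of this change):
+//
+//	p, err := procfs.Self()
+//	if err != nil {
+//		// handle error
+//	}
+//	cgroups, err := p.Cgroups()
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, cg := range cgroups {
+//		fmt.Println(cg.HierarchyID, cg.Controllers, cg.Path)
+//	}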
+func (p Proc) Cgroups() ([]Cgroup, error) {
+	data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
+	if err != nil {
+		return nil, err
+	}
+	return parseCgroups(data)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
index 0c9c402850c0f..a76ca70791940 100644
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -41,7 +41,7 @@ type ProcFDInfo struct {
 	Flags string
 	// Mount point ID
 	MntID string
-	// List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only)
+	// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
 	InotifyInfos []InotifyInfo
 }
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
index 28d5c6eb1da89..1d7772d516a4a 100644
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build !windows
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
 
 package procfs
 
@@ -25,6 +25,7 @@ import (
 	"golang.org/x/sys/unix"
 )
 
+// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
 type ProcMapPermissions struct {
 	// mapping has the [R]ead flag set
 	Read bool
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
new file mode 100644
index 0000000000000..a576a720a442b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -0,0 +1,165 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+var (
+	// match the header line before each mapped zone in /proc/pid/smaps
+	procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
+)
+
+// ProcSMapsRollup models the summed memory totals of a process, as exposed
+// by /proc/[pid]/smaps_rollup.
+type ProcSMapsRollup struct {
+	// Amount of the mapping that is currently resident in RAM
+	Rss uint64
+	// Process's proportional share of this mapping
+	Pss uint64
+	// Size in bytes of clean shared pages
+	SharedClean uint64
+	// Size in bytes of dirty shared pages
+	SharedDirty uint64
+	// Size in bytes of clean private pages
+	PrivateClean uint64
+	// Size in bytes of dirty private pages
+	PrivateDirty uint64
+	// Amount of memory currently marked as referenced or accessed
+	Referenced uint64
+	// Amount of memory that does not belong to any file
+	Anonymous uint64
+	// Amount of would-be-anonymous memory currently on swap
+	Swap uint64
+	// Process's proportional memory on swap
+	SwapPss uint64
+}
+
+// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
+// process.
+//
+// If smaps_rollup does not exist (requires kernel >= 4.15), the content of /proc/pid/smaps will
+// be read and summed.
+func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
+	data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
+	if err != nil && os.IsNotExist(err) {
+		return p.procSMapsRollupManual()
+	}
+	if err != nil {
+		return ProcSMapsRollup{}, err
+	}
+
+	lines := strings.Split(string(data), "\n")
+	smaps := ProcSMapsRollup{}
+
+	// skip the first line, which doesn't contain information we need
+	lines = lines[1:]
+	for _, line := range lines {
+		if line == "" {
+			continue
+		}
+
+		if err := smaps.parseLine(line); err != nil {
+			return ProcSMapsRollup{}, err
+		}
+	}
+
+	return smaps, nil
+}
+
+// Read /proc/pid/smaps and do the roll-up in Go code.
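+// Every mapping in smaps repeats the same "Key:   value kB" fields, so
+// summing the values per key reproduces what smaps_rollup would report.
+// For example (hypothetical input), two mappings reporting "Rss: 4 kB" and
+// "Rss: 8 kB" roll up to Rss = 12288 bytes, since parseLine converts each
+// kB value to bytes before it is accumulated.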
+func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
+	file, err := os.Open(p.path("smaps"))
+	if err != nil {
+		return ProcSMapsRollup{}, err
+	}
+	defer file.Close()
+
+	smaps := ProcSMapsRollup{}
+	scan := bufio.NewScanner(file)
+
+	for scan.Scan() {
+		line := scan.Text()
+
+		if procSMapsHeaderLine.MatchString(line) {
+			continue
+		}
+
+		if err := smaps.parseLine(line); err != nil {
+			return ProcSMapsRollup{}, err
+		}
+	}
+
+	return smaps, nil
+}
+
+func (s *ProcSMapsRollup) parseLine(line string) error {
+	kv := strings.SplitN(line, ":", 2)
+	if len(kv) != 2 {
+		return fmt.Errorf("invalid smaps line, missing colon: %q", line)
+	}
+
+	k := kv[0]
+	if k == "VmFlags" {
+		return nil
+	}
+
+	v := strings.TrimSpace(kv[1])
+	v = strings.TrimRight(v, " kB")
+
+	vKBytes, err := strconv.ParseUint(v, 10, 64)
+	if err != nil {
+		return err
+	}
+	vBytes := vKBytes * 1024
+
+	s.addValue(k, v, vKBytes, vBytes)
+
+	return nil
+}
+
+func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
+	switch k {
+	case "Rss":
+		s.Rss += vUintBytes
+	case "Pss":
+		s.Pss += vUintBytes
+	case "Shared_Clean":
+		s.SharedClean += vUintBytes
+	case "Shared_Dirty":
+		s.SharedDirty += vUintBytes
+	case "Private_Clean":
+		s.PrivateClean += vUintBytes
+	case "Private_Dirty":
+		s.PrivateDirty += vUintBytes
+	case "Referenced":
+		s.Referenced += vUintBytes
+	case "Anonymous":
+		s.Anonymous += vUintBytes
+	case "Swap":
+		s.Swap += vUintBytes
+	case "SwapPss":
+		s.SwapPss += vUintBytes
+	}
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go b/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go
index 684784929e4fa..faafc58553014 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go
@@ -86,8 +86,8 @@ var (
 	)
 
 	// Initialize metric vectors.
-	servicesRPCDuraion = rpcDuration.WithLabelValues("catalog", "services")
-	serviceRPCDuraion  = rpcDuration.WithLabelValues("catalog", "service")
+	servicesRPCDuration = rpcDuration.WithLabelValues("catalog", "services")
+	serviceRPCDuration  = rpcDuration.WithLabelValues("catalog", "service")
 
 	// DefaultSDConfig is the default Consul SD configuration.
 	DefaultSDConfig = SDConfig{
@@ -357,7 +357,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
 	}
 	srvs, meta, err := catalog.Services(opts.WithContext(ctx))
 	elapsed := time.Since(t0)
-	servicesRPCDuraion.Observe(elapsed.Seconds())
+	servicesRPCDuration.Observe(elapsed.Seconds())
 
 	// Check the context before in order to exit early.
 	select {
@@ -473,7 +473,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
 	serviceNodes, meta, err := health.ServiceMultipleTags(srv.name, srv.tags, false, opts.WithContext(ctx))
 	elapsed := time.Since(t0)
-	serviceRPCDuraion.Observe(elapsed.Seconds())
+	serviceRPCDuration.Observe(elapsed.Seconds())
 
 	// Check the context before in order to exit early.
select { diff --git a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go index dc13ba12c3117..e445f8867afad 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go +++ b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go @@ -176,7 +176,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { func (d *Discovery) listDroplets() ([]godo.Droplet, error) { var ( droplets []godo.Droplet - opts = &godo.ListOptions{Page: 1} + opts = &godo.ListOptions{} ) for { paginatedDroplets, resp, err := d.client.Droplets.List(context.Background(), opts) @@ -187,7 +187,13 @@ func (d *Discovery) listDroplets() ([]godo.Droplet, error) { if resp.Links == nil || resp.Links.IsLastPage() { break } - opts.Page++ + + page, err := resp.Links.CurrentPage() + if err != nil { + return nil, err + } + + opts.Page = page + 1 } return droplets, nil } diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go index 872992d8d2c8e..81d0e10c99052 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go +++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go @@ -40,18 +40,19 @@ const ( // HypervisorDiscovery discovers OpenStack hypervisors. type HypervisorDiscovery struct { - provider *gophercloud.ProviderClient - authOpts *gophercloud.AuthOptions - region string - logger log.Logger - port int + provider *gophercloud.ProviderClient + authOpts *gophercloud.AuthOptions + region string + logger log.Logger + port int + availability gophercloud.Availability } // newHypervisorDiscovery returns a new hypervisor discovery. func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, - port int, region string, l log.Logger) *HypervisorDiscovery { + port int, region string, availability gophercloud.Availability, l log.Logger) *HypervisorDiscovery { return &HypervisorDiscovery{provider: provider, authOpts: opts, - region: region, port: port, logger: l} + region: region, port: port, availability: availability, logger: l} } func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { @@ -60,8 +61,9 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group if err != nil { return nil, errors.Wrap(err, "could not authenticate to OpenStack") } + client, err := openstack.NewComputeV2(h.provider, gophercloud.EndpointOpts{ - Region: h.region, + Region: h.region, Availability: h.availability, }) if err != nil { return nil, errors.Wrap(err, "could not create OpenStack compute session") diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go index f3badb86c0d5e..8ae4a05b70dd9 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go +++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go @@ -47,22 +47,23 @@ const ( // InstanceDiscovery discovers OpenStack instances. 
type InstanceDiscovery struct { - provider *gophercloud.ProviderClient - authOpts *gophercloud.AuthOptions - region string - logger log.Logger - port int - allTenants bool + provider *gophercloud.ProviderClient + authOpts *gophercloud.AuthOptions + region string + logger log.Logger + port int + allTenants bool + availability gophercloud.Availability } // NewInstanceDiscovery returns a new instance discovery. func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, - port int, region string, allTenants bool, l log.Logger) *InstanceDiscovery { + port int, region string, allTenants bool, availability gophercloud.Availability, l log.Logger) *InstanceDiscovery { if l == nil { l = log.NewNopLogger() } return &InstanceDiscovery{provider: provider, authOpts: opts, - region: region, port: port, allTenants: allTenants, logger: l} + region: region, port: port, allTenants: allTenants, availability: availability, logger: l} } type floatingIPKey struct { @@ -76,8 +77,9 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, if err != nil { return nil, errors.Wrap(err, "could not authenticate to OpenStack") } + client, err := openstack.NewComputeV2(i.provider, gophercloud.EndpointOpts{ - Region: i.region, + Region: i.region, Availability: i.availability, }) if err != nil { return nil, errors.Wrap(err, "could not create OpenStack compute session") diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go index 65f8c6c43acee..38f02904a8c9f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go +++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go @@ -15,6 +15,7 @@ package openstack import ( "context" + "fmt" "net/http" "time" @@ -34,6 +35,7 @@ import ( var DefaultSDConfig = SDConfig{ Port: 80, RefreshInterval: model.Duration(60 * time.Second), + Availability: "public", } // SDConfig is the configuration for OpenStack based service discovery. @@ -51,10 +53,11 @@ type SDConfig struct { ApplicationCredentialSecret config_util.Secret `yaml:"application_credential_secret"` Role Role `yaml:"role"` Region string `yaml:"region"` - RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + RefreshInterval model.Duration `yaml:"refresh_interval"` Port int `yaml:"port"` AllTenants bool `yaml:"all_tenants,omitempty"` TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"` + Availability string `yaml:"availability,omitempty"` } // Role is the role of the target in OpenStack. 
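The `availability` field added to `SDConfig` above is user-facing; in a Prometheus scrape configuration it would appear roughly as in this hypothetical fragment (a sketch, not taken from this diff). It defaults to `public` and, per the validation below, must be one of `public`, `internal`, or `admin`:

```yaml
openstack_sd_configs:
  - role: instance
    region: RegionOne
    availability: internal
```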
@@ -91,12 +94,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err != nil {
 		return err
 	}
+
+	switch c.Availability {
+	case "public", "internal", "admin":
+	default:
+		return fmt.Errorf("unknown availability %s, must be one of admin, internal or public", c.Availability)
+	}
+
 	if c.Role == "" {
 		return errors.New("role missing (one of: instance, hypervisor)")
 	}
 	if c.Region == "" {
 		return errors.New("openstack SD configuration requires a region")
 	}
+
 	return nil
 }
@@ -152,20 +163,21 @@ func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
 	}
 	client.HTTPClient = http.Client{
 		Transport: &http.Transport{
-			IdleConnTimeout: 5 * time.Duration(conf.RefreshInterval),
+			IdleConnTimeout: 2 * time.Duration(conf.RefreshInterval),
 			TLSClientConfig: tls,
 			DialContext: conntrack.NewDialContextFunc(
 				conntrack.DialWithTracing(),
 				conntrack.DialWithName("openstack_sd"),
 			),
 		},
-		Timeout: 5 * time.Duration(conf.RefreshInterval),
+		Timeout: time.Duration(conf.RefreshInterval),
 	}
+	availability := gophercloud.Availability(conf.Availability)
 	switch conf.Role {
 	case OpenStackRoleHypervisor:
-		return newHypervisorDiscovery(client, &opts, conf.Port, conf.Region, l), nil
+		return newHypervisorDiscovery(client, &opts, conf.Port, conf.Region, availability, l), nil
 	case OpenStackRoleInstance:
-		return newInstanceDiscovery(client, &opts, conf.Port, conf.Region, conf.AllTenants, l), nil
+		return newInstanceDiscovery(client, &opts, conf.Port, conf.Region, conf.AllTenants, availability, l), nil
 	}
 	return nil, errors.New("unknown OpenStack discovery role")
 }
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go b/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go
index 39e75b0e9c823..317e39435a856 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go
@@ -20,9 +20,10 @@ import (
 )
 
 type FastRegexMatcher struct {
-	re     *regexp.Regexp
-	prefix string
-	suffix string
+	re       *regexp.Regexp
+	prefix   string
+	suffix   string
+	contains string
 }
 
 func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
@@ -41,7 +42,7 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
 	}
 
 	if parsed.Op == syntax.OpConcat {
-		m.prefix, m.suffix = optimizeConcatRegex(parsed)
+		m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed)
 	}
 
 	return m, nil
@@ -54,6 +55,9 @@ func (m *FastRegexMatcher) MatchString(s string) bool {
 	if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
 		return false
 	}
+	if m.contains != "" && !strings.Contains(s, m.contains) {
+		return false
+	}
 	return m.re.MatchString(s)
 }
 
@@ -63,7 +67,7 @@ func (m *FastRegexMatcher) GetRegexString() string {
 
 // optimizeConcatRegex returns literal prefix/suffix text that can be safely
 // checked against the label value before running the regexp matcher.
-func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string) {
+func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
 	sub := r.Sub
 
 	// We can safely remove begin and end text matchers respectively
@@ -89,5 +93,15 @@ func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string) {
 		suffix = string(sub[last].Rune)
 	}
 
+	// If the concatenation contains any literal that is not a prefix/suffix,
+	// we keep the first one. We do not keep the whole list of literals, to
+	// simplify the fast path.
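+	// For example, a matcher built from foo.*bar.*baz (anchored internally by
+	// NewFastRegexMatcher) yields prefix "foo", suffix "baz", and contains
+	// "bar"; MatchString can then reject most non-matching inputs with three
+	// cheap string checks before running the full regexp engine.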
+ for i := 1; i < len(sub)-1; i++ { + if sub[i].Op == syntax.OpLiteral { + contains = string(sub[i].Rune) + break + } + } + return } diff --git a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go index 1d6716beb69a0..899ab2738cd97 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go @@ -90,7 +90,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { } errs = append(errs, &Error{ Group: g.Name, - Rule: i, + Rule: i + 1, RuleName: ruleName.Value, Err: node, }) diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index bc2cfc1da5879..3c4629f7b0298 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -1978,20 +1978,23 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if k > inputVecLen { resultSize = inputVecLen } - if op == parser.STDVAR || op == parser.STDDEV { - result[groupingKey].value = 0.0 - } else if op == parser.TOPK || op == parser.QUANTILE { + switch op { + case parser.STDVAR, parser.STDDEV: + result[groupingKey].value = 0 + case parser.TOPK, parser.QUANTILE: result[groupingKey].heap = make(vectorByValueHeap, 0, resultSize) heap.Push(&result[groupingKey].heap, &Sample{ Point: Point{V: s.V}, Metric: s.Metric, }) - } else if op == parser.BOTTOMK { + case parser.BOTTOMK: result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, resultSize) heap.Push(&result[groupingKey].reverseHeap, &Sample{ Point: Point{V: s.V}, Metric: s.Metric, }) + case parser.GROUP: + result[groupingKey].value = 1 } continue } @@ -2004,6 +2007,9 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without group.groupCount++ group.mean += (s.V - group.mean) / float64(group.groupCount) + case parser.GROUP: + // Do nothing. Required to avoid the panic in `default:` below. + case parser.MAX: if group.value < s.V || math.IsNaN(group.value) { group.value = s.V diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index f7c31c1aa524e..d066a3702cd1f 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -92,6 +92,7 @@ AVG BOTTOMK COUNT COUNT_VALUES +GROUP MAX MIN QUANTILE @@ -535,7 +536,7 @@ metric : metric_identifier label_set ; -metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK; +metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK; label_set : LEFT_BRACE label_set_list RIGHT_BRACE { $$ = labels.New($2...) } @@ -635,10 +636,10 @@ series_value : IDENTIFIER * Keyword lists. */ -aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK ; +aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK ; // inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name. 
-maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK; +maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK; unary_op : ADD | SUB; diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index aeae239c0fdf7..75470a8079a22 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -78,30 +78,31 @@ const AVG = 57387 const BOTTOMK = 57388 const COUNT = 57389 const COUNT_VALUES = 57390 -const MAX = 57391 -const MIN = 57392 -const QUANTILE = 57393 -const STDDEV = 57394 -const STDVAR = 57395 -const SUM = 57396 -const TOPK = 57397 -const aggregatorsEnd = 57398 -const keywordsStart = 57399 -const BOOL = 57400 -const BY = 57401 -const GROUP_LEFT = 57402 -const GROUP_RIGHT = 57403 -const IGNORING = 57404 -const OFFSET = 57405 -const ON = 57406 -const WITHOUT = 57407 -const keywordsEnd = 57408 -const startSymbolsStart = 57409 -const START_METRIC = 57410 -const START_SERIES_DESCRIPTION = 57411 -const START_EXPRESSION = 57412 -const START_METRIC_SELECTOR = 57413 -const startSymbolsEnd = 57414 +const GROUP = 57391 +const MAX = 57392 +const MIN = 57393 +const QUANTILE = 57394 +const STDDEV = 57395 +const STDVAR = 57396 +const SUM = 57397 +const TOPK = 57398 +const aggregatorsEnd = 57399 +const keywordsStart = 57400 +const BOOL = 57401 +const BY = 57402 +const GROUP_LEFT = 57403 +const GROUP_RIGHT = 57404 +const IGNORING = 57405 +const OFFSET = 57406 +const ON = 57407 +const WITHOUT = 57408 +const keywordsEnd = 57409 +const startSymbolsStart = 57410 +const START_METRIC = 57411 +const START_SERIES_DESCRIPTION = 57412 +const START_EXPRESSION = 57413 +const START_METRIC_SELECTOR = 57414 +const startSymbolsEnd = 57415 var yyToknames = [...]string{ "$end", @@ -152,6 +153,7 @@ var yyToknames = [...]string{ "BOTTOMK", "COUNT", "COUNT_VALUES", + "GROUP", "MAX", "MIN", "QUANTILE", @@ -183,241 +185,248 @@ const yyEofCode = 1 const yyErrCode = 2 const yyInitialStackSize = 16 -//line generated_parser.y:709 +//line generated_parser.y:710 //line yacctab:1 var yyExca = [...]int{ -1, 1, 1, -1, -2, 0, - -1, 31, - 1, 119, - 10, 119, - 22, 119, + -1, 32, + 1, 120, + 10, 120, + 22, 120, -2, 0, - -1, 53, - 2, 131, - 15, 131, - 59, 131, - 65, 131, - -2, 89, -1, 54, 2, 132, 15, 132, - 59, 132, - 65, 132, - -2, 90, + 60, 132, + 66, 132, + -2, 89, -1, 55, 2, 133, 15, 133, - 59, 133, - 65, 133, - -2, 92, + 60, 133, + 66, 133, + -2, 90, -1, 56, 2, 134, 15, 134, - 59, 134, - 65, 134, - -2, 93, + 60, 134, + 66, 134, + -2, 92, -1, 57, 2, 135, 15, 135, - 59, 135, - 65, 135, - -2, 98, + 60, 135, + 66, 135, + -2, 93, -1, 58, 2, 136, 15, 136, - 59, 136, - 65, 136, - -2, 100, + 60, 136, + 66, 136, + -2, 94, -1, 59, 2, 137, 15, 137, - 59, 137, - 65, 137, - -2, 102, + 60, 137, + 66, 137, + -2, 99, -1, 60, 2, 138, 15, 138, - 59, 138, - 65, 138, - -2, 103, + 60, 138, + 66, 138, + -2, 101, -1, 61, 2, 139, 15, 139, - 59, 139, - 65, 139, - -2, 104, + 60, 139, + 66, 139, + -2, 103, -1, 62, 2, 140, 15, 140, - 59, 140, - 65, 140, - -2, 105, + 60, 140, + 66, 140, + -2, 104, -1, 63, 
2, 141, 15, 141, - 59, 141, - 65, 141, + 60, 141, + 66, 141, + -2, 105, + -1, 64, + 2, 142, + 15, 142, + 60, 142, + 66, 142, -2, 106, - -1, 173, - 12, 180, - 13, 180, - 16, 180, - 17, 180, - 23, 180, - 26, 180, - 32, 180, - 33, 180, - 36, 180, - 42, 180, - 45, 180, - 46, 180, - 47, 180, - 48, 180, - 49, 180, - 50, 180, - 51, 180, - 52, 180, - 53, 180, - 54, 180, - 55, 180, - 59, 180, - 63, 180, + -1, 65, + 2, 143, + 15, 143, + 60, 143, + 66, 143, + -2, 107, + -1, 175, + 12, 183, + 13, 183, + 16, 183, + 17, 183, + 23, 183, + 26, 183, + 32, 183, + 33, 183, + 36, 183, + 42, 183, + 45, 183, + 46, 183, + 47, 183, + 48, 183, + 49, 183, + 50, 183, + 51, 183, + 52, 183, + 53, 183, + 54, 183, + 55, 183, + 56, 183, + 60, 183, + 64, 183, -2, 0, - -1, 174, - 12, 180, - 13, 180, - 16, 180, - 17, 180, - 23, 180, - 26, 180, - 32, 180, - 33, 180, - 36, 180, - 42, 180, - 45, 180, - 46, 180, - 47, 180, - 48, 180, - 49, 180, - 50, 180, - 51, 180, - 52, 180, - 53, 180, - 54, 180, - 55, 180, - 59, 180, - 63, 180, + -1, 176, + 12, 183, + 13, 183, + 16, 183, + 17, 183, + 23, 183, + 26, 183, + 32, 183, + 33, 183, + 36, 183, + 42, 183, + 45, 183, + 46, 183, + 47, 183, + 48, 183, + 49, 183, + 50, 183, + 51, 183, + 52, 183, + 53, 183, + 54, 183, + 55, 183, + 56, 183, + 60, 183, + 64, 183, -2, 0, - -1, 190, - 19, 178, + -1, 192, + 19, 181, -2, 0, - -1, 237, - 19, 179, + -1, 240, + 19, 182, -2, 0, } const yyPrivate = 57344 -const yyLast = 553 +const yyLast = 543 var yyAct = [...]int{ - 243, 194, 33, 133, 233, 234, 165, 166, 105, 71, - 94, 93, 96, 6, 173, 174, 196, 171, 97, 172, - 118, 92, 227, 112, 168, 246, 205, 226, 95, 107, - 211, 225, 113, 160, 249, 52, 247, 241, 98, 106, - 169, 98, 240, 221, 7, 222, 207, 208, 89, 111, - 209, 100, 224, 101, 159, 239, 220, 99, 114, 197, - 199, 201, 202, 210, 212, 215, 216, 217, 218, 219, - 91, 92, 198, 200, 203, 204, 206, 213, 214, 2, - 3, 4, 5, 76, 77, 96, 244, 102, 191, 167, - 66, 97, 190, 29, 86, 87, 136, 73, 89, 90, - 107, 146, 140, 143, 138, 189, 139, 72, 142, 44, - 106, 238, 248, 157, 75, 67, 104, 170, 135, 158, - 91, 141, 175, 176, 177, 178, 179, 180, 181, 182, - 183, 184, 185, 186, 187, 188, 119, 120, 121, 122, + 246, 196, 34, 135, 236, 237, 167, 168, 107, 73, + 98, 96, 95, 175, 176, 120, 99, 198, 173, 230, + 174, 170, 249, 97, 114, 229, 115, 208, 228, 53, + 109, 214, 162, 250, 247, 252, 100, 171, 100, 244, + 108, 169, 224, 6, 243, 68, 225, 210, 211, 227, + 113, 212, 102, 161, 103, 223, 94, 242, 116, 101, + 199, 201, 203, 204, 205, 213, 215, 218, 219, 220, + 221, 222, 94, 30, 200, 202, 206, 207, 209, 216, + 217, 98, 104, 91, 78, 79, 80, 99, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 138, 91, + 92, 144, 7, 148, 142, 145, 93, 140, 241, 141, + 2, 3, 4, 5, 143, 137, 139, 251, 77, 172, + 31, 160, 93, 137, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, - 30, 150, 196, 162, 152, 1, 149, 137, 223, 10, - 164, 236, 205, 43, 135, 167, 211, 148, 110, 68, - 193, 228, 42, 109, 168, 229, 230, 231, 232, 235, - 153, 155, 207, 208, 108, 51, 209, 41, 9, 9, - 169, 154, 156, 40, 237, 197, 199, 201, 202, 210, - 212, 215, 216, 217, 218, 219, 117, 39, 198, 200, - 203, 204, 206, 213, 214, 46, 66, 73, 48, 21, - 47, 38, 37, 145, 242, 134, 49, 72, 245, 64, - 8, 115, 135, 70, 31, 17, 18, 144, 36, 19, - 116, 35, 250, 34, 163, 65, 74, 251, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, 63, 161, - 192, 69, 13, 50, 195, 32, 23, 46, 66, 151, - 48, 21, 47, 45, 103, 0, 0, 0, 49, 0, - 0, 64, 0, 0, 0, 0, 0, 17, 18, 0, - 0, 19, 0, 0, 
0, 0, 0, 65, 0, 0, - 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, - 63, 0, 46, 66, 13, 48, 21, 47, 23, 0, - 0, 0, 0, 49, 0, 0, 64, 0, 0, 0, - 0, 0, 17, 18, 0, 0, 19, 16, 66, 0, - 0, 21, 65, 0, 0, 53, 54, 55, 56, 57, - 58, 59, 60, 61, 62, 63, 0, 17, 18, 13, - 0, 19, 0, 23, 0, 16, 29, 0, 92, 21, - 11, 12, 14, 15, 20, 22, 24, 25, 26, 27, - 28, 77, 0, 0, 13, 17, 18, 0, 23, 19, - 0, 86, 87, 0, 0, 89, 0, 0, 11, 12, - 14, 15, 20, 22, 24, 25, 26, 27, 28, 92, - 0, 0, 13, 0, 0, 147, 23, 91, 0, 0, - 0, 76, 77, 78, 0, 79, 80, 81, 82, 83, - 84, 85, 86, 87, 88, 0, 89, 90, 0, 0, - 0, 0, 0, 92, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 76, 77, 78, 91, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 0, - 89, 90, 0, 92, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 76, 77, 78, 0, 79, - 80, 81, 91, 83, 84, 85, 86, 87, 88, 0, - 89, 90, 0, 92, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 76, 77, 78, 0, 79, - 80, 0, 91, 83, 84, 0, 86, 87, 88, 0, - 89, 90, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 91, + 133, 134, 1, 193, 198, 164, 154, 192, 152, 75, + 226, 10, 166, 151, 208, 239, 44, 169, 214, 74, + 191, 70, 195, 231, 150, 159, 170, 232, 233, 234, + 235, 238, 155, 157, 210, 211, 43, 52, 212, 45, + 9, 9, 171, 156, 158, 69, 240, 199, 201, 203, + 204, 205, 213, 215, 218, 219, 220, 221, 222, 42, + 41, 200, 202, 206, 207, 209, 216, 217, 47, 68, + 112, 49, 22, 48, 109, 111, 147, 245, 136, 50, + 119, 248, 66, 75, 108, 137, 110, 8, 18, 19, + 106, 32, 20, 74, 40, 253, 39, 38, 67, 72, + 254, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 117, 146, 37, 13, 118, 36, 33, + 24, 47, 68, 35, 49, 22, 48, 165, 76, 163, + 194, 71, 50, 51, 197, 66, 153, 46, 105, 0, + 0, 18, 19, 0, 0, 20, 0, 0, 0, 0, + 0, 67, 0, 0, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 0, 47, 68, 13, + 49, 22, 48, 24, 0, 0, 0, 0, 50, 0, + 0, 66, 0, 0, 0, 0, 0, 18, 19, 0, + 0, 20, 17, 68, 0, 0, 22, 67, 0, 0, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 18, 19, 0, 13, 20, 17, 30, 24, + 0, 22, 0, 0, 0, 11, 12, 14, 15, 16, + 21, 23, 25, 26, 27, 28, 29, 18, 19, 0, + 13, 20, 0, 0, 24, 0, 0, 0, 0, 0, + 11, 12, 14, 15, 16, 21, 23, 25, 26, 27, + 28, 29, 94, 0, 0, 13, 0, 0, 149, 24, + 0, 0, 0, 0, 78, 79, 80, 0, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 0, 91, + 92, 0, 0, 94, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 78, 79, 80, 0, 81, + 82, 83, 93, 85, 86, 87, 88, 89, 90, 0, + 91, 92, 0, 0, 94, 0, 0, 0, 0, 0, + 0, 0, 0, 94, 0, 0, 78, 79, 80, 0, + 81, 82, 94, 93, 85, 86, 79, 88, 89, 90, + 0, 91, 92, 0, 78, 79, 88, 89, 0, 0, + 91, 0, 0, 0, 0, 88, 89, 0, 0, 91, + 92, 0, 0, 0, 93, 0, 0, 0, 0, 0, + 0, 0, 0, 93, 0, 0, 0, 0, 0, 0, + 0, 0, 93, } var yyPact = [...]int{ - 11, 34, 353, 353, 255, 325, -1000, -1000, -1000, 80, + 41, 92, 355, 355, 259, 330, -1000, -1000, -1000, 60, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 215, - -1000, 112, -1000, 429, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 26, 23, -1000, 300, -1000, - 300, 77, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 98, -1000, -1000, 166, - -1000, -1000, 21, -1000, 10, -1000, -38, -38, -38, -38, - -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, - -38, 223, 155, 23, -47, -1000, 106, 106, 203, -1000, - 395, 7, -1000, 149, -1000, -1000, 152, -1000, -1000, 95, - -1000, 31, -1000, 148, 300, -1000, -45, -46, -1000, 300, - 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, - 300, 300, 
300, -1000, -1000, -1000, 86, -1000, -1000, -1000, - -1000, 150, -1000, -1000, 36, -1000, 429, -1000, -1000, 27, - -1000, 29, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 3, -2, -1000, -1000, -1000, -1000, 72, 72, - 354, 106, 106, 106, 106, 7, 57, 57, 57, 489, - 459, 57, 57, 489, 7, 7, 57, 7, 354, -1000, - 109, -1000, 35, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 231, -1000, 116, -1000, 58, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 21, 23, -1000, 305, + -1000, 305, 32, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 222, -1000, + -1000, 218, -1000, -1000, 22, -1000, 4, -1000, -44, -44, + -44, -44, -44, -44, -44, -44, -44, -44, -44, -44, + -44, -44, -44, 226, 114, 23, -50, -1000, 99, 99, + 206, -1000, 398, 42, -1000, 156, -1000, -1000, 154, -1000, + -1000, 157, -1000, 30, -1000, 150, 305, -1000, -45, -48, + -1000, 305, 305, 305, 305, 305, 305, 305, 305, 305, + 305, 305, 305, 305, 305, -1000, -1000, -1000, 151, -1000, + -1000, -1000, -1000, 152, -1000, -1000, 35, -1000, 58, -1000, + -1000, 28, -1000, 26, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 1, -5, -1000, -1000, -1000, -1000, + 24, 24, 469, 99, 99, 99, 99, 42, 478, 478, + 478, 460, 429, 478, 478, 460, 42, 42, 478, 42, + 469, -1000, 106, -1000, 37, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 300, -1000, -1000, -1000, -1000, 69, 69, 1, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 17, 110, -1000, -1000, - 14, -1000, 429, -1000, -1000, -1000, 69, -1000, -1000, -1000, - -1000, -1000, + -1000, -1000, -1000, -1000, 305, -1000, -1000, -1000, -1000, 17, + 17, -2, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 14, + 115, -1000, -1000, 15, -1000, 58, -1000, -1000, -1000, 17, + -1000, -1000, -1000, -1000, -1000, } var yyPgo = [...]int{ - 0, 274, 8, 273, 1, 269, 264, 185, 263, 159, - 261, 230, 9, 260, 5, 4, 259, 246, 0, 6, - 244, 7, 243, 11, 58, 241, 240, 2, 238, 237, - 10, 231, 35, 222, 221, 207, 206, 193, 187, 172, - 163, 109, 3, 161, 155, 150, + 0, 288, 8, 287, 1, 286, 284, 187, 283, 161, + 281, 237, 9, 280, 5, 4, 279, 278, 0, 6, + 277, 7, 273, 12, 58, 268, 267, 2, 265, 264, + 11, 263, 29, 247, 246, 244, 230, 210, 209, 186, + 166, 189, 3, 165, 152, 120, } var yyR1 = [...]int{ @@ -431,15 +440,15 @@ var yyR1 = [...]int{ 39, 39, 39, 40, 41, 41, 41, 32, 32, 32, 1, 1, 1, 2, 2, 2, 2, 11, 11, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 9, 9, 9, - 9, 10, 10, 10, 12, 12, 12, 12, 45, 17, - 17, 17, 17, 16, 16, 16, 16, 16, 20, 20, - 20, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, + 9, 9, 10, 10, 10, 12, 12, 12, 12, 45, + 17, 17, 17, 17, 16, 16, 16, 16, 16, 20, + 20, 20, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 8, 8, 5, 5, 5, - 5, 34, 19, 21, 21, 18, 42, 38, 43, 43, - 15, 15, + 6, 6, 6, 6, 6, 6, 6, 6, 8, 8, + 5, 5, 5, 5, 34, 19, 21, 21, 18, 42, + 38, 43, 43, 15, 15, } var yyR2 = [...]int{ @@ -453,73 +462,73 @@ var yyR2 = [...]int{ 5, 4, 3, 2, 2, 1, 1, 3, 4, 2, 3, 1, 2, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 3, 4, 2, - 0, 3, 1, 2, 3, 3, 2, 1, 2, 0, - 3, 2, 1, 1, 3, 1, 3, 4, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, + 2, 0, 3, 1, 2, 3, 3, 2, 1, 2, + 0, 3, 2, 1, 1, 3, 1, 3, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 2, 2, 1, 1, 1, 0, 1, - 0, 1, + 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, + 1, 0, 1, 0, 1, } var yyChk = [...]int{ - -1000, -44, 68, 69, 70, 71, 2, 10, -11, -7, - -9, 45, 46, 59, 47, 48, 12, 32, 33, 36, - 49, 16, 50, 63, 51, 52, 53, 54, 55, 13, - -45, -11, 10, -27, -22, -25, -28, -33, -34, -35, - -37, -38, -39, -40, -41, -3, 12, 17, 15, 23, - -8, -7, -32, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 26, 42, 13, -41, -9, -10, - 18, -12, 12, 2, -17, 2, 26, 27, 28, 30, - 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, - 42, 63, 14, -23, -30, 2, 59, 65, 15, -30, - -27, -27, -32, -1, 18, -2, 12, 2, 18, 7, - 2, 28, 2, 22, -24, -31, -26, -36, 58, -24, - -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, - -24, -24, -24, -42, 2, 9, -42, 2, -30, -23, - -14, 15, 2, -14, -29, 20, -27, 20, 18, 7, - 2, -5, 2, 28, 39, 29, 40, 18, -12, 23, - 2, -16, 5, -20, 12, -19, -21, 17, 26, 42, - -27, 62, 64, 60, 61, -27, -27, -27, -27, -27, - -27, -27, -27, -27, -27, -27, -27, -27, -27, 19, - 6, 2, -13, 20, -4, -6, 2, 45, 58, 46, - 59, 47, 48, 60, 61, 12, 62, 32, 33, 36, - 49, 16, 50, 63, 64, 51, 52, 53, 54, 55, - 20, 7, 18, -2, 23, 2, 24, 24, -21, -19, - -19, -14, -14, -15, -14, -15, -43, -42, 2, 20, - 7, 2, -27, -18, 17, -18, 24, 19, 2, 20, - -4, -18, + -1000, -44, 69, 70, 71, 72, 2, 10, -11, -7, + -9, 45, 46, 60, 47, 48, 49, 12, 32, 33, + 36, 50, 16, 51, 64, 52, 53, 54, 55, 56, + 13, -45, -11, 10, -27, -22, -25, -28, -33, -34, + -35, -37, -38, -39, -40, -41, -3, 12, 17, 15, + 23, -8, -7, -32, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 26, 42, 13, -41, + -9, -10, 18, -12, 12, 2, -17, 2, 26, 27, + 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 41, 42, 64, 14, -23, -30, 2, 60, 66, + 15, -30, -27, -27, -32, -1, 18, -2, 12, 2, + 18, 7, 2, 28, 2, 22, -24, -31, -26, -36, + 59, -24, -24, -24, -24, -24, -24, -24, -24, -24, + -24, -24, -24, -24, -24, -42, 2, 9, -42, 2, + -30, -23, -14, 15, 2, -14, -29, 20, -27, 20, + 18, 7, 2, -5, 2, 28, 39, 29, 40, 18, + -12, 23, 2, -16, 5, -20, 12, -19, -21, 17, + 26, 42, -27, 63, 65, 61, 62, -27, -27, -27, + -27, -27, -27, -27, -27, -27, -27, -27, -27, -27, + -27, 19, 6, 2, -13, 20, -4, -6, 2, 45, + 59, 46, 60, 47, 48, 49, 61, 62, 12, 63, + 32, 33, 36, 50, 16, 51, 64, 65, 52, 53, + 54, 55, 56, 20, 7, 18, -2, 23, 2, 24, + 24, -21, -19, -19, -14, -14, -15, -14, -15, -43, + -42, 2, 20, 7, 2, -27, -18, 17, -18, 24, + 19, 2, 20, -4, -18, } var yyDef = [...]int{ - 0, -2, 110, 110, 0, 0, 7, 6, 1, 110, + 0, -2, 111, 111, 0, 0, 7, 6, 1, 111, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, - 98, 99, 100, 101, 102, 103, 104, 105, 106, 0, - 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 0, 94, 171, 0, 177, - 0, 75, 76, -2, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -2, 165, 166, 0, 5, 87, 0, - 109, 112, 0, 117, 118, 122, 41, 41, 41, 41, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 0, 95, 174, 0, + 180, 0, 75, 76, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, 168, 169, 0, 5, + 87, 0, 110, 113, 0, 118, 119, 123, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 0, 0, 0, 21, 22, 0, 0, 0, 58, - 0, 73, 74, 0, 79, 81, 0, 86, 107, 0, - 113, 0, 116, 121, 0, 40, 45, 46, 42, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 65, 66, 176, 0, 72, 19, 20, - 23, 0, 52, 24, 0, 60, 62, 64, 77, 0, - 82, 0, 85, 167, 168, 169, 170, 108, 111, 114, - 115, 120, 123, 125, 128, 129, 130, 172, 0, 0, - 25, 0, 0, -2, -2, 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, 
39, 67, - -2, 71, 0, 51, 54, 56, 57, 142, 143, 144, + 41, 41, 41, 0, 0, 0, 21, 22, 0, 0, + 0, 58, 0, 73, 74, 0, 79, 81, 0, 86, + 108, 0, 114, 0, 117, 122, 0, 40, 45, 46, + 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 65, 66, 179, 0, 72, + 19, 20, 23, 0, 52, 24, 0, 60, 62, 64, + 77, 0, 82, 0, 85, 170, 171, 172, 173, 109, + 112, 115, 116, 121, 124, 126, 129, 130, 131, 175, + 0, 0, 25, 0, 0, -2, -2, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 67, -2, 71, 0, 51, 54, 56, 57, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, - 59, 63, 78, 80, 83, 84, 0, 0, 0, 173, - 174, 43, 44, 47, 181, 48, 0, -2, 70, 49, - 0, 55, 61, 124, 175, 126, 0, 68, 69, 50, - 53, 127, + 165, 166, 167, 59, 63, 78, 80, 83, 84, 0, + 0, 0, 176, 177, 43, 44, 47, 184, 48, 0, + -2, 70, 49, 0, 55, 61, 125, 178, 127, 0, + 68, 69, 50, 53, 128, } var yyTok1 = [...]int{ @@ -534,7 +543,7 @@ var yyTok2 = [...]int{ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, + 72, 73, } var yyTok3 = [...]int{ 0, @@ -879,62 +888,62 @@ yydefault: case 1: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:164 +//line generated_parser.y:165 { yylex.(*parser).generatedParserResult = yyDollar[2].labels } case 3: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:167 +//line generated_parser.y:168 { yylex.(*parser).addParseErrf(PositionRange{}, "no expression found in input") } case 4: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:169 +//line generated_parser.y:170 { yylex.(*parser).generatedParserResult = yyDollar[2].node } case 5: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:171 +//line generated_parser.y:172 { yylex.(*parser).generatedParserResult = yyDollar[2].node } case 7: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:174 +//line generated_parser.y:175 { yylex.(*parser).unexpected("", "") } case 19: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:196 +//line generated_parser.y:197 { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[2].node, yyDollar[3].node) } case 20: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:198 +//line generated_parser.y:199 { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[3].node, yyDollar[2].node) } case 21: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:200 +//line generated_parser.y:201 { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, yyDollar[2].node) } case 22: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:202 +//line generated_parser.y:203 { yylex.(*parser).unexpected("aggregation", "") yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, Expressions{}) } case 23: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:210 +//line generated_parser.y:211 { yyVAL.node = &AggregateExpr{ Grouping: yyDollar[2].strings, @@ -942,7 +951,7 @@ yydefault: } case 24: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:216 +//line generated_parser.y:217 { yyVAL.node = &AggregateExpr{ Grouping: yyDollar[2].strings, @@ -951,97 +960,97 @@ yydefault: } case 25: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:229 +//line generated_parser.y:230 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 26: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:230 +//line 
generated_parser.y:231 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 27: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:231 +//line generated_parser.y:232 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 28: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:232 +//line generated_parser.y:233 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 29: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:233 +//line generated_parser.y:234 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 30: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:234 +//line generated_parser.y:235 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 31: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:235 +//line generated_parser.y:236 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 32: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:236 +//line generated_parser.y:237 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 33: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:237 +//line generated_parser.y:238 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 34: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:238 +//line generated_parser.y:239 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 35: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:239 +//line generated_parser.y:240 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 36: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:240 +//line generated_parser.y:241 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 37: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:241 +//line generated_parser.y:242 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 38: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:242 +//line generated_parser.y:243 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 39: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:243 +//line generated_parser.y:244 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 41: yyDollar = yyS[yypt-0 : yypt+1] -//line generated_parser.y:251 +//line generated_parser.y:252 { yyVAL.node = &BinaryExpr{ VectorMatching: &VectorMatching{Card: CardOneToOne}, @@ -1049,7 +1058,7 @@ yydefault: } case 42: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:256 +//line generated_parser.y:257 { yyVAL.node = &BinaryExpr{ VectorMatching: &VectorMatching{Card: CardOneToOne}, @@ -1058,14 +1067,14 @@ yydefault: } 
case 43: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:264 +//line generated_parser.y:265 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings } case 44: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:269 +//line generated_parser.y:270 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings @@ -1073,7 +1082,7 @@ yydefault: } case 47: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:279 +//line generated_parser.y:280 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardManyToOne @@ -1081,7 +1090,7 @@ yydefault: } case 48: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:285 +//line generated_parser.y:286 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardOneToMany @@ -1089,51 +1098,51 @@ yydefault: } case 49: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:294 +//line generated_parser.y:295 { yyVAL.strings = yyDollar[2].strings } case 50: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:296 +//line generated_parser.y:297 { yyVAL.strings = yyDollar[2].strings } case 51: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:298 +//line generated_parser.y:299 { yyVAL.strings = []string{} } case 52: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:300 +//line generated_parser.y:301 { yylex.(*parser).unexpected("grouping opts", "\"(\"") yyVAL.strings = nil } case 53: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:306 +//line generated_parser.y:307 { yyVAL.strings = append(yyDollar[1].strings, yyDollar[3].item.Val) } case 54: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:308 +//line generated_parser.y:309 { yyVAL.strings = []string{yyDollar[1].item.Val} } case 55: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:310 +//line generated_parser.y:311 { yylex.(*parser).unexpected("grouping opts", "\",\" or \")\"") yyVAL.strings = yyDollar[1].strings } case 56: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:314 +//line generated_parser.y:315 { if !isLabel(yyDollar[1].item.Val) { yylex.(*parser).unexpected("grouping opts", "label") @@ -1142,14 +1151,14 @@ yydefault: } case 57: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:321 +//line generated_parser.y:322 { yylex.(*parser).unexpected("grouping opts", "label") yyVAL.item = Item{} } case 58: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:329 +//line generated_parser.y:330 { fn, exist := getFunction(yyDollar[1].item.Val) if !exist { @@ -1166,58 +1175,58 @@ yydefault: } case 59: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:346 +//line generated_parser.y:347 { yyVAL.node = yyDollar[2].node } case 60: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:348 +//line generated_parser.y:349 { yyVAL.node = Expressions{} } case 61: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:352 +//line generated_parser.y:353 { yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr)) } case 62: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:354 +//line generated_parser.y:355 { yyVAL.node = Expressions{yyDollar[1].node.(Expr)} } case 63: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:356 +//line generated_parser.y:357 { yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args") yyVAL.node = yyDollar[1].node } case 64: yyDollar = 
yyS[yypt-3 : yypt+1] -//line generated_parser.y:367 +//line generated_parser.y:368 { yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)} } case 65: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:375 +//line generated_parser.y:376 { yylex.(*parser).addOffset(yyDollar[1].node, yyDollar[3].duration) yyVAL.node = yyDollar[1].node } case 66: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:380 +//line generated_parser.y:381 { yylex.(*parser).unexpected("offset", "duration") yyVAL.node = yyDollar[1].node } case 67: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:388 +//line generated_parser.y:389 { var errMsg string vs, ok := yyDollar[1].node.(*VectorSelector) @@ -1240,7 +1249,7 @@ yydefault: } case 68: yyDollar = yyS[yypt-6 : yypt+1] -//line generated_parser.y:411 +//line generated_parser.y:412 { yyVAL.node = &SubqueryExpr{ Expr: yyDollar[1].node.(Expr), @@ -1252,35 +1261,35 @@ yydefault: } case 69: yyDollar = yyS[yypt-6 : yypt+1] -//line generated_parser.y:421 +//line generated_parser.y:422 { yylex.(*parser).unexpected("subquery selector", "\"]\"") yyVAL.node = yyDollar[1].node } case 70: yyDollar = yyS[yypt-5 : yypt+1] -//line generated_parser.y:423 +//line generated_parser.y:424 { yylex.(*parser).unexpected("subquery selector", "duration or \"]\"") yyVAL.node = yyDollar[1].node } case 71: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:425 +//line generated_parser.y:426 { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"") yyVAL.node = yyDollar[1].node } case 72: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:427 +//line generated_parser.y:428 { yylex.(*parser).unexpected("subquery selector", "duration") yyVAL.node = yyDollar[1].node } case 73: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:437 +//line generated_parser.y:438 { if nl, ok := yyDollar[2].node.(*NumberLiteral); ok { if yyDollar[1].item.Typ == SUB { @@ -1294,7 +1303,7 @@ yydefault: } case 74: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:455 +//line generated_parser.y:456 { vs := yyDollar[2].node.(*VectorSelector) vs.PosRange = mergeRanges(&yyDollar[1].item, vs) @@ -1304,7 +1313,7 @@ yydefault: } case 75: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:463 +//line generated_parser.y:464 { vs := &VectorSelector{ Name: yyDollar[1].item.Val, @@ -1316,7 +1325,7 @@ yydefault: } case 76: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:473 +//line generated_parser.y:474 { vs := yyDollar[1].node.(*VectorSelector) yylex.(*parser).assembleVectorSelector(vs) @@ -1324,7 +1333,7 @@ yydefault: } case 77: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:481 +//line generated_parser.y:482 { yyVAL.node = &VectorSelector{ LabelMatchers: yyDollar[2].matchers, @@ -1333,7 +1342,7 @@ yydefault: } case 78: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:488 +//line generated_parser.y:489 { yyVAL.node = &VectorSelector{ LabelMatchers: yyDollar[2].matchers, @@ -1342,7 +1351,7 @@ yydefault: } case 79: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:495 +//line generated_parser.y:496 { yyVAL.node = &VectorSelector{ LabelMatchers: []*labels.Matcher{}, @@ -1351,7 +1360,7 @@ yydefault: } case 80: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:504 +//line generated_parser.y:505 { if yyDollar[1].matchers != nil { yyVAL.matchers = append(yyDollar[1].matchers, yyDollar[3].matcher) @@ -1361,194 +1370,194 @@ yydefault: } case 81: yyDollar = 
yyS[yypt-1 : yypt+1] -//line generated_parser.y:512 +//line generated_parser.y:513 { yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher} } case 82: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:514 +//line generated_parser.y:515 { yylex.(*parser).unexpected("label matching", "\",\" or \"}\"") yyVAL.matchers = yyDollar[1].matchers } case 83: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:518 +//line generated_parser.y:519 { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } case 84: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:520 +//line generated_parser.y:521 { yylex.(*parser).unexpected("label matching", "string") yyVAL.matcher = nil } case 85: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:522 +//line generated_parser.y:523 { yylex.(*parser).unexpected("label matching", "label matching operator") yyVAL.matcher = nil } case 86: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:524 +//line generated_parser.y:525 { yylex.(*parser).unexpected("label matching", "identifier or \"}\"") yyVAL.matcher = nil } case 87: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:532 +//line generated_parser.y:533 { yyVAL.labels = append(yyDollar[2].labels, labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val}) sort.Sort(yyVAL.labels) } case 88: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:534 +//line generated_parser.y:535 { yyVAL.labels = yyDollar[1].labels } - case 107: + case 108: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:541 +//line generated_parser.y:542 { yyVAL.labels = labels.New(yyDollar[2].labels...) } - case 108: + case 109: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:543 +//line generated_parser.y:544 { yyVAL.labels = labels.New(yyDollar[2].labels...) 
} - case 109: + case 110: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:545 +//line generated_parser.y:546 { yyVAL.labels = labels.New() } - case 110: + case 111: yyDollar = yyS[yypt-0 : yypt+1] -//line generated_parser.y:547 +//line generated_parser.y:548 { yyVAL.labels = labels.New() } - case 111: + case 112: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:551 +//line generated_parser.y:552 { yyVAL.labels = append(yyDollar[1].labels, yyDollar[3].label) } - case 112: + case 113: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:553 +//line generated_parser.y:554 { yyVAL.labels = []labels.Label{yyDollar[1].label} } - case 113: + case 114: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:555 +//line generated_parser.y:556 { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.labels = yyDollar[1].labels } - case 114: + case 115: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:560 +//line generated_parser.y:561 { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 115: + case 116: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:562 +//line generated_parser.y:563 { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 116: + case 117: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:564 +//line generated_parser.y:565 { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 117: + case 118: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:566 +//line generated_parser.y:567 { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 118: + case 119: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:574 +//line generated_parser.y:575 { yylex.(*parser).generatedParserResult = &seriesDescription{ labels: yyDollar[1].labels, values: yyDollar[2].series, } } - case 119: + case 120: yyDollar = yyS[yypt-0 : yypt+1] -//line generated_parser.y:583 +//line generated_parser.y:584 { yyVAL.series = []SequenceValue{} } - case 120: + case 121: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:585 +//line generated_parser.y:586 { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} - case 121: + case 122: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:587 +//line generated_parser.y:588 { yyVAL.series = yyDollar[1].series } - case 122: + case 123: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:589 +//line generated_parser.y:590 { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 123: + case 124: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:593 +//line generated_parser.y:594 { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 124: + case 125: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:595 +//line generated_parser.y:596 { yyVAL.series = []SequenceValue{} for i := uint64(0); i < yyDollar[3].uint; i++ { yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 125: + case 126: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:602 +//line generated_parser.y:603 { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 126: + case 127: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:604 +//line generated_parser.y:605 { yyVAL.series = []SequenceValue{} for i := uint64(0); i <= yyDollar[3].uint; i++ { yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 127: + case 128: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:611 +//line generated_parser.y:612 { yyVAL.series = []SequenceValue{} for i := uint64(0); i <= yyDollar[4].uint; i++ { @@ -1556,45 +1565,45 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 128: + case 129: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:621 +//line generated_parser.y:622 { if yyDollar[1].item.Val != "stale" { yylex.(*parser).unexpected("series values", "number or \"stale\"") } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 171: + case 174: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:652 +//line generated_parser.y:653 { yyVAL.node = &NumberLiteral{ Val: yylex.(*parser).number(yyDollar[1].item.Val), PosRange: yyDollar[1].item.PositionRange(), } } - case 172: + case 175: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:660 +//line generated_parser.y:661 { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 173: + case 176: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:662 +//line generated_parser.y:663 { yyVAL.float = yyDollar[2].float } - case 174: + case 177: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:663 +//line generated_parser.y:664 { yyVAL.float = -yyDollar[2].float } - case 175: + case 178: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:667 +//line generated_parser.y:668 { var err error yyVAL.uint, err = strconv.ParseUint(yyDollar[1].item.Val, 10, 64) @@ -1602,9 +1611,9 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 176: + case 179: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:677 +//line generated_parser.y:678 { var err error yyVAL.duration, err = parseDuration(yyDollar[1].item.Val) @@ -1612,24 +1621,24 @@ yydefault: yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err) } } - case 177: + case 180: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:688 +//line generated_parser.y:689 { yyVAL.node = &StringLiteral{ Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), PosRange: yyDollar[1].item.PositionRange(), } } - case 178: + case 181: yyDollar = yyS[yypt-0 : yypt+1] -//line generated_parser.y:701 +//line 
generated_parser.y:702
 	{
 		yyVAL.duration = 0
 	}
-	case 180:
+	case 183:
 		yyDollar = yyS[yypt-0 : yypt+1]
-//line generated_parser.y:705
+//line generated_parser.y:706
 	{
 		yyVAL.strings = nil
 	}
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
index c68300442e8d4..08c5442295baa 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
@@ -48,21 +48,21 @@ func (i Item) String() string {
 	return fmt.Sprintf("%q", i.Val)
 }
 
-// isOperator returns true if the Item corresponds to a arithmetic or set operator.
+// IsOperator returns true if the Item corresponds to an arithmetic or set operator.
 // Returns false otherwise.
 func (i ItemType) IsOperator() bool { return i > operatorsStart && i < operatorsEnd }
 
-// isAggregator returns true if the Item belongs to the aggregator functions.
+// IsAggregator returns true if the Item belongs to the aggregator functions.
 // Returns false otherwise
 func (i ItemType) IsAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd }
 
-// isAggregator returns true if the Item is an aggregator that takes a parameter.
+// IsAggregatorWithParam returns true if the Item is an aggregator that takes a parameter.
 // Returns false otherwise
 func (i ItemType) IsAggregatorWithParam() bool { return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE }
 
-// isKeyword returns true if the Item corresponds to a keyword.
+// IsKeyword returns true if the Item corresponds to a keyword.
 // Returns false otherwise.
 func (i ItemType) IsKeyword() bool { return i > keywordsStart && i < keywordsEnd }
 
@@ -77,7 +77,7 @@ func (i ItemType) IsComparisonOperator() bool {
 	}
 }
 
-// isSetOperator returns whether the Item corresponds to a set operator.
+// IsSetOperator returns whether the Item corresponds to a set operator.
 func (i ItemType) IsSetOperator() bool {
 	switch i {
 	case LAND, LOR, LUNLESS:
@@ -104,6 +104,7 @@ var key = map[string]ItemType{
 	"count":        COUNT,
 	"min":          MIN,
 	"max":          MAX,
+	"group":        GROUP,
 	"stddev":       STDDEV,
 	"stdvar":       STDVAR,
 	"topk":         TOPK,
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go
index 8c8b049d54f3f..2b0525ab6b2f8 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/db.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go
@@ -569,13 +569,6 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
 		autoCompact: true,
 		chunkPool:   chunkenc.NewPool(),
 	}
-	db.metrics = newDBMetrics(db, r)
-
-	maxBytes := opts.MaxBytes
-	if maxBytes < 0 {
-		maxBytes = 0
-	}
-	db.metrics.maxBytes.Set(float64(maxBytes))
 
 	if !opts.NoLockfile {
 		absdir, err := filepath.Abs(dir)
@@ -617,6 +610,14 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
 		return nil, err
 	}
 
+	// Register metrics after assigning the head block.
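+	// Presumably newDBMetrics installs GaugeFunc collectors that read
+	// db.head, so the head must be assigned before registration or an
+	// early gather could hit a nil head (inferred rationale).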
+	db.metrics = newDBMetrics(db, r)
+	maxBytes := opts.MaxBytes
+	if maxBytes < 0 {
+		maxBytes = 0
+	}
+	db.metrics.maxBytes.Set(float64(maxBytes))
+
 	if err := db.reload(); err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go
index f02ef767f07fe..a21f5ad3177e8 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go
@@ -837,6 +837,9 @@ func (h *Head) Truncate(mint int64) (err error) {
 	h.metrics.checkpointCreationTotal.Inc()
 	if _, err = wal.Checkpoint(h.wal, first, last, keep, mint); err != nil {
 		h.metrics.checkpointCreationFail.Inc()
+		if _, ok := errors.Cause(err).(*wal.CorruptionErr); ok {
+			h.metrics.walCorruptionsTotal.Inc()
+		}
 		return errors.Wrap(err, "create checkpoint")
 	}
 	if err := h.wal.Truncate(last + 1); err != nil {
@@ -1689,8 +1692,6 @@ func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSerie
 	h.metrics.seriesCreated.Inc()
 	atomic.AddUint64(&h.numSeries, 1)
 
-	h.postings.Add(id, lset)
-
 	h.symMtx.Lock()
 	defer h.symMtx.Unlock()
 
@@ -1706,6 +1707,10 @@ func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSerie
 		h.symbols[l.Value] = struct{}{}
 	}
 
+	// Postings should be set after setting the symbols (or while still
+	// holding the symbol mtx) to avoid a race where compaction sees
+	// partial symbols.
+	h.postings.Add(id, lset)
+
 	return s, true, nil
 }
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go
index 615c6d743e1fa..a94ccc5e02ca2 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go
@@ -22,6 +22,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"sort"
 	"sync"
 
 	"github.com/go-kit/kit/log"
@@ -286,47 +287,32 @@ type Intervals []Interval
 
 // Add the new time-range to the existing ones.
 // The existing ones must be sorted.
-func (itvs Intervals) Add(n Interval) Intervals {
-	for i, r := range itvs {
-		// TODO(gouthamve): Make this codepath easier to digest.
-		if r.InBounds(n.Mint-1) || r.InBounds(n.Mint) {
-			if n.Maxt > r.Maxt {
-				itvs[i].Maxt = n.Maxt
-			}
-
-			j := 0
-			for _, r2 := range itvs[i+1:] {
-				if n.Maxt < r2.Mint {
-					break
-				}
-				j++
-			}
-			if j != 0 {
-				if itvs[i+j].Maxt > n.Maxt {
-					itvs[i].Maxt = itvs[i+j].Maxt
-				}
-				itvs = append(itvs[:i+1], itvs[i+j+1:]...)
-			}
-			return itvs
-		}
-
-		if r.InBounds(n.Maxt+1) || r.InBounds(n.Maxt) {
-			if n.Mint < r.Maxt {
-				itvs[i].Mint = n.Mint
-			}
-			return itvs
-		}
-
-		if n.Mint < r.Mint {
-			newRange := make(Intervals, i, len(itvs[:i])+1)
-			copy(newRange, itvs[:i])
-			newRange = append(newRange, n)
-			newRange = append(newRange, itvs[i:]...)
+func (in Intervals) Add(n Interval) Intervals {
+	if len(in) == 0 {
+		return append(in, n)
+	}
+	// Find the min and max indexes of intervals that overlap with the new interval.
+	// Intervals are closed [t1, t2] and t is discrete, so neighbouring intervals
+	// that are one step away from the new one can be merged in as well.
+	mini := sort.Search(len(in), func(i int) bool { return in[i].Maxt >= n.Mint-1 })
+	if mini == len(in) {
+		return append(in, n)
+	}
 
-			return newRange
+	maxi := sort.Search(len(in)-mini, func(i int) bool { return in[mini+i].Mint > n.Maxt+1 })
+	if maxi == 0 {
+		if mini == 0 {
+			return append(Intervals{n}, in...)
 		}
+		return append(in[:mini], append(Intervals{n}, in[mini:]...)...)
} - itvs = append(itvs, n) - return itvs + if n.Mint < in[mini].Mint { + in[mini].Mint = n.Mint + } + in[mini].Maxt = in[maxi+mini-1].Maxt + if n.Maxt > in[mini].Maxt { + in[mini].Maxt = n.Maxt + } + return append(in[:mini+1], in[maxi+mini:]...) } diff --git a/vendor/modules.txt b/vendor/modules.txt index 71706b4dd2b7c..5d05b0da6dd77 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -136,8 +136,6 @@ github.com/cespare/xxhash github.com/cespare/xxhash/v2 # github.com/containerd/containerd v1.3.4 github.com/containerd/containerd/errdefs -github.com/containerd/containerd/log -github.com/containerd/containerd/platforms # github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 ## explicit github.com/containerd/fifo @@ -151,12 +149,13 @@ github.com/coreos/go-systemd/journal github.com/coreos/go-systemd/sdjournal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/capnslog -# github.com/cortexproject/cortex v1.2.1-0.20200702073552-0ea5a8b50b19 +# github.com/cortexproject/cortex v1.2.1-0.20200709155522-19502213923d ## explicit github.com/cortexproject/cortex/pkg/alertmanager github.com/cortexproject/cortex/pkg/alertmanager/alerts github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb github.com/cortexproject/cortex/pkg/alertmanager/alerts/local +github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient github.com/cortexproject/cortex/pkg/api github.com/cortexproject/cortex/pkg/chunk github.com/cortexproject/cortex/pkg/chunk/aws @@ -245,7 +244,7 @@ github.com/digitalocean/godo github.com/docker/distribution/digestset github.com/docker/distribution/reference github.com/docker/distribution/registry/api/errcode -# github.com/docker/docker v17.12.0-ce-rc1.0.20200621004740-33fba35d42e7+incompatible +# github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -285,6 +284,8 @@ github.com/docker/docker/pkg/pubsub github.com/docker/docker/pkg/streamformatter github.com/docker/docker/pkg/stringid github.com/docker/docker/pkg/tailfile +github.com/docker/docker/pkg/term +github.com/docker/docker/pkg/term/windows # github.com/docker/go-connections v0.4.0 github.com/docker/go-connections/nat github.com/docker/go-connections/sockets @@ -523,7 +524,7 @@ github.com/jonboulle/clockwork github.com/joncrlsn/dque # github.com/jpillora/backoff v1.0.0 github.com/jpillora/backoff -# github.com/json-iterator/go v1.1.9 +# github.com/json-iterator/go v1.1.10 ## explicit github.com/json-iterator/go # github.com/jstemmer/go-junit-report v0.9.1 @@ -578,9 +579,6 @@ github.com/mitchellh/go-homedir # github.com/mitchellh/mapstructure v1.2.2 ## explicit github.com/mitchellh/mapstructure -# github.com/moby/term v0.0.0-20200611042045-63b9a826fb74 -github.com/moby/term -github.com/moby/term/windows # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 @@ -659,7 +657,7 @@ github.com/prometheus/alertmanager/store github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/types github.com/prometheus/alertmanager/ui -# github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55 +# github.com/prometheus/client_golang v1.7.1 ## explicit github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 @@ -683,11 +681,11 @@ github.com/prometheus/common/route github.com/prometheus/common/version # 
github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 github.com/prometheus/node_exporter/https -# github.com/prometheus/procfs v0.0.11 +# github.com/prometheus/procfs v0.1.3 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20200626180636-d17d88935c8d +# github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c ## explicit github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery