From 8203a31273b3056fe4b85722ab3eb5a48a40bbde Mon Sep 17 00:00:00 2001 From: Son Luong Ngoc Date: Mon, 17 Jun 2024 15:53:19 +0200 Subject: [PATCH 01/11] BuckEvent: provide a Buck Event Publisher proto Provide a BuckEvent Publisher service which emits BuckEvents to an external server implementation. Closes https://github.com/facebook/buck2/issues/226 --- Cargo.toml | 1 + app/buck2_event_publisher_proto/BUCK | 18 ++++++++++ app/buck2_event_publisher_proto/Cargo.toml | 16 +++++++++ app/buck2_event_publisher_proto/build.rs | 28 ++++++++++++++++ .../event_publisher.proto | 33 +++++++++++++++++++ app/buck2_event_publisher_proto/src/lib.rs | 12 +++++++ 6 files changed, 108 insertions(+) create mode 100644 app/buck2_event_publisher_proto/BUCK create mode 100644 app/buck2_event_publisher_proto/Cargo.toml create mode 100644 app/buck2_event_publisher_proto/build.rs create mode 100644 app/buck2_event_publisher_proto/event_publisher.proto create mode 100644 app/buck2_event_publisher_proto/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 71a263c2bd30..58f2a417075f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,6 +35,7 @@ members = [ "app/buck2_event_observer", "app/buck2_events", "app/buck2_event_log", + "app/buck2_event_publisher_proto", "app/buck2_execute", "app/buck2_execute_impl", "app/buck2_external_cells", diff --git a/app/buck2_event_publisher_proto/BUCK b/app/buck2_event_publisher_proto/BUCK new file mode 100644 index 000000000000..a9ded5a686d2 --- /dev/null +++ b/app/buck2_event_publisher_proto/BUCK @@ -0,0 +1,18 @@ +load("@fbcode//buck2:proto_defs.bzl", "rust_protobuf_library") +load("@fbsource//tools/build_defs:glob_defs.bzl", "glob") + +oncall("build_infra") + +rust_protobuf_library( + name = "buck2_event_publisher_proto", + srcs = glob(["src/**/*.rs"]), + build_script = "build.rs", + build_env = { + "BUCK_HACK_DATA_PROTOC_INCLUDE": "$(location //buck2/app/buck2_data:data_proto)", + }, + protos = ["event_publisher.proto"], + deps = [ + 
"fbsource//third-party/rust:tonic", + "//buck2/app/buck2_data:buck2_data", + ], +) diff --git a/app/buck2_event_publisher_proto/Cargo.toml b/app/buck2_event_publisher_proto/Cargo.toml new file mode 100644 index 000000000000..39b5e1a80f70 --- /dev/null +++ b/app/buck2_event_publisher_proto/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "buck2_event_publisher_proto" + +edition = "2021" +license = { workspace = true } +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +prost = { workspace = true } +tonic = { workspace = true } + +buck2_data = { workspace = true } + +[build-dependencies] +buck2_protoc_dev = { workspace = true } diff --git a/app/buck2_event_publisher_proto/build.rs b/app/buck2_event_publisher_proto/build.rs new file mode 100644 index 000000000000..25553e85e676 --- /dev/null +++ b/app/buck2_event_publisher_proto/build.rs @@ -0,0 +1,28 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::env; +use std::io; +use std::path::PathBuf; + +fn main() -> io::Result<()> { + let proto_files = &["event_publisher.proto"]; + + let data_include = if let Ok(value) = env::var("BUCK_HACK_DATA_PROTOC_INCLUDE") { + let path = PathBuf::from(value); + path.parent().unwrap().to_str().unwrap().to_owned() + } else { + "../buck2_data".to_owned() + }; + + buck2_protoc_dev::configure() + .setup_protoc() + .extern_path(".buck.data", "::buck2_data") + .compile(proto_files, &[".", &data_include]) +} diff --git a/app/buck2_event_publisher_proto/event_publisher.proto b/app/buck2_event_publisher_proto/event_publisher.proto new file mode 100644 index 000000000000..1963474c4e74 --- /dev/null +++ b/app/buck2_event_publisher_proto/event_publisher.proto @@ -0,0 +1,33 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +syntax = "proto3"; + +import "data.proto"; + +package event_publisher; + +message BuckEventRequest { + // A trace-unique 64-bit identifying the stream. + uint64 stream_id = 1; + + buck.data.BuckEvent event = 2; +}; + +message BuckEventResponse { + // A trace-unique 64-bit identifying the stream. + uint64 stream_id = 1; + + // The trace ID of the event that has been committed. + uint64 trace_id = 2; +}; + +service BuckEventPublisher { + rpc StreamBuckEvent(stream BuckEventRequest) returns (stream BuckEventResponse); +}; diff --git a/app/buck2_event_publisher_proto/src/lib.rs b/app/buck2_event_publisher_proto/src/lib.rs new file mode 100644 index 000000000000..4392dad159d7 --- /dev/null +++ b/app/buck2_event_publisher_proto/src/lib.rs @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![feature(error_generic_member_access)] + +tonic::include_proto!("event_publisher"); From cee321ffd514a491e801531e673ef2720f9b1847 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Fri, 15 Nov 2024 11:43:05 +0100 Subject: [PATCH 02/11] Add buck2_event_publisher_proto to Cargo.toml --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 58f2a417075f..04ba52b73136 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -373,6 +373,7 @@ buck2_error = { path = "app/buck2_error" } buck2_error_derive = { path = "app/buck2_error_derive" } buck2_event_log = { path = "app/buck2_event_log" } buck2_event_observer = { path = "app/buck2_event_observer" } +buck2_event_publisher_proto = { path = "app/buck2_event_publisher_proto" } buck2_events = { path = "app/buck2_events" } buck2_execute = { path = "app/buck2_execute" } buck2_execute_impl = { path = "app/buck2_execute_impl" } From 12dc03d825dd855bf93d92d0c2263a7ab8430c06 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Fri, 15 Nov 2024 11:40:47 +0100 Subject: [PATCH 03/11] Add Bazel Build Event Protocol/Stream protobuf --- Cargo.toml | 3 + app/bazel_event_publisher_proto/Cargo.toml | 15 + app/bazel_event_publisher_proto/build.rs | 37 + .../proto/action_cache.proto | 63 + .../proto/build_event_stream.proto | 1446 +++++++++++++++++ .../proto/command_line.proto | 102 ++ .../proto/failure_details.proto | 1378 ++++++++++++++++ .../proto/google/api/annotations.proto | 31 + .../proto/google/api/client.proto | 456 ++++++ .../proto/google/api/field_behavior.proto | 104 ++ .../proto/google/api/http.proto | 371 +++++ .../proto/google/api/launch_stage.proto | 72 + .../devtools/build/v1/build_events.proto | 187 +++ 
.../devtools/build/v1/build_status.proto | 77 + .../build/v1/publish_build_event.proto | 187 +++ .../proto/invocation_policy.proto | 207 +++ .../proto/option_filters.proto | 61 + .../proto/package_load_metrics.proto | 44 + .../proto/strategy_policy.proto | 67 + app/bazel_event_publisher_proto/src/lib.rs | 61 + 20 files changed, 4969 insertions(+) create mode 100644 app/bazel_event_publisher_proto/Cargo.toml create mode 100644 app/bazel_event_publisher_proto/build.rs create mode 100644 app/bazel_event_publisher_proto/proto/action_cache.proto create mode 100644 app/bazel_event_publisher_proto/proto/build_event_stream.proto create mode 100644 app/bazel_event_publisher_proto/proto/command_line.proto create mode 100644 app/bazel_event_publisher_proto/proto/failure_details.proto create mode 100644 app/bazel_event_publisher_proto/proto/google/api/annotations.proto create mode 100644 app/bazel_event_publisher_proto/proto/google/api/client.proto create mode 100644 app/bazel_event_publisher_proto/proto/google/api/field_behavior.proto create mode 100644 app/bazel_event_publisher_proto/proto/google/api/http.proto create mode 100644 app/bazel_event_publisher_proto/proto/google/api/launch_stage.proto create mode 100644 app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_events.proto create mode 100644 app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_status.proto create mode 100644 app/bazel_event_publisher_proto/proto/google/devtools/build/v1/publish_build_event.proto create mode 100644 app/bazel_event_publisher_proto/proto/invocation_policy.proto create mode 100644 app/bazel_event_publisher_proto/proto/option_filters.proto create mode 100644 app/bazel_event_publisher_proto/proto/package_load_metrics.proto create mode 100644 app/bazel_event_publisher_proto/proto/strategy_policy.proto create mode 100644 app/bazel_event_publisher_proto/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 04ba52b73136..29710b502384 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "allocative/allocative_derive", # @oss-disable: "attic/uniplate", # @oss-disable: "attic/uniplate_derive", + "app/bazel_event_publisher_proto", "app/buck2", "app/buck2_action_impl", "app/buck2_action_impl_tests", @@ -336,6 +337,8 @@ starlark_syntax = { version = "0.12.0", path = "starlark-rust/starlark_syntax" } static_interner = { path = "shed/static_interner" } three_billion_instructions = { path = "shed/three_billion_instructions" } +bazel_event_publisher_proto = { path = "app/bazel_event_publisher_proto" } + buck2_action_impl = { path = "app/buck2_action_impl" } buck2_action_metadata_proto = { path = "app/buck2_action_metadata_proto" } buck2_analysis = { path = "app/buck2_analysis" } diff --git a/app/bazel_event_publisher_proto/Cargo.toml b/app/bazel_event_publisher_proto/Cargo.toml new file mode 100644 index 000000000000..f7daddf9bf52 --- /dev/null +++ b/app/bazel_event_publisher_proto/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "bazel_event_publisher_proto" + +edition = "2021" +license = { workspace = true } +repository = { workspace = true } +version = "0.1.0" + +[dependencies] +prost = { workspace = true } +prost-types = { workspace = true } +tonic = { workspace = true } + +[build-dependencies] +buck2_protoc_dev = { workspace = true } diff --git a/app/bazel_event_publisher_proto/build.rs b/app/bazel_event_publisher_proto/build.rs new file mode 100644 index 000000000000..1b59ef89a797 --- /dev/null +++ b/app/bazel_event_publisher_proto/build.rs @@ -0,0 +1,37 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. 
+ */ + +use std::env; +use std::io; +use std::path::PathBuf; + +fn main() -> io::Result<()> { + let proto_files = &[ + "proto/action_cache.proto", + "proto/build_event_stream.proto", + "proto/command_line.proto", + "proto/failure_details.proto", + "proto/invocation_policy.proto", + "proto/option_filters.proto", + "proto/package_load_metrics.proto", + "proto/strategy_policy.proto", + "proto/google/api/annotations.proto", + "proto/google/api/client.proto", + "proto/google/api/field_behavior.proto", + "proto/google/api/http.proto", + "proto/google/api/launch_stage.proto", + "proto/google/devtools/build/v1/build_events.proto", + "proto/google/devtools/build/v1/build_status.proto", + "proto/google/devtools/build/v1/publish_build_event.proto", + ]; + + buck2_protoc_dev::configure() + .setup_protoc() + .compile(proto_files, &["./proto/"]) +} diff --git a/app/bazel_event_publisher_proto/proto/action_cache.proto b/app/bazel_event_publisher_proto/proto/action_cache.proto new file mode 100644 index 000000000000..a31cba599ed6 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/action_cache.proto @@ -0,0 +1,63 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package blaze; + +option java_package = "com.google.devtools.build.lib.actions.cache"; +option java_outer_classname = "Protos"; + +// Information about the action cache behavior during a single build. 
+message ActionCacheStatistics { + // Size of the action cache in bytes. + // + // This is computed by the code that persists the action cache to disk and + // represents the size of the written files, which has no direct relation to + // the number of entries in the cache. + uint64 size_in_bytes = 1; + + // Time it took to save the action cache to disk. + uint64 save_time_in_ms = 2; + + // Reasons for not finding an action in the cache. + enum MissReason { + DIFFERENT_ACTION_KEY = 0; + DIFFERENT_DEPS = 1; + DIFFERENT_ENVIRONMENT = 2; + DIFFERENT_FILES = 3; + CORRUPTED_CACHE_ENTRY = 4; + NOT_CACHED = 5; + UNCONDITIONAL_EXECUTION = 6; + } + + // Detailed information for a particular miss reason. + message MissDetail { + MissReason reason = 1; + int32 count = 2; + } + + // Cache counters. + int32 hits = 3; + int32 misses = 4; + + // Breakdown of the cache misses based on the reasons behind them. + repeated MissDetail miss_details = 5; + + // Time it took to load the action cache from disk. Reported as 0 if the + // action cache has not been loaded in this invocation. + uint64 load_time_in_ms = 6; + + // NEXT TAG: 7 +} diff --git a/app/bazel_event_publisher_proto/proto/build_event_stream.proto b/app/bazel_event_publisher_proto/proto/build_event_stream.proto new file mode 100644 index 000000000000..c33b8ba65140 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/build_event_stream.proto @@ -0,0 +1,1446 @@ +// Copyright 2016 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// LINT: LEGACY_NAMES + +syntax = "proto3"; + +package build_event_stream; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "package_load_metrics.proto"; +import "action_cache.proto"; +import "command_line.proto"; +import "failure_details.proto"; +import "invocation_policy.proto"; + +option java_package = "com.google.devtools.build.lib.buildeventstream"; +option java_outer_classname = "BuildEventStreamProtos"; + +// Identifier for a build event. It is deliberately structured to also provide +// information about which build target etc the event is related to. +// +// Events are chained via the event id as follows: each event has an id and a +// set of ids of children events such that apart from the initial event each +// event has an id that is mentioned as child id in an earlier event and a build +// invocation is complete if and only if all direct and indirect children of the +// initial event have been posted. +message BuildEventId { + // Generic identifier for a build event. This is the default type of + // BuildEventId, but should not be used outside testing; nevertheless, + // tools should handle build events with this kind of id gracefully. + message UnknownBuildEventId { + string details = 1; + } + + // Identifier of an event reporting progress. Those events are also used to + // chain in events that come early. + message ProgressId { + // Unique identifier. No assumption should be made about how the ids are + // assigned; the only meaningful operation on this field is test for + // equality. + int32 opaque_count = 1; + } + + // Identifier of an event indicating the beginning of a build; this will + // normally be the first event. + message BuildStartedId {} + + // Identifier on an event indicating the original commandline received by + // the bazel server. 
+ message UnstructuredCommandLineId {} + + // Identifier on an event describing the commandline received by Bazel. + message StructuredCommandLineId { + // A title for this command line value, as there may be multiple. + // For example, a single invocation may wish to report both the literal and + // canonical command lines, and this label would be used to differentiate + // between both versions. + string command_line_label = 1; + } + + // Identifier of an event indicating the workspace status. + message WorkspaceStatusId {} + + // Identifier on an event reporting on the options included in the command + // line, both explicitly and implicitly. + message OptionsParsedId {} + + // Identifier of an event reporting that an external resource was fetched + // from. + message FetchId { + // The external resource that was fetched from. + string url = 1; + } + + // Identifier of an event indicating that a target pattern has been expanded + // further. + // Messages of this shape are also used to describe parts of a pattern that + // have been skipped for some reason, if the actual expansion was still + // carried out (e.g., if keep_going is set). In this case, the + // pattern_skipped choice in the id field is to be made. + message PatternExpandedId { + repeated string pattern = 1; + } + + message WorkspaceConfigId {} + + message BuildMetadataId {} + + // Identifier of an event indicating that a target has been expanded by + // identifying for which configurations it should be build. + message TargetConfiguredId { + string label = 1; + + // If empty, the id refers to the expansion of the target. If not-empty, + // the id refers to the expansion of an aspect applied to the (already + // expanded) target. + // + // For example, when building an apple_binary that depends on proto_library + // "//:foo_proto", there will be two TargetConfigured events for + // "//:foo_proto": + // + // 1. 
An event with an empty aspect, corresponding to actions producing + // language-agnostic outputs from the proto_library; and + // 2. An event with aspect "ObjcProtoAspect", corresponding to Objective-C + // code generation. + string aspect = 2; + } + + // Identifier of an event introducing a named set of files (usually artifacts) + // to be referred to in later messages. + message NamedSetOfFilesId { + // Identifier of the file set; this is an opaque string valid only for the + // particular instance of the event stream. + string id = 1; + } + + // Identifier of an event introducing a configuration. + message ConfigurationId { + // Identifier of the configuration; users of the protocol should not make + // any assumptions about it having any structure, or equality of the + // identifier between different streams. + // + // A value of "none" means the null configuration. It is used for targets + // that are not configurable, for example, source files. + string id = 1; + } + + // Identifier of an event indicating that a target was built completely; this + // does not include running the test if the target is a test target. + message TargetCompletedId { + string label = 1; + + // The configuration for which the target was built. + ConfigurationId configuration = 3; + + // If empty, the id refers to the completion of the target. If not-empty, + // the id refers to the completion of an aspect applied to the (already + // completed) target. + // + // For example, when building an apple_binary that depends on proto_library + // "//:foo_proto", there will be two TargetCompleted events for + // "//:foo_proto": + // + // 1. An event with an empty aspect, corresponding to actions producing + // language-agnostic outputs from the proto_library; and + // 2. An event with aspect "ObjcProtoAspect", corresponding to Objective-C + // code generation. 
+ string aspect = 2; + } + + // Identifier of an event reporting that an action was completed (not all + // actions are reported, only the ones that can be considered important; + // this includes all failed actions). + message ActionCompletedId { + string primary_output = 1; + // Optional, the label of the owner of the action, for reference. + string label = 2; + // Optional, the id of the configuration of the action owner. + ConfigurationId configuration = 3; + } + + // Identifier of an event reporting an event associated with an unconfigured + // label. Usually, this indicates a failure due to a missing input file. In + // any case, it will report some form of error (i.e., the payload will be an + // Aborted event); there are no regular events using this identifier. The + // purpose of those events is to serve as the root cause of a failed target. + message UnconfiguredLabelId { + string label = 1; + } + + // Identifier of an event reporting an event associated with a configured + // label, usually a visibility error. In any case, an event with such an + // id will always report some form of error (i.e., the payload will be an + // Aborted event); there are no regular events using this identifier. + message ConfiguredLabelId { + string label = 1; + ConfigurationId configuration = 2; + } + + // Identifier of an event reporting on an individual test run. The label + // identifies the test that is reported about, the remaining fields are + // in such a way as to uniquely identify the action within a build. In fact, + // attempts for the same test, run, shard triple are counted sequentially, + // starting with 1. + message TestResultId { + string label = 1; + ConfigurationId configuration = 5; + int32 run = 2; + int32 shard = 3; + int32 attempt = 4; + } + + // Identifier of an event reporting progress of an individual test run. + message TestProgressId { + // The label of the target for the action. 
+ string label = 1; + // The configuration under which the action is running. + ConfigurationId configuration = 2; + // The run number of the test action (e.g. for runs_per_test > 1). + int32 run = 3; + // For sharded tests, the shard number of the test action. + int32 shard = 4; + // The execution attempt number which may increase due to retries (e.g. for + // flaky tests). + int32 attempt = 5; + // An incrementing count used to differentiate TestProgressIds for the same + // test attempt. + int32 opaque_count = 6; + } + + // Identifier of an event reporting the summary of a test. + message TestSummaryId { + string label = 1; + ConfigurationId configuration = 2; + } + + // Identifier of an event reporting the summary of a target. + message TargetSummaryId { + string label = 1; + ConfigurationId configuration = 2; + } + + // Identifier of the BuildFinished event, indicating the end of a build. + message BuildFinishedId {} + + // Identifier of an event providing additional logs/statistics after + // completion of the build. + message BuildToolLogsId {} + + // Identifier of an event providing build metrics after completion + // of the build. + message BuildMetricsId {} + + // Identifier of an event providing convenience symlinks information. + message ConvenienceSymlinksIdentifiedId {} + + // Identifier of an event providing the ExecRequest of a run command. 
+ message ExecRequestId {} + + reserved 27; + + oneof id { + UnknownBuildEventId unknown = 1; + ProgressId progress = 2; + BuildStartedId started = 3; + UnstructuredCommandLineId unstructured_command_line = 11; + StructuredCommandLineId structured_command_line = 18; + WorkspaceStatusId workspace_status = 14; + OptionsParsedId options_parsed = 12; + FetchId fetch = 17; + ConfigurationId configuration = 15; + TargetConfiguredId target_configured = 16; + PatternExpandedId pattern = 4; + PatternExpandedId pattern_skipped = 10; + NamedSetOfFilesId named_set = 13; + TargetCompletedId target_completed = 5; + ActionCompletedId action_completed = 6; + UnconfiguredLabelId unconfigured_label = 19; + ConfiguredLabelId configured_label = 21; + TestResultId test_result = 8; + TestProgressId test_progress = 29; + TestSummaryId test_summary = 7; + TargetSummaryId target_summary = 26; + BuildFinishedId build_finished = 9; + BuildToolLogsId build_tool_logs = 20; + BuildMetricsId build_metrics = 22; + WorkspaceConfigId workspace = 23; + BuildMetadataId build_metadata = 24; + ConvenienceSymlinksIdentifiedId convenience_symlinks_identified = 25; + ExecRequestId exec_request = 28; + } +} + +// Payload of an event summarizing the progress of the build so far. Those +// events are also used to be parents of events where the more logical parent +// event cannot be posted yet as the needed information is not yet complete. +message Progress { + // The next chunk of stdout that bazel produced since the last progress event + // or the beginning of the build. + // Consumers that need to reason about the relative order of stdout and stderr + // can assume that stderr has been emitted before stdout if both are present, + // on a best-effort basis. + string stdout = 1; + + // The next chunk of stderr that bazel produced since the last progress event + // or the beginning of the build. 
+ // Consumers that need to reason about the relative order of stdout and stderr + // can assume that stderr has been emitted before stdout if both are present, + // on a best-effort basis. + string stderr = 2; +} + +// Payload of an event indicating that an expected event will not come, as +// the build is aborted prematurely for some reason. +message Aborted { + enum AbortReason { + UNKNOWN = 0; + + // The user requested the build to be aborted (e.g., by hitting Ctl-C). + USER_INTERRUPTED = 1; + + // The user requested that no analysis be performed. + NO_ANALYZE = 8; + + // The user requested that no build be carried out. + NO_BUILD = 9; + + // The build or target was aborted as a timeout was exceeded. + TIME_OUT = 2; + + // The build or target was aborted as some remote environment (e.g., for + // remote execution of actions) was not available in the expected way. + REMOTE_ENVIRONMENT_FAILURE = 3; + + // Failure due to reasons entirely internal to the build tool, i.e. an + // unexpected crash due to programmer error. + INTERNAL = 4; + + // A Failure occurred in the loading phase of a target. + LOADING_FAILURE = 5; + + // A Failure occurred in the analysis phase of a target. + ANALYSIS_FAILURE = 6; + + // Target build was skipped (e.g. due to incompatible CPU constraints). + SKIPPED = 7; + + // Build incomplete due to an earlier build failure (e.g. --keep_going was + // set to false causing the build be ended upon failure). + INCOMPLETE = 10; + + // The build tool ran out of memory and crashed. + OUT_OF_MEMORY = 11; + } + AbortReason reason = 1; + + // A human readable description with more details about there reason, where + // available and useful. + string description = 2; +} + +// Payload of an event indicating the beginning of a new build. Usually, events +// of those type start a new build-event stream. 
The target pattern requested +// to be build is contained in one of the announced child events; it is an +// invariant that precisely one of the announced child events has a non-empty +// target pattern. +message BuildStarted { + string uuid = 1; + + // Start of the build in ms since the epoch. + // + // Deprecated, use `start_time` instead. + // + // TODO(yannic): Remove. + int64 start_time_millis = 2 [deprecated = true]; + + // Start of the build. + google.protobuf.Timestamp start_time = 9; + + // Version of the build tool that is running. + string build_tool_version = 3; + + // A human-readable description of all the non-default option settings + string options_description = 4; + + // The name of the command that the user invoked. + string command = 5; + + // The working directory from which the build tool was invoked. + string working_directory = 6; + + // The directory of the workspace. + string workspace_directory = 7; + + // The process ID of the Bazel server. + int64 server_pid = 8; +} + +// Configuration related to the blaze workspace and output tree. +message WorkspaceConfig { + // The root of the local blaze exec root. All output files live underneath + // this at "blaze-out/". + string local_exec_root = 1; +} + +// Payload of an event reporting the command-line of the invocation as +// originally received by the server. Note that this is not the command-line +// given by the user, as the client adds information about the invocation, +// like name and relevant entries of rc-files and client environment variables. +// However, it does contain enough information to reproduce the build +// invocation. +message UnstructuredCommandLine { + repeated string args = 1; +} + +// Payload of an event reporting on the parsed options, grouped in various ways. 
+message OptionsParsed { + repeated string startup_options = 1; + repeated string explicit_startup_options = 2; + repeated string cmd_line = 3; + repeated string explicit_cmd_line = 4; + blaze.invocation_policy.InvocationPolicy invocation_policy = 5; + string tool_tag = 6; +} + +// Payload of an event indicating that an external resource was fetched. This +// event will only occur in streams where an actual fetch happened, not in ones +// where a cached copy of the entity to be fetched was used. +message Fetch { + bool success = 1; +} + +// Payload of an event reporting the workspace status. Key-value pairs can be +// provided by specifying the workspace_status_command to an executable that +// returns one key-value pair per line of output (key and value separated by a +// space). +message WorkspaceStatus { + message Item { + string key = 1; + string value = 2; + } + repeated Item item = 1; +} + +// Payload of an event reporting custom key-value metadata associated with the +// build. +message BuildMetadata { + // Custom metadata for the build. + map metadata = 1 + ; +} + +// Payload of an event reporting details of a given configuration. +message Configuration { + string mnemonic = 1; + string platform_name = 2; + string cpu = 3; + map make_variable = 4 + ; + // Whether this configuration is used for building tools. + bool is_tool = 5; +} + +// Payload of the event indicating the expansion of a target pattern. +// The main information is in the chaining part: the id will contain the +// target pattern that was expanded and the children id will contain the +// target or target pattern it was expanded to. +message PatternExpanded { + // Represents a test_suite target and the tests that it expanded to. Nested + // test suites are recursively expanded. The test labels only contain the + // final test targets, not any nested suites. + message TestSuiteExpansion { + // The label of the test_suite rule. 
+ string suite_label = 1; + // Labels of the test targets included in the suite. Includes all tests in + // the suite regardless of any filters or negative patterns which may result + // in the test not actually being run. + repeated string test_labels = 2; + } + + // All test suites requested via top-level target patterns. Does not include + // test suites whose label matched a negative pattern. + repeated TestSuiteExpansion test_suite_expansions = 1; +} + +// Enumeration type characterizing the size of a test, as specified by the +// test rule. +enum TestSize { + UNKNOWN = 0; + SMALL = 1; + MEDIUM = 2; + LARGE = 3; + ENORMOUS = 4; +} + +// Payload of the event indicating that the configurations for a target have +// been identified. As with pattern expansion the main information is in the +// chaining part: the id will contain the target that was configured and the +// children id will contain the configured targets it was configured to. +message TargetConfigured { + // The kind of target (e.g., e.g. "cc_library rule", "source file", + // "generated file") where the completion is reported. + string target_kind = 1; + + // The size of the test, if the target is a test target. Unset otherwise. + TestSize test_size = 2; + + // List of all tags associated with this target (for all possible + // configurations). + repeated string tag = 3; +} + +message File { + // A sequence of prefixes to apply to the file name to construct a full path. + // In most but not all cases, there will be 3 entries: + // 1. A root output directory, eg "bazel-out" + // 2. A configuration mnemonic, eg "k8-fastbuild" + // 3. An output category, eg "genfiles" + repeated string path_prefix = 4; + + // identifier indicating the nature of the file (e.g., "stdout", "stderr") + string name = 1; + + oneof file { + // A location where the contents of the file can be found. The string is + // encoded according to RFC2396. 
+ string uri = 2; + // The contents of the file, if they are guaranteed to be short. + bytes contents = 3; + // The symlink target path, if the file is an unresolved symlink. + string symlink_target_path = 7; + } + + // Digest of the file, using the build tool's configured digest algorithm, + // hex-encoded. + string digest = 5; + + // Length of the file in bytes. + int64 length = 6; +} + +// Payload of a message to describe a set of files, usually build artifacts, to +// be referred to later by their name. In this way, files that occur identically +// as outputs of several targets have to be named only once. +message NamedSetOfFiles { + // Files that belong to this named set of files. + repeated File files = 1; + + // Other named sets whose members also belong to this set. + repeated BuildEventId.NamedSetOfFilesId file_sets = 2; +} + +// Payload of the event indicating the completion of an action. The main purpose +// of posting those events is to provide details on the root cause for a target +// failing; however, consumers of the build-event protocol must not assume +// that only failed actions are posted. +message ActionExecuted { + bool success = 1; + + // The mnemonic of the action that was executed + string type = 8; + + // The exit code of the action, if it is available. + int32 exit_code = 2; + + // Location where to find the standard output of the action + // (e.g., a file path). + File stdout = 3; + + // Location where to find the standard error of the action + // (e.g., a file path). + File stderr = 4; + + // Deprecated. This field is now present on ActionCompletedId. + string label = 5 [deprecated = true]; + + // Deprecated. This field is now present on ActionCompletedId. + BuildEventId.ConfigurationId configuration = 7 [deprecated = true]; + + // Primary output; only provided for successful actions. + File primary_output = 6; + + // The command-line of the action, if the action is a command. 
+ repeated string command_line = 9; + + // List of paths to log files + repeated File action_metadata_logs = 10; + + // Only populated if success = false, and sometimes not even then. + failure_details.FailureDetail failure_detail = 11; + + // Start of action execution, before any attempted execution begins. + google.protobuf.Timestamp start_time = 12; + + // End of action execution, after all attempted execution completes. + google.protobuf.Timestamp end_time = 13; + + // Additional details about action execution supplied by strategies. Bazel + // options will determine which strategy details are included when multiple + // strategies are involved in a single action's execution. + // + // The default type will be `tools.proto.SpawnExec` found in `spawn.proto`. + repeated google.protobuf.Any strategy_details = 14; +} + +// Collection of all output files belonging to that output group. +message OutputGroup { + // Ids of fields that have been removed. + reserved 2; + + // Name of the output group + string name = 1; + + // List of file sets that belong to this output group as well. + repeated BuildEventId.NamedSetOfFilesId file_sets = 3; + + // Indicates that one or more of the output group's files were not built + // successfully (the generating action failed). + bool incomplete = 4; + + // Inlined files that belong to this output group, requested via + // --build_event_inline_output_groups. + repeated File inline_files = 5; +} + +// Payload of the event indicating the completion of a target. The target is +// specified in the id. If the target failed the root causes are provided as +// children events. +message TargetComplete { + bool success = 1; + + // The kind of target (e.g., e.g. "cc_library rule", "source file", + // "generated file") where the completion is reported. + // Deprecated: use the target_kind field in TargetConfigured instead. + string target_kind = 5 [deprecated = true]; + + // The size of the test, if the target is a test target. Unset otherwise. 
+ // Deprecated: use the test_size field in TargetConfigured instead. + TestSize test_size = 6 [deprecated = true]; + + // The output files are arranged by their output group. If an output file + // is part of multiple output groups, it appears once in each output + // group. + repeated OutputGroup output_group = 2; + + // Temporarily, also report the important outputs directly. This is only to + // allow existing clients help transition to the deduplicated representation; + // new clients should not use it. + repeated File important_output = 4 [deprecated = true]; + + // Report output artifacts (referenced transitively via output_group) which + // emit directories instead of singleton files. These directory_output entries + // will never include a uri. + repeated File directory_output = 8; + + // List of tags associated with this configured target. + repeated string tag = 3; + + // The timeout specified for test actions under this configured target. + // + // Deprecated, use `test_timeout` instead. + // + // TODO(yannic): Remove. + int64 test_timeout_seconds = 7 [deprecated = true]; + + // The timeout specified for test actions under this configured target. + google.protobuf.Duration test_timeout = 10; + + // Failure information about the target, only populated if success is false, + // and sometimes not even then. Equal to one of the ActionExecuted + // failure_detail fields for one of the root cause ActionExecuted events. + failure_details.FailureDetail failure_detail = 9; +} + +enum TestStatus { + NO_STATUS = 0; + PASSED = 1; + FLAKY = 2; + TIMEOUT = 3; + FAILED = 4; + INCOMPLETE = 5; + REMOTE_FAILURE = 6; + FAILED_TO_BUILD = 7; + TOOL_HALTED_BEFORE_TESTING = 8; +} + +// Payload on events reporting about individual test action. +message TestResult { + reserved 1; + + // The status of this test. + TestStatus status = 5; + + // Additional details about the status of the test. This is intended for + // user display and must not be parsed. 
+ string status_details = 9; + + // True, if the reported attempt is taken from the tool's local cache. + bool cached_locally = 4; + + // Time in milliseconds since the epoch at which the test attempt was started. + // Note: for cached test results, this is time can be before the start of the + // build. + // + // Deprecated, use `test_attempt_start` instead. + // + // TODO(yannic): Remove. + int64 test_attempt_start_millis_epoch = 6 [deprecated = true]; + + // Time at which the test attempt was started. + // Note: for cached test results, this is time can be before the start of the + // build. + google.protobuf.Timestamp test_attempt_start = 10; + + // Time the test took to run. For locally cached results, this is the time + // the cached invocation took when it was invoked. + // + // Deprecated, use `test_attempt_duration` instead. + // + // TODO(yannic): Remove. + int64 test_attempt_duration_millis = 3 [deprecated = true]; + + // Time the test took to run. For locally cached results, this is the time + // the cached invocation took when it was invoked. + google.protobuf.Duration test_attempt_duration = 11; + + // Files (logs, test.xml, undeclared outputs, etc) generated by that test + // action. + repeated File test_action_output = 2; + + // Warnings generated by that test action. + repeated string warning = 7; + + // Message providing optional meta data on the execution of the test action, + // if available. + message ExecutionInfo { + // Deprecated, use TargetComplete.test_timeout instead. + int32 timeout_seconds = 1 [deprecated = true]; + + // Name of the strategy to execute this test action (e.g., "local", + // "remote") + string strategy = 2; + + // True, if the reported attempt was a cache hit in a remote cache. + bool cached_remotely = 6; + + // The exit code of the test action. + int32 exit_code = 7; + + // The hostname of the machine where the test action was executed (in case + // of remote execution), if known. 
+ string hostname = 3; + + // Represents a hierarchical timing breakdown of an activity. + // The top level time should be the total time of the activity. + // Invariant: `time` >= sum of `time`s of all direct children. + message TimingBreakdown { + repeated TimingBreakdown child = 1; + string name = 2; + // Deprecated, use `time` instead. + // + // TODO(yannic): Remove. + int64 time_millis = 3 [deprecated = true]; + google.protobuf.Duration time = 4; + } + TimingBreakdown timing_breakdown = 4; + + message ResourceUsage { + string name = 1; + int64 value = 2; + } + repeated ResourceUsage resource_usage = 5; + } + ExecutionInfo execution_info = 8; +} + +// Event payload providing information about an active, individual test run. +message TestProgress { + // Identifies a resource that may provide information about an active test + // run. The resource is not necessarily a file and may need to be queried + // for information. The URI is not guaranteed to be available after the test + // completes. The string is encoded according to RFC2396. + string uri = 1; +} + +// Payload of the event summarizing a test. +message TestSummary { + // Wrapper around BlazeTestStatus to support importing that enum to proto3. + // Overall status of test, accumulated over all runs, shards, and attempts. + TestStatus overall_status = 5; + + // Total number of shard attempts. + // E.g., if a target has 4 runs, 3 shards, each with 2 attempts, + // then total_run_count will be 4*3*2 = 24. + int32 total_run_count = 1; + + // Value of runs_per_test for the test. + int32 run_count = 10; + + // Number of attempts. + // If there are a different number of attempts per shard, the highest attempt + // count across all shards for each run is used. + int32 attempt_count = 15; + + // Number of shards. + int32 shard_count = 11; + + // Path to logs of passed runs. 
+ repeated File passed = 3; + + // Path to logs of failed runs; + repeated File failed = 4; + + // Total number of cached test actions + int32 total_num_cached = 6; + + // When the test first started running. + // + // Deprecated, use `first_start_time` instead. + // + // TODO(yannic): Remove. + int64 first_start_time_millis = 7 [deprecated = true]; + + // When the test first started running. + google.protobuf.Timestamp first_start_time = 13; + + // When the last test action completed. + // + // Deprecated, use `last_stop_time` instead. + // + // TODO(yannic): Remove. + int64 last_stop_time_millis = 8 [deprecated = true]; + + // When the test first started running. + google.protobuf.Timestamp last_stop_time = 14; + + // The total runtime of the test. + // + // Deprecated, use `total_run` instead. + // + // TODO(yannic): Remove. + int64 total_run_duration_millis = 9 [deprecated = true]; + + // The total runtime of the test. + google.protobuf.Duration total_run_duration = 12; +} + +// Payload of the event summarizing a target (test or non-test). +message TargetSummary { + // Conjunction of TargetComplete events for this target, including aspects. + bool overall_build_success = 1; + + // Repeats TestSummary's overall_status if available. + TestStatus overall_test_status = 2; +} + +// Event indicating the end of a build. +message BuildFinished { + // Exit code of a build. The possible values correspond to the predefined + // codes in bazel's lib.ExitCode class, as well as any custom exit code a + // module might define. The predefined exit codes are subject to change (but + // rarely do) and are not part of the public API. + // + // A build was successful iff ExitCode.code equals 0. + message ExitCode { + // The name of the exit code. + string name = 1; + + // The exit code. + int32 code = 2; + } + + // Things that happened during the build that could be of interest. + message AnomalyReport { + // Was the build suspended at any time during the build. 
+ // Examples of suspensions are SIGSTOP, or the hardware being put to sleep. + // If was_suspended is true, then most of the timings for this build are + // suspect. + // NOTE: This is no longer set and is deprecated. + bool was_suspended = 1; + } + + // If the build succeeded or failed. + bool overall_success = 1 [deprecated = true]; + + // The overall status of the build. A build was successful iff + // ExitCode.code equals 0. + ExitCode exit_code = 3; + + // End of the build in ms since the epoch. + // + // Deprecated, use `finish_time` instead. + // + // TODO(yannic): Remove. + int64 finish_time_millis = 2 [deprecated = true]; + + // End of the build. + google.protobuf.Timestamp finish_time = 5; + + AnomalyReport anomaly_report = 4 [deprecated = true]; + + // Only populated if success = false, and sometimes not even then. + failure_details.FailureDetail failure_detail = 6; +} + +message BuildMetrics { + message ActionSummary { + // The total number of actions created and registered during the build, + // including both aspects and configured targets. This metric includes + // unused actions that were constructed but not executed during this build. + // It does not include actions that were created on prior builds that are + // still valid, even if those actions had to be re-executed on this build. + // For the total number of actions that would be created if this invocation + // were "clean", see BuildGraphMetrics below. + int64 actions_created = 1; + + // The total number of actions created this build just by configured + // targets. Used mainly to allow consumers of actions_created, which used to + // not include aspects' actions, to normalize across the Blaze release that + // switched actions_created to include all created actions. + int64 actions_created_not_including_aspects = 3; + + // The total number of actions executed during the build. This includes any + // remote cache hits, but excludes local action cache hits. 
+ int64 actions_executed = 2; + + message ActionData { + string mnemonic = 1; + + // The total number of actions of this type executed during the build. As + // above, includes remote cache hits but excludes local action cache hits. + int64 actions_executed = 2; + + // When the first action of this type started being executed, in + // milliseconds from the epoch. + int64 first_started_ms = 3; + + // When the last action of this type ended being executed, in + // milliseconds from the epoch. + int64 last_ended_ms = 4; + + // Accumulated CPU time of all spawned actions of this type. + // This is only set if all the actions reported a time + google.protobuf.Duration system_time = 5; + google.protobuf.Duration user_time = 6; + + // The total number of actions of this type registered during the build. + int64 actions_created = 7; + } + // Contains the top N actions by number of actions executed. + repeated ActionData action_data = 4; + + // Deprecated. The total number of remote cache hits. + int64 remote_cache_hits = 5 [deprecated = true]; + + message RunnerCount { + string name = 1; + int32 count = 2; + string exec_kind = 3; + } + repeated RunnerCount runner_count = 6; + + blaze.ActionCacheStatistics action_cache_statistics = 7; + } + ActionSummary action_summary = 1; + + message MemoryMetrics { + // Size of the JVM heap post build in bytes. This is only collected if + // --memory_profile is set, since it forces a full GC. + int64 used_heap_size_post_build = 1; + + // Size of the peak JVM heap size in bytes post GC. Note that this reports 0 + // if there was no major GC during the build. + int64 peak_post_gc_heap_size = 2; + + // Size of the peak tenured space JVM heap size event in bytes post GC. Note + // that this reports 0 if there was no major GC during the build. + int64 peak_post_gc_tenured_space_heap_size = 4; + + message GarbageMetrics { + // Type of garbage collected, e.g. G1 Old Gen. 
+ string type = 1; + // Number of bytes of garbage of the given type collected during this + // invocation. + int64 garbage_collected = 2; + } + + repeated GarbageMetrics garbage_metrics = 3; + } + MemoryMetrics memory_metrics = 2; + + message TargetMetrics { + // DEPRECATED + // No longer populated. It never measured what it was supposed to (targets + // loaded): it counted targets that were analyzed even if the underlying + // package had not changed. + // TODO(janakr): rename and remove. + int64 targets_loaded = 1; + + // Number of targets/aspects configured during this build. Does not include + // targets/aspects that were configured on prior builds on this server and + // were cached. See BuildGraphMetrics below if you need that. + int64 targets_configured = 2; + + // Number of configured targets analyzed during this build. Does not include + // aspects. Used mainly to allow consumers of targets_configured, which used + // to not include aspects, to normalize across the Blaze release that + // switched targets_configured to include aspects. + int64 targets_configured_not_including_aspects = 3; + } + TargetMetrics target_metrics = 3; + + message PackageMetrics { + // Number of BUILD files (aka packages) successfully loaded during this + // build. + // + // [For Bazel binaries built at source states] Before Dec 2021, this value + // was the number of packages attempted to be loaded, for a particular + // definition of "attempted". + // + // After Dec 2021, this value would sometimes overcount because the same + // package could sometimes be attempted to be loaded multiple times due to + // memory pressure. + // + // After Feb 2022, this value is the number of packages successfully + // loaded. + int64 packages_loaded = 1; + + // Loading time metrics per package. 
+ repeated devtools.build.lib.packages.metrics.PackageLoadMetrics + package_load_metrics = 2; + } + PackageMetrics package_metrics = 4; + + message TimingMetrics { + // For Skymeld, it's possible that + // analysis_phase_time_in_ms + execution_phase_time_in_ms >= wall_time_in_ms + // + // The CPU time in milliseconds consumed during this build. + int64 cpu_time_in_ms = 1; + // The elapsed wall time in milliseconds during this build. + int64 wall_time_in_ms = 2; + // The elapsed wall time in milliseconds during the analysis phase. + // When analysis and execution phases are interleaved, this measures the + // elapsed time from the first analysis work to the last. + int64 analysis_phase_time_in_ms = 3; + // The elapsed wall time in milliseconds during the execution phase. + // When analysis and execution phases are interleaved, this measures the + // elapsed time from the first action execution (excluding workspace status + // actions) to the last. + int64 execution_phase_time_in_ms = 4; + + // The elapsed wall time in milliseconds until the first action execution + // started (excluding workspace status actions). + int64 actions_execution_start_in_ms = 5; + } + TimingMetrics timing_metrics = 5; + + message CumulativeMetrics { + // One-indexed number of "analyses" the server has run, including the + // current one. Will be incremented for every build/test/cquery/etc. command + // that reaches the analysis phase. + int32 num_analyses = 11; + // One-indexed number of "builds" the server has run, including the current + // one. Will be incremented for every build/test/run/etc. command that + // reaches the execution phase. + int32 num_builds = 12; + } + + CumulativeMetrics cumulative_metrics = 6; + + message ArtifactMetrics { + reserved 1; + + message FilesMetric { + int64 size_in_bytes = 1; + int32 count = 2; + } + + // Measures all source files newly read this build. Does not include + // unchanged sources on incremental builds. 
+ FilesMetric source_artifacts_read = 2; + // Measures all output artifacts from executed actions. This includes + // actions that were cached locally (via the action cache) or remotely (via + // a remote cache or executor), but does *not* include outputs of actions + // that were cached internally in Skyframe. + FilesMetric output_artifacts_seen = 3; + // Measures all output artifacts from actions that were cached locally + // via the action cache. These artifacts were already present on disk at the + // start of the build. Does not include Skyframe-cached actions' outputs. + FilesMetric output_artifacts_from_action_cache = 4; + // Measures all artifacts that belong to a top-level output group. Does not + // deduplicate, so if there are two top-level targets in this build that + // share an artifact, it will be counted twice. + FilesMetric top_level_artifacts = 5; + } + + ArtifactMetrics artifact_metrics = 7; + + // Data about the evaluation of Skyfunctions. + message EvaluationStat { + // Name of the Skyfunction. + string skyfunction_name = 1; + // How many times a given operation was carried out on a Skyfunction. + int64 count = 2; + } + + // Information about the size and shape of the build graph. Some fields may + // not be populated if Bazel was able to skip steps due to caching. + message BuildGraphMetrics { + // How many configured targets/aspects were in this build, including any + // that were analyzed on a prior build and are still valid. May not be + // populated if analysis phase was fully cached. Note: for historical + // reasons this includes input/output files and other configured targets + // that do not actually have associated actions. + int32 action_lookup_value_count = 1; + // How many configured targets alone were in this build: always at most + // action_lookup_value_count. Useful mainly for historical comparisons to + // TargetMetrics.targets_configured, which used to not count aspects. 
This + // also includes configured targets that do not have associated actions. + int32 action_lookup_value_count_not_including_aspects = 5; + // How many actions belonged to the configured targets/aspects above. It may + // not be necessary to execute all of these actions to build the requested + // targets. May not be populated if analysis phase was fully cached. + int32 action_count = 2; + // How many actions belonged to configured targets: always at most + // action_count. Useful mainly for historical comparisons to + // ActionMetrics.actions_created, which used to not count aspects' actions. + int32 action_count_not_including_aspects = 6; + // How many "input file" configured targets there were: one per source file. + // Should agree with artifact_metrics.source_artifacts_read.count above, + int32 input_file_configured_target_count = 7; + // How many "output file" configured targets there were: output files that + // are targets (not implicit outputs). + int32 output_file_configured_target_count = 8; + // How many "other" configured targets there were (like alias, + // package_group, and other non-rule non-file configured targets). + int32 other_configured_target_count = 9; + // How many artifacts are outputs of the above actions. May not be populated + // if analysis phase was fully cached. + int32 output_artifact_count = 3; + // How many Skyframe nodes there are in memory at the end of the build. This + // may underestimate the number of nodes when running with memory-saving + // settings or with Skybuild, and may overestimate if there are nodes from + // prior evaluations still in the cache. + int32 post_invocation_skyframe_node_count = 4; + // Number of SkyValues that were dirtied during the build. Dirtied nodes are + // those that transitively depend on a node that changed by itself (e.g. one + // representing a file in the file system) + repeated EvaluationStat dirtied_values = 10; + // Number of SkyValues that changed by themselves. 
For example, when a file + // on the file system changes, the SkyValue representing it will change. + repeated EvaluationStat changed_values = 11; + // Number of SkyValues that were built. This means that they were evaluated + // and were found to have changed from their previous version. + repeated EvaluationStat built_values = 12; + // Number of SkyValues that were evaluated and found clean, i.e. equal to + // their previous version. + repeated EvaluationStat cleaned_values = 13; + // Number of evaluations to build SkyValues. This includes restarted + // evaluations, which means there can be multiple evaluations per built + // SkyValue. Subtract built_values from this number to get the number of + // restarted evaluations. + repeated EvaluationStat evaluated_values = 17; + + // For SkyKeys in 'done values' where the SkyValue is of type + // RuleConfiguredTargetValue, we pull those out separately and report the + // ruleClass and action count. + message RuleClassCount { + // Unique key for the rule class. + string key = 1; + + // String name of the rule_class (not guaranteed unique) + string rule_class = 2; + + // how many rule instances of this type were seen. + uint64 count = 3; + + // how many actions were created by this rule class. + uint64 action_count = 4; + } + repeated RuleClassCount rule_class = 14; + + // For SkyKeys whose function name is ASPECT break out that information + message AspectCount { + // Unique key for Aspect. + string key = 1; + + // usually the same as above, but can differ in some cases. + string aspect_name = 2; + + // number of aspects created of this type. + uint64 count = 3; + + // number of actions created by aspects of this type. + uint64 action_count = 4; + } + repeated AspectCount aspect = 15; + + // Removed due to overlap with EvaluationStat + reserved 16; + } + + BuildGraphMetrics build_graph_metrics = 8; + + // Information about all workers that were alive during the invocation. + message WorkerMetrics { + // Deprecated. 
Use worker_ids instead of this field. + int32 worker_id = 1 [deprecated = true]; + + // Ids of workers. Could be multiple in case of multiplex workers + repeated uint32 worker_ids = 8; + // Worker process id. If there is no process for worker, equals to zero. + uint32 process_id = 2; + // Mnemonic of running worker. + string mnemonic = 3; + // Multiplex or singleplex worker. + bool is_multiplex = 4; + // Using worker sandbox file system or not. + bool is_sandbox = 5; + // TODO(b/300067854): Deprecate since all worker metrics should have their + // WorkerStats set. + bool is_measurable = 6; + // Hash value of worker key. Needed to distinguish worker pools with same + // menmonic but with different worker keys. + int64 worker_key_hash = 9; + + WorkerStatus worker_status = 10; + + enum WorkerStatus { + // Used to indicate a worker instance where the process has not been + // created yet. In reality this isn't logged, but leaving this here as a + // possible option in the future. + NOT_STARTED = 0; + ALIVE = 1; + KILLED_DUE_TO_MEMORY_PRESSURE = 2; + // Indicates that the worker process was killed due to a reason unknown to + // Bazel at the point of measurement; if a known cause (below) comes along + // later on, this field will be updated. + KILLED_UNKNOWN = 3; + KILLED_DUE_TO_INTERRUPTED_EXCEPTION = 4; + KILLED_DUE_TO_IO_EXCEPTION = 5; + KILLED_DUE_TO_USER_EXEC_EXCEPTION = 6; + } + + optional failure_details.Worker.Code code = 12; + + int64 actions_executed = 11; + + int64 prior_actions_executed = 13; + + // Information collected from worker at some point. + message WorkerStats { + // Epoch unix time of collection of metrics. + int64 collect_time_in_ms = 1; + // Memory usage of worker process at the end of the build. + int32 worker_memory_in_kb = 2; + // Memory usage of the worker process prior to the invocation. + int32 prior_worker_memory_in_kb = 4; + // Epoch unix time of last action started on specific worker. 
+ int64 last_action_start_time_in_ms = 3; + } + + // Combined workers statistics. + repeated WorkerStats worker_stats = 7; + } + + repeated WorkerMetrics worker_metrics = 9; + + // Information about host network. + message NetworkMetrics { + // Information for all the network traffic going on on the host machine + // during the invocation. + message SystemNetworkStats { + // Total bytes sent during the invocation. + uint64 bytes_sent = 1; + // Total bytes received during the invocation. + uint64 bytes_recv = 2; + // Total packets sent during the invocation. + uint64 packets_sent = 3; + // Total packets received during the invocation. + uint64 packets_recv = 4; + // Peak bytes/sec sent during the invocation. + uint64 peak_bytes_sent_per_sec = 5; + // Peak bytes/sec received during the invocation. + uint64 peak_bytes_recv_per_sec = 6; + // Peak packets/sec sent during the invocation. + uint64 peak_packets_sent_per_sec = 7; + // Peak packets/sec received during the invocation. + uint64 peak_packets_recv_per_sec = 8; + } + + SystemNetworkStats system_network_stats = 1; + } + + NetworkMetrics network_metrics = 10; + + // Information about worker pool actions. + message WorkerPoolMetrics { + // Statistics of worker pool per worker pool hash. Basically it's a map from + // worker pool hash to statistics. + repeated WorkerPoolStats worker_pool_stats = 1; + + message WorkerPoolStats { + // Hash of worker pool these stats are for. Contains information about + // startup flags. + int32 hash = 1; + // Mnemonic of workers these stats are for. + string mnemonic = 2; + // Number of workers created during a build. + int64 created_count = 3; + // Number of workers destroyed during a build (sum of all workers + // destroyed by eviction, UserExecException, IoException, + // InterruptedException and unknown reasons below). + int64 destroyed_count = 4; + // Number of workers evicted during a build. + int64 evicted_count = 5; + // Number of workers destroyed due to UserExecExceptions. 
+ int64 user_exec_exception_destroyed_count = 6; + // Number of workers destroyed due to IoExceptions. + int64 io_exception_destroyed_count = 7; + // Number of workers destroyed due to InterruptedExceptions. + int64 interrupted_exception_destroyed_count = 8; + // Number of workers destroyed due to an unknown reason. + int64 unknown_destroyed_count = 9; + // Number of workers alive at the end of the build. + int64 alive_count = 10; + } + } + + WorkerPoolMetrics worker_pool_metrics = 11; + + // Information about dynamic execution. + message DynamicExecutionMetrics { + message RaceStatistics { + // Mnemonic of the action. + string mnemonic = 1; + // Name of runner of local branch. + string local_runner = 2; + // Name of runner of remote branch. + string remote_runner = 3; + // Number of wins of local branch in race. + int32 local_wins = 4; + // Number of wins of remote branch in race. + int32 remote_wins = 5; + } + // Race statistics grouped by mnemonic, local_name, remote_name. + repeated RaceStatistics race_statistics = 1; + } + + DynamicExecutionMetrics dynamic_execution_metrics = 12; +} + +// Event providing additional statistics/logs after completion of the build. +message BuildToolLogs { + repeated File log = 1; +} + +// Event describing all convenience symlinks (i.e., workspace symlinks) to be +// created or deleted once the execution phase has begun. Note that this event +// does not say anything about whether or not the build tool actually executed +// these filesystem operations; it only says what logical operations should be +// performed. This event is emitted exactly once per build; if no symlinks are +// to be modified, the event is still emitted with empty contents. +message ConvenienceSymlinksIdentified { + repeated ConvenienceSymlink convenience_symlinks = 1; +} + +// The message that contains what type of action to perform on a given path and +// target of a symlink. 
+message ConvenienceSymlink {
+  enum Action {
+    UNKNOWN = 0;
+
+    // Indicates a symlink should be created, or overwritten if it already
+    // exists.
+    CREATE = 1;
+
+    // Indicates a symlink should be deleted if it already exists.
+    DELETE = 2;
+  }
+
+  // The path of the symlink to be created or deleted, absolute or relative to
+  // the workspace, creating any directories necessary. If a symlink already
+  // exists at that location, then it should be replaced by a symlink pointing
+  // to the new target.
+  string path = 1;
+
+  // The operation we are performing on the symlink.
+  Action action = 2;
+
+  // If action is CREATE, this is the target path (relative to the output base)
+  // that the symlink should point to.
+  //
+  // If action is DELETE, this field is not set.
+  string target = 3;
+}
+
+// Event that contains the ExecRequest of a run command announced only after a
+// successful build and before trying to execute the requested command-line.
+message ExecRequestConstructed {
+  bytes working_directory = 1;
+  repeated bytes argv = 2;
+  repeated EnvironmentVariable environment_variable = 3;
+  repeated bytes environment_variable_to_clear = 4;
+  bool should_exec = 5;
+}
+
+// An environment variable provided by a run command after a successful build.
+message EnvironmentVariable {
+  bytes name = 1;
+  bytes value = 2;
+}
+
+// Message describing a build event. Events will have an identifier that
+// is unique within a given build invocation; they also announce follow-up
+// events as children. More details, which are specific to the kind of event
+// that is observed, are provided in the payload. More options for the payload
+// might be added in the future.
+message BuildEvent {
+  reserved 11, 19;
+  BuildEventId id = 1;
+  repeated BuildEventId children = 2;
+  bool last_message = 20;
+  oneof payload {
+    Progress progress = 3;
+    Aborted aborted = 4;
+    BuildStarted started = 5;
+    UnstructuredCommandLine unstructured_command_line = 12;
+    command_line.CommandLine structured_command_line = 22;
+    OptionsParsed options_parsed = 13;
+    WorkspaceStatus workspace_status = 16;
+    Fetch fetch = 21;
+    Configuration configuration = 17;
+    PatternExpanded expanded = 6;
+    TargetConfigured configured = 18;
+    ActionExecuted action = 7;
+    NamedSetOfFiles named_set_of_files = 15;
+    TargetComplete completed = 8;
+    TestResult test_result = 10;
+    TestProgress test_progress = 30;
+    TestSummary test_summary = 9;
+    TargetSummary target_summary = 28;
+    BuildFinished finished = 14;
+    BuildToolLogs build_tool_logs = 23;
+    BuildMetrics build_metrics = 24;
+    WorkspaceConfig workspace_info = 25;
+    BuildMetadata build_metadata = 26;
+    ConvenienceSymlinksIdentified convenience_symlinks_identified = 27;
+    ExecRequestConstructed exec_request = 29;
+  }
+}
diff --git a/app/bazel_event_publisher_proto/proto/command_line.proto b/app/bazel_event_publisher_proto/proto/command_line.proto
new file mode 100644
index 000000000000..181f5d63147b
--- /dev/null
+++ b/app/bazel_event_publisher_proto/proto/command_line.proto
@@ -0,0 +1,102 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package command_line;
+
+// option java_api_version = 2;
+option java_package = "com.google.devtools.build.lib.runtime.proto";
+
+import "option_filters.proto";
+
+// Representation of a Bazel command line.
+message CommandLine {
+  // A title for this command line value, to differentiate it from others.
+  // In particular, a single invocation may wish to report both the literal and
+  // canonical command lines, and this label would be used to differentiate
+  // between both versions. This is a string for flexibility.
+  string command_line_label = 1;
+
+  // A Bazel command line is made of distinct parts. For example,
+  // `bazel --nomaster_bazelrc test --nocache_test_results //foo:aTest`
+  // has the executable "bazel", a startup flag, a command "test", a command
+  // flag, and a test target. There could be many more flags and targets, or
+  // none (`bazel info` for example), but the basic structure is there. The
+  // command line should be broken down into these logical sections here.
+  repeated CommandLineSection sections = 2;
+}
+
+// A section of the Bazel command line.
+message CommandLineSection {
+  // The name of this section, such as "startup_option" or "command".
+  string section_label = 1;
+
+  oneof section_type {
+    // Sections with non-options, such as the list of targets or the command,
+    // should use simple string chunks.
+    ChunkList chunk_list = 2;
+
+    // Startup and command options are lists of options and belong here.
+    OptionList option_list = 3;
+  }
+}
+
+// Wrapper to allow a list of strings in the "oneof" section_type.
+message ChunkList {
+  repeated string chunk = 1;
+}
+
+// Wrapper to allow a list of options in the "oneof" section_type.
+message OptionList {
+  repeated Option option = 1;
+}
+
+// A single command line option.
+//
+// This represents the option itself, but does not take into account the type of
+// option or how the parser interpreted it. If this option is part of a command
+// line that represents the actual input that Bazel received, it would, for
+// example, include expansion flags as they are. However, if this option
+// represents the canonical form of the command line, with the values as Bazel
+// understands them, then the expansion flag, which has no value, would not
+// appear, and the flags it expands to would.
+message Option {
+  // How the option looks with the option and its value combined. Depending on
+  // the purpose of this command line report, this could be the canonical
+  // form, or the way that the flag was set.
+  //
+  // Some examples: this might be `--foo=bar` form, or `--foo bar` with a space;
+  // for boolean flags, `--nobaz` is accepted on top of `--baz=false` and other
+  // negating values, or for a positive value, the unqualified `--baz` form
+  // is also accepted. This could also be a short `-b`, if the flag has an
+  // abbreviated form.
+  string combined_form = 1;
+
+  // The canonical name of the option, without the preceding dashes.
+  string option_name = 2;
+
+  // The value of the flag, or unset for flags that do not take values.
+  // Especially for boolean flags, this should be in canonical form; the
+  // combined_form field above gives room for showing the flag as it was set
+  // if that is preferred.
+  string option_value = 3;
+
+  // This flag's tagged effects. See OptionEffectTag's java documentation for
+  // details.
+  repeated options.OptionEffectTag effect_tags = 4;
+
+  // Metadata about the flag. See OptionMetadataTag's java documentation for
+  // details.
+  repeated options.OptionMetadataTag metadata_tags = 5;
+}
diff --git a/app/bazel_event_publisher_proto/proto/failure_details.proto b/app/bazel_event_publisher_proto/proto/failure_details.proto
new file mode 100644
index 000000000000..7df8ca4ea13e
--- /dev/null
+++ b/app/bazel_event_publisher_proto/proto/failure_details.proto
@@ -0,0 +1,1378 @@
+// Copyright 2020 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file's messages describe any failure(s) that occurred during Bazel's
+// handling of a request. The intent is to provide more detail to a Bazel client
+// than is conveyed with an exit code, to help those clients decide how to
+// respond to, or classify, a failure.
+
+syntax = "proto3";
+
+package failure_details;
+
+option java_package = "com.google.devtools.build.lib.server";
+
+import "google/protobuf/descriptor.proto";
+
+message FailureDetailMetadata {
+  uint32 exit_code = 1;
+}
+
+extend google.protobuf.EnumValueOptions {
+  FailureDetailMetadata metadata = 1078;
+}
+
+// The FailureDetail message type is designed such that consumers can extract a
+// basic classification of a FailureDetail message even if the consumer was
+// built with a stale definition. This forward compatibility is implemented via
+// conventions on FailureDetail and its submessage types, as follows.
+//
+// *** FailureDetail field numbers
+//
+// Field numbers 1 through 100 (inclusive) are reserved for generally applicable
+// values. Any number of these fields may be set on a FailureDetail message.
+//
+// Field numbers 101 through 10,000 (inclusive) are reserved for use inside the
+// "oneof" structure. Only one of these values should be set on a FailureDetail
+// message.
+//
+// Additional field numbers are unlikely to be needed, but, for extreme future-
+// proofing purposes, field numbers 10,001 through 1,000,000 (inclusive;
+// excluding protobuf's reserved range 19000 through 19999) are reserved for
+// additional generally applicable values.
+//
+// *** FailureDetail's "oneof" submessages
+//
+// Each field in the "oneof" structure is a submessage corresponding to a
+// category of failure.
+//
+// In each of these submessage types, field number 1 is an enum whose values
+// correspond to a subcategory of the failure. Generally, the enum's constant
+// which maps to 0 should be interpreted as "unspecified", though this is not
+// required.
+//
+// *** Recommended forward compatibility strategy
+//
+// The recommended forward compatibility strategy is to reduce a FailureDetail
+// message to a pair of integers.
+//
+// The first integer corresponds to the field number of the submessage set
+// inside FailureDetail's "oneof", which corresponds with the failure's
+// category.
+//
+// The second integer corresponds to the value of the enum at field number 1
+// within that submessage, which corresponds with the failure's subcategory.
+//
+// WARNING: This functionality is experimental and should not be relied on at
+// this time.
+// TODO(mschaller): remove experimental warning
+message FailureDetail {
+  // A short human-readable message describing the failure, for debugging.
+  //
+  // This value is *not* intended to be used algorithmically.
+  string message = 1;
+
+  // Reserved for future generally applicable values. Any of these may be set.
+  reserved 2 to 100;
+
+  oneof category {
+    Interrupted interrupted = 101;
+    ExternalRepository external_repository = 103;
+    BuildProgress build_progress = 104;
+    RemoteOptions remote_options = 106;
+    ClientEnvironment client_environment = 107;
+    Crash crash = 108;
+    SymlinkForest symlink_forest = 110;
+    PackageOptions package_options = 114;
+    RemoteExecution remote_execution = 115;
+    Execution execution = 116;
+    Workspaces workspaces = 117;
+    CrashOptions crash_options = 118;
+    Filesystem filesystem = 119;
+    ExecutionOptions execution_options = 121;
+    Command command = 122;
+    Spawn spawn = 123;
+    GrpcServer grpc_server = 124;
+    CanonicalizeFlags canonicalize_flags = 125;
+    BuildConfiguration build_configuration = 126;
+    InfoCommand info_command = 127;
+    MemoryOptions memory_options = 129;
+    Query query = 130;
+    LocalExecution local_execution = 132;
+    ActionCache action_cache = 134;
+    FetchCommand fetch_command = 135;
+    SyncCommand sync_command = 136;
+    Sandbox sandbox = 137;
+    IncludeScanning include_scanning = 139;
+    TestCommand test_command = 140;
+    ActionQuery action_query = 141;
+    TargetPatterns target_patterns = 142;
+    CleanCommand clean_command = 144;
+    ConfigCommand config_command = 145;
+    ConfigurableQuery configurable_query = 146;
+    DumpCommand dump_command = 147;
+    HelpCommand help_command = 148;
+    MobileInstall mobile_install = 150;
+    ProfileCommand profile_command = 151;
+    RunCommand run_command = 152;
+    VersionCommand version_command = 153;
+    PrintActionCommand print_action_command = 154;
+    WorkspaceStatus workspace_status = 158;
+    JavaCompile java_compile = 159;
+    ActionRewinding action_rewinding = 160;
+    CppCompile cpp_compile = 161;
+    StarlarkAction starlark_action = 162;
+    NinjaAction ninja_action = 163;
+    DynamicExecution dynamic_execution = 164;
+    FailAction fail_action = 166;
+    SymlinkAction symlink_action = 167;
+    CppLink cpp_link = 168;
+    LtoAction lto_action = 169;
+    TestAction test_action = 172;
+    Worker worker = 173;
+    Analysis analysis = 174;
+    PackageLoading package_loading = 175;
+    Toolchain toolchain = 177;
+    StarlarkLoading starlark_loading = 179;
+    ExternalDeps external_deps = 181;
+    DiffAwareness diff_awareness = 182;
+    ModCommand mod_command = 183;
+    BuildReport build_report = 184;
+    Skyfocus skyfocus = 185;
+    RemoteAnalysisCaching remote_analysis_caching = 186;
+  }
+
+  reserved 102;  // For internal use
+  reserved 105;  // For internal use
+  reserved 109;  // For internal use
+  reserved 111 to 113;  // For internal use
+  reserved 120;  // For internal use
+  reserved 128;  // For internal use
+  reserved 131;  // For internal use
+  reserved 133;  // For internal use
+  reserved 138;  // For internal use
+  reserved 143;  // For internal use
+  reserved 149;  // For internal use
+  reserved 155 to 157;  // For internal use
+  reserved 165;  // For internal use
+  reserved 170 to 171;  // For internal use
+  reserved 176;  // For internal use
+  reserved 178;  // For internal use
+  reserved 180;  // For internal use
+}
+
+message Interrupted {
+  enum Code {
+    // Unknown interrupt. Avoid using this code, instead use INTERRUPTED.
+    INTERRUPTED_UNKNOWN = 0 [(metadata) = { exit_code: 8 }];
+
+    // Command was interrupted (cancelled).
+    INTERRUPTED = 28 [(metadata) = { exit_code: 8 }];
+
+    // The following more specific interrupt codes have been deprecated and
+    // consolidated into INTERRUPTED.
+    DEPRECATED_BUILD = 4 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_BUILD_COMPLETION = 5 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_PACKAGE_LOADING_SYNC = 6 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_EXECUTOR_COMPLETION = 7 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_COMMAND_DISPATCH = 8 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_INFO_ITEM = 9 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_AFTER_QUERY = 10 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_FETCH_COMMAND = 17 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_SYNC_COMMAND = 18 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_CLEAN_COMMAND = 20 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_MOBILE_INSTALL_COMMAND = 21 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_QUERY = 22 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_RUN_COMMAND = 23 [(metadata) = { exit_code: 8 }];
+    DEPRECATED_OPTIONS_PARSING = 27 [(metadata) = { exit_code: 8 }];
+
+    reserved 1 to 3;  // For internal use
+    reserved 11 to 16;  // For internal use
+    reserved 19;  // For internal use
+    reserved 24 to 26;  // For internal use
+  }
+
+  Code code = 1;
+}
+
+message Spawn {
+  enum Code {
+    SPAWN_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    // See the SpawnResult.Status Java enum for definitions of the following
+    // Spawn failure codes.
+    NON_ZERO_EXIT = 1 [(metadata) = { exit_code: 1 }];
+    TIMEOUT = 2 [(metadata) = { exit_code: 1 }];
+    // Note: Spawn OUT_OF_MEMORY leads to a BUILD_FAILURE exit_code because the
+    // build tool itself did not run out of memory.
+    OUT_OF_MEMORY = 3 [(metadata) = { exit_code: 1 }];
+    EXECUTION_FAILED = 4 [(metadata) = { exit_code: 34 }];
+    EXECUTION_DENIED = 5 [(metadata) = { exit_code: 1 }];
+    REMOTE_CACHE_FAILED = 6 [(metadata) = { exit_code: 34 }];
+    COMMAND_LINE_EXPANSION_FAILURE = 7 [(metadata) = { exit_code: 1 }];
+    EXEC_IO_EXCEPTION = 8 [(metadata) = { exit_code: 36 }];
+    INVALID_TIMEOUT = 9 [(metadata) = { exit_code: 1 }];
+    INVALID_REMOTE_EXECUTION_PROPERTIES = 10 [(metadata) = { exit_code: 1 }];
+    NO_USABLE_STRATEGY_FOUND = 11 [(metadata) = { exit_code: 1 }];
+    // TODO(b/138456686): this code should be deprecated when SpawnResult is
+    // refactored to prohibit undetailed failures
+    UNSPECIFIED_EXECUTION_FAILURE = 12 [(metadata) = { exit_code: 1 }];
+    FORBIDDEN_INPUT = 13 [(metadata) = { exit_code: 1 }];
+    // This also includes other remote cache errors, not just evictions,
+    // if --incompatible_remote_use_new_exit_code_for_lost_inputs is set.
+    // TODO: Rename it to a more general name when
+    // --experimental_remote_cache_eviction_retries is moved to
+    // non-experimental.
+    REMOTE_CACHE_EVICTED = 14 [(metadata) = { exit_code: 39 }];
+    SPAWN_LOG_IO_EXCEPTION = 15 [(metadata) = { exit_code: 36 }];
+  }
+  Code code = 1;
+
+  // For Codes describing generic failure to spawn (e.g. EXECUTION_FAILED and
+  // EXECUTION_DENIED) the `catastrophic` field may be set to true indicating a
+  // failure that immediately terminated the entire build tool.
+  bool catastrophic = 2;
+
+  // If Code is NON_ZERO_EXIT, the `spawn_exit_code` field may be set to the
+  // non-zero exit code returned by the spawned process to the OS.
+  //
+  // NOTE: This field must not be confused with the build tool's overall
+  // exit code.
+  int32 spawn_exit_code = 3;
+}
+
+message ExternalRepository {
+  enum Code {
+    EXTERNAL_REPOSITORY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    OVERRIDE_DISALLOWED_MANAGED_DIRECTORIES = 1 [(metadata) = { exit_code: 2 }];
+    BAD_DOWNLOADER_CONFIG = 2 [(metadata) = { exit_code: 2 }];
+    REPOSITORY_MAPPING_RESOLUTION_FAILED = 3 [(metadata) = { exit_code: 37 }];
+    CREDENTIALS_INIT_FAILURE = 4 [(metadata) = { exit_code: 2 }];
+  }
+  Code code = 1;
+  // Additional data could include external repository names.
+}
+
+message BuildProgress {
+  enum Code {
+    BUILD_PROGRESS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    OUTPUT_INITIALIZATION = 3 [(metadata) = { exit_code: 36 }];
+    BES_RUNS_PER_TEST_LIMIT_UNSUPPORTED = 4 [(metadata) = { exit_code: 2 }];
+    BES_LOCAL_WRITE_ERROR = 5 [(metadata) = { exit_code: 36 }];
+    BES_INITIALIZATION_ERROR = 6 [(metadata) = { exit_code: 36 }];
+    BES_UPLOAD_TIMEOUT_ERROR = 7 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_TIMEOUT = 8 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_IO_ERROR = 9 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_INTERRUPTED = 10 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_CANCELED = 11 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_UNKNOWN_ERROR = 12 [(metadata) = { exit_code: 38 }];
+    BES_UPLOAD_LOCAL_FILE_ERROR = 13 [(metadata) = { exit_code: 38 }];
+    BES_STREAM_NOT_RETRYING_FAILURE = 14 [(metadata) = { exit_code: 45 }];
+    BES_STREAM_COMPLETED_WITH_UNACK_EVENTS_ERROR = 15
+        [(metadata) = { exit_code: 45 }];
+    BES_STREAM_COMPLETED_WITH_UNSENT_EVENTS_ERROR = 16
+        [(metadata) = { exit_code: 45 }];
+    BES_STREAM_COMPLETED_WITH_REMOTE_ERROR = 19
+        [(metadata) = { exit_code: 45 }];
+    BES_UPLOAD_RETRY_LIMIT_EXCEEDED_FAILURE = 17
+        [(metadata) = { exit_code: 38 }];
+    reserved 1, 2, 18, 20;  // For internal use
+  }
+  Code code = 1;
+  // Additional data could include the build progress upload endpoint.
+}
+
+message RemoteOptions {
+  enum Code {
+    REMOTE_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    REMOTE_DEFAULT_EXEC_PROPERTIES_LOGIC_ERROR = 1
+        [(metadata) = { exit_code: 2 }];
+    // Credentials could not be read from the requested file/socket/process/etc.
+    CREDENTIALS_READ_FAILURE = 2 [(metadata) = { exit_code: 36 }];
+    // Credentials could not be written to a shared, temporary file.
+    CREDENTIALS_WRITE_FAILURE = 3 [(metadata) = { exit_code: 36 }];
+    DOWNLOADER_WITHOUT_GRPC_CACHE = 4 [(metadata) = { exit_code: 2 }];
+    EXECUTION_WITH_INVALID_CACHE = 5 [(metadata) = { exit_code: 2 }];
+
+    reserved 6;
+  }
+
+  Code code = 1;
+}
+
+message ClientEnvironment {
+  enum Code {
+    CLIENT_ENVIRONMENT_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    CLIENT_CWD_MALFORMED = 1 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message Crash {
+  enum Code {
+    CRASH_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    CRASH_OOM = 1 [(metadata) = { exit_code: 33 }];
+  }
+
+  Code code = 1;
+
+  // The cause chain of the crash, with the outermost throwable first. Limited
+  // to the outermost exception and at most 4 nested causes (so, max size of 5).
+  repeated Throwable causes = 2;
+
+  // True when the root cause of the crash was not an OutOfMemoryError, but
+  // CRASH_OOM was chosen because an OutOfMemoryError was detected prior to the
+  // crash.
+  bool oom_detector_override = 3;
+}
+
+message Throwable {
+  // The class name of the java.lang.Throwable.
+  string throwable_class = 1;
+  // The throwable's message.
+  string message = 2;
+  // The result of calling toString on the deepest (i.e. closest to the
+  // throwable's construction site) 1000 (or fewer) StackTraceElements.
+  // Unstructured to simplify string matching.
+  repeated string stack_trace = 3;
+}
+
+message SymlinkForest {
+  enum Code {
+    SYMLINK_FOREST_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    TOPLEVEL_OUTDIR_PACKAGE_PATH_CONFLICT = 1 [(metadata) = { exit_code: 2 }];
+    TOPLEVEL_OUTDIR_USED_AS_SOURCE = 2 [(metadata) = { exit_code: 2 }];
+    CREATION_FAILED = 3 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message BuildReport {
+  enum Code {
+    BUILD_REPORT_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    BUILD_REPORT_UPLOADER_NEEDS_PACKAGE_PATHS = 1
+        [(metadata) = { exit_code: 36 }];
+    BUILD_REPORT_WRITE_FAILED = 2 [(metadata) = { exit_code: 36 }];
+  }
+
+  Code code = 1;
+  // Additional data for partial failures might include the build report that
+  // failed to be written.
+}
+
+// Failure details for errors produced when using Skyfocus.
+message Skyfocus {
+  enum Code {
+    // The defined working set cannot be used for the focused targets. For
+    // example, this happens when the intersection of the working set and the
+    // transitive closure of the focused target is empty.
+    INVALID_WORKING_SET = 0 [(metadata) = { exit_code: 2 }];
+    // The user needs to augment their working set to include the new file(s).
+    NON_WORKING_SET_CHANGE = 1 [(metadata) = { exit_code: 2 }];
+    CONFIGURATION_CHANGE = 2 [(metadata) = { exit_code: 2 }];
+    DISALLOWED_OPERATION_ON_FOCUSED_GRAPH = 3 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for errors produced during remote analysis caching.
+message RemoteAnalysisCaching {
+  enum Code {
+    REMOTE_ANALYSIS_CACHING_UNKNOWN = 0 [(metadata) = { exit_code: 1 }];
+    SERIALIZED_FRONTIER_PROFILE_FAILED = 1 [(metadata) = { exit_code: 1 }];
+    PROJECT_FILE_NOT_FOUND = 2 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message PackageOptions {
+  enum Code {
+    reserved 2, 3;  // For internal use
+
+    PACKAGE_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    PACKAGE_PATH_INVALID = 1 [(metadata) = { exit_code: 2 }];
+    NONSINGLETON_PACKAGE_PATH = 4 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message RemoteExecution {
+  // The association of some of these options with exit code 2, "command line
+  // error", seems sketchy. Especially worth reconsidering are the channel init
+  // failure modes, which can correspond to failures occurring in gRPC setup.
+  // These all correspond to current Bazel behavior.
+  enum Code {
+    REMOTE_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    CAPABILITIES_QUERY_FAILURE = 1 [(metadata) = { exit_code: 34 }];
+    CREDENTIALS_INIT_FAILURE = 2 [(metadata) = { exit_code: 2 }];
+    CACHE_INIT_FAILURE = 3 [(metadata) = { exit_code: 2 }];
+    RPC_LOG_FAILURE = 4 [(metadata) = { exit_code: 2 }];
+    EXEC_CHANNEL_INIT_FAILURE = 5 [(metadata) = { exit_code: 2 }];
+    CACHE_CHANNEL_INIT_FAILURE = 6 [(metadata) = { exit_code: 2 }];
+    DOWNLOADER_CHANNEL_INIT_FAILURE = 7 [(metadata) = { exit_code: 2 }];
+    LOG_DIR_CLEANUP_FAILURE = 8 [(metadata) = { exit_code: 36 }];
+    CLIENT_SERVER_INCOMPATIBLE = 9 [(metadata) = { exit_code: 34 }];
+    DOWNLOADED_INPUTS_DELETION_FAILURE = 10 [(metadata) = { exit_code: 34 }];
+    REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_DOTD = 11
+        [(metadata) = { exit_code: 2 }];
+    REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_JDEPS = 12
+        [(metadata) = { exit_code: 2 }];
+    INCOMPLETE_OUTPUT_DOWNLOAD_CLEANUP_FAILURE = 13
+        [(metadata) = { exit_code: 36 }];
+    REMOTE_DEFAULT_PLATFORM_PROPERTIES_PARSE_FAILURE = 14
+        [(metadata) = { exit_code: 1 }];
+    ILLEGAL_OUTPUT = 15 [(metadata) = { exit_code: 1 }];
+    INVALID_EXEC_AND_PLATFORM_PROPERTIES = 16 [(metadata) = { exit_code: 1 }];
+    TOPLEVEL_OUTPUTS_DOWNLOAD_FAILURE = 17 [(metadata) = { exit_code: 34 }];
+  }
+
+  Code code = 1;
+}
+
+message Execution {
+  enum Code {
+    EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    EXECUTION_LOG_INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 2 }];
+    EXECUTION_LOG_WRITE_FAILURE = 2 [(metadata) = { exit_code: 36 }];
+    EXECROOT_CREATION_FAILURE = 3 [(metadata) = { exit_code: 36 }];
+    TEMP_ACTION_OUTPUT_DIRECTORY_DELETION_FAILURE = 4
+        [(metadata) = { exit_code: 36 }];
+    TEMP_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 5
+        [(metadata) = { exit_code: 36 }];
+    PERSISTENT_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 6
+        [(metadata) = { exit_code: 36 }];
+    LOCAL_OUTPUT_DIRECTORY_SYMLINK_FAILURE = 7 [(metadata) = { exit_code: 36 }];
+    reserved 8;  // was ACTION_INPUT_FILES_MISSING, now mostly
+                 // SOURCE_INPUT_MISSING
+    LOCAL_TEMPLATE_EXPANSION_FAILURE = 9 [(metadata) = { exit_code: 36 }];
+    INPUT_DIRECTORY_CHECK_IO_EXCEPTION = 10 [(metadata) = { exit_code: 36 }];
+    EXTRA_ACTION_OUTPUT_CREATION_FAILURE = 11 [(metadata) = { exit_code: 36 }];
+    TEST_RUNNER_IO_EXCEPTION = 12 [(metadata) = { exit_code: 36 }];
+    FILE_WRITE_IO_EXCEPTION = 13 [(metadata) = { exit_code: 36 }];
+    TEST_OUT_ERR_IO_EXCEPTION = 14 [(metadata) = { exit_code: 36 }];
+    SYMLINK_TREE_MANIFEST_COPY_IO_EXCEPTION = 15
+        [(metadata) = { exit_code: 36 }];
+    SYMLINK_TREE_MANIFEST_LINK_IO_EXCEPTION = 16
+        [(metadata) = { exit_code: 36 }];
+    SYMLINK_TREE_CREATION_IO_EXCEPTION = 17 [(metadata) = { exit_code: 36 }];
+    SYMLINK_TREE_CREATION_COMMAND_EXCEPTION = 18
+        [(metadata) = { exit_code: 36 }];
+    ACTION_INPUT_READ_IO_EXCEPTION = 19 [(metadata) = { exit_code: 36 }];
+    ACTION_NOT_UP_TO_DATE = 20 [(metadata) = { exit_code: 1 }];
+    PSEUDO_ACTION_EXECUTION_PROHIBITED = 21 [(metadata) = { exit_code: 1 }];
+    DISCOVERED_INPUT_DOES_NOT_EXIST = 22 [(metadata) = { exit_code: 36 }];
+    ACTION_OUTPUTS_DELETION_FAILURE = 23 [(metadata) = { exit_code: 1 }];
+    ACTION_OUTPUTS_NOT_CREATED = 24 [(metadata) = { exit_code: 1 }];
+    ACTION_FINALIZATION_FAILURE = 25 [(metadata) = { exit_code: 1 }];
+    ACTION_INPUT_LOST = 26 [(metadata) = { exit_code: 1 }];
+    FILESYSTEM_CONTEXT_UPDATE_FAILURE = 27 [(metadata) = { exit_code: 1 }];
+    ACTION_OUTPUT_CLOSE_FAILURE = 28 [(metadata) = { exit_code: 1 }];
+    INPUT_DISCOVERY_IO_EXCEPTION = 29 [(metadata) = { exit_code: 1 }];
+    TREE_ARTIFACT_DIRECTORY_CREATION_FAILURE = 30
+        [(metadata) = { exit_code: 1 }];
+    ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 31
+        [(metadata) = { exit_code: 1 }];
+    ACTION_FS_OUTPUT_DIRECTORY_CREATION_FAILURE = 32
+        [(metadata) = { exit_code: 1 }];
+    ACTION_FS_OUT_ERR_DIRECTORY_CREATION_FAILURE = 33
+        [(metadata) = { exit_code: 1 }];
+    NON_ACTION_EXECUTION_FAILURE = 34 [(metadata) = { exit_code: 1 }];
+    CYCLE = 35 [(metadata) = { exit_code: 1 }];
+    SOURCE_INPUT_MISSING = 36 [(metadata) = { exit_code: 1 }];
+    UNEXPECTED_EXCEPTION = 37 [(metadata) = { exit_code: 1 }];
+    reserved 38;
+    SOURCE_INPUT_IO_EXCEPTION = 39 [(metadata) = { exit_code: 1 }];
+    SYMLINK_TREE_DELETION_IO_EXCEPTION = 40 [(metadata) = { exit_code: 36 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details about Bazel's WORKSPACE features.
+message Workspaces {
+  enum Code {
+    WORKSPACES_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    WORKSPACES_LOG_INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 2 }];
+    WORKSPACES_LOG_WRITE_FAILURE = 2 [(metadata) = { exit_code: 36 }];
+
+    // See `managed_directories` in
+    // https://bazel.build/rules/lib/globals#workspace.
+    ILLEGAL_WORKSPACE_FILE_SYMLINK_WITH_MANAGED_DIRECTORIES = 3
+        [(metadata) = { exit_code: 1 }];
+    WORKSPACE_FILE_READ_FAILURE_WITH_MANAGED_DIRECTORIES = 4
+        [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+}
+
+message CrashOptions {
+  enum Code {
+    CRASH_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    reserved 1;  // For internal use
+  }
+
+  Code code = 1;
+}
+
+message Filesystem {
+  enum Code {
+    FILESYSTEM_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    reserved 1;
+    reserved 2;
+    EMBEDDED_BINARIES_ENUMERATION_FAILURE = 3 [(metadata) = { exit_code: 36 }];
+    SERVER_PID_TXT_FILE_READ_FAILURE = 4 [(metadata) = { exit_code: 36 }];
+    SERVER_FILE_WRITE_FAILURE = 5 [(metadata) = { exit_code: 36 }];
+    DEFAULT_DIGEST_HASH_FUNCTION_INVALID_VALUE = 6
+        [(metadata) = { exit_code: 2 }];
+    FILESYSTEM_JNI_NOT_AVAILABLE = 8 [(metadata) = { exit_code: 36 }];
+
+    reserved 7, 9, 10;  // For internal use
+  }
+
+  Code code = 1;
+}
+
+message ExecutionOptions {
+  // All numerical exit code associations correspond to pre-existing Bazel
+  // behavior. These associations are suspicious:
+  // - REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING (instead: 2?)
+  // - DEPRECATED_LOCAL_RESOURCES_USED (instead: 2?)
+  // TODO(b/138456686): Revise these after the (intentionally non-breaking)
+  // initial rollout of FailureDetail-based encoding.
+  enum Code {
+    EXECUTION_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    INVALID_STRATEGY = 3 [(metadata) = { exit_code: 2 }];
+    REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING = 4
+        [(metadata) = { exit_code: 36 }];
+    DEPRECATED_LOCAL_RESOURCES_USED = 5 [(metadata) = { exit_code: 36 }];
+    INVALID_CYCLIC_DYNAMIC_STRATEGY = 6 [(metadata) = { exit_code: 36 }];
+    RESTRICTION_UNMATCHED_TO_ACTION_CONTEXT = 7 [(metadata) = { exit_code: 2 }];
+    REMOTE_FALLBACK_STRATEGY_NOT_ABSTRACT_SPAWN = 8
+        [(metadata) = { exit_code: 2 }];
+    STRATEGY_NOT_FOUND = 9 [(metadata) = { exit_code: 2 }];
+    DYNAMIC_STRATEGY_NOT_SANDBOXED = 10 [(metadata) = { exit_code: 2 }];
+    MULTIPLE_EXECUTION_LOG_FORMATS = 11 [(metadata) = { exit_code: 2 }];
+
+    reserved 1, 2;  // For internal use
+  }
+
+  Code code = 1;
+}
+
+message Command {
+  enum Code {
+    // The name "COMMAND_UNKNOWN" might reasonably be interpreted as "command
+    // not found". The enum's default value should represent a lack of knowledge
+    // about the failure instead.
+    COMMAND_FAILURE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    COMMAND_NOT_FOUND = 1 [(metadata) = { exit_code: 2 }];
+    ANOTHER_COMMAND_RUNNING = 2 [(metadata) = { exit_code: 9 }];
+    PREVIOUSLY_SHUTDOWN = 3 [(metadata) = { exit_code: 36 }];
+    STARLARK_CPU_PROFILE_FILE_INITIALIZATION_FAILURE = 4
+        [(metadata) = { exit_code: 36 }];
+    STARLARK_CPU_PROFILING_INITIALIZATION_FAILURE = 5
+        [(metadata) = { exit_code: 36 }];
+    STARLARK_CPU_PROFILE_FILE_WRITE_FAILURE = 6
+        [(metadata) = { exit_code: 36 }];
+    INVOCATION_POLICY_PARSE_FAILURE = 7 [(metadata) = { exit_code: 2 }];
+    INVOCATION_POLICY_INVALID = 8 [(metadata) = { exit_code: 2 }];
+    OPTIONS_PARSE_FAILURE = 9 [(metadata) = { exit_code: 2 }];
+    STARLARK_OPTIONS_PARSE_FAILURE = 10 [(metadata) = { exit_code: 2 }];
+    ARGUMENTS_NOT_RECOGNIZED = 11 [(metadata) = { exit_code: 2 }];
+    NOT_IN_WORKSPACE = 12 [(metadata) = { exit_code: 2 }];
+    reserved 13;
+    IN_OUTPUT_DIRECTORY = 14 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message GrpcServer {
+  enum Code {
+    GRPC_SERVER_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    GRPC_SERVER_NOT_COMPILED_IN = 1 [(metadata) = { exit_code: 37 }];
+    SERVER_BIND_FAILURE = 2 [(metadata) = { exit_code: 1 }];
+    BAD_COOKIE = 3 [(metadata) = { exit_code: 36 }];
+    NO_CLIENT_DESCRIPTION = 4 [(metadata) = { exit_code: 36 }];
+    reserved 5;  // For internal use
+  }
+
+  Code code = 1;
+}
+
+message CanonicalizeFlags {
+  enum Code {
+    CANONICALIZE_FLAGS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    FOR_COMMAND_INVALID = 1 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure modes described by this category pertain to the Bazel invocation
+// configuration consumed by Bazel's analysis phase. This category is not
+// intended as a grab-bag for all Bazel flag value constraint violations, which
+// instead generally belong in the category for the subsystem whose flag values
+// participate in the constraint.
+message BuildConfiguration {
+  enum Code {
+    BUILD_CONFIGURATION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    PLATFORM_MAPPING_EVALUATION_FAILURE = 1 [(metadata) = { exit_code: 2 }];
+    PLATFORM_MAPPINGS_FILE_IS_DIRECTORY = 2 [(metadata) = { exit_code: 1 }];
+    PLATFORM_MAPPINGS_FILE_NOT_FOUND = 3 [(metadata) = { exit_code: 1 }];
+    TOP_LEVEL_CONFIGURATION_CREATION_FAILURE = 4
+        [(metadata) = { exit_code: 1 }];
+    INVALID_CONFIGURATION = 5 [(metadata) = { exit_code: 2 }];
+    INVALID_BUILD_OPTIONS = 6 [(metadata) = { exit_code: 2 }];
+    MULTI_CPU_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }];
+    HEURISTIC_INSTRUMENTATION_FILTER_INVALID = 8
+        [(metadata) = { exit_code: 2 }];
+    CYCLE = 9 [(metadata) = { exit_code: 2 }];
+    CONFLICTING_CONFIGURATIONS = 10 [(metadata) = { exit_code: 2 }];
+    // This can come from either an invalid user-specified option or a
+    // configuration transition. There's no sure-fire way to distinguish the two
+    // possibilities in Bazel, so we go with the more straightforward
+    // command-line error exit code 2.
+    INVALID_OUTPUT_DIRECTORY_MNEMONIC = 11 [(metadata) = { exit_code: 2 }];
+    CONFIGURATION_DISCARDED_ANALYSIS_CACHE = 12 [(metadata) = { exit_code: 2 }];
+    // Failure modes specific to PROJECT.scl files.
+    INVALID_PROJECT = 13 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message InfoCommand {
+  // The distinction between a failure to write a single info item and a failure
+  // to write them all seems sketchy. Why do they have different exit codes?
+  // This reflects current Bazel behavior, but deserves more thought.
+  enum Code {
+    INFO_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    TOO_MANY_KEYS = 1 [(metadata) = { exit_code: 2 }];
+    KEY_NOT_RECOGNIZED = 2 [(metadata) = { exit_code: 2 }];
+    INFO_BLOCK_WRITE_FAILURE = 3 [(metadata) = { exit_code: 7 }];
+    ALL_INFO_WRITE_FAILURE = 4 [(metadata) = { exit_code: 36 }];
+  }
+
+  Code code = 1;
+}
+
+message MemoryOptions {
+  enum Code {
+    MEMORY_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    // Deprecated: validation is now implemented by the option converter.
+    DEPRECATED_EXPERIMENTAL_OOM_MORE_EAGERLY_THRESHOLD_INVALID_VALUE = 1
+        [(metadata) = { exit_code: 2 }, deprecated = true];
+    // Deprecated: 'no tenured collectors found' is now a crash on startup.
+    DEPRECATED_EXPERIMENTAL_OOM_MORE_EAGERLY_NO_TENURED_COLLECTORS_FOUND = 2
+        [(metadata) = { exit_code: 2 }, deprecated = true];
+  }
+
+  Code code = 1;
+}
+
+message Query {
+  enum Code {
+    QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    QUERY_FILE_WITH_COMMAND_LINE_EXPRESSION = 1 [(metadata) = { exit_code: 2 }];
+    QUERY_FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 2 }];
+    COMMAND_LINE_EXPRESSION_MISSING = 3 [(metadata) = { exit_code: 2 }];
+    OUTPUT_FORMAT_INVALID = 4 [(metadata) = { exit_code: 2 }];
+    GRAPHLESS_PREREQ_UNMET = 5 [(metadata) = { exit_code: 2 }];
+    QUERY_OUTPUT_WRITE_FAILURE = 6 [(metadata) = { exit_code: 36 }];
+    QUERY_STDOUT_FLUSH_FAILURE = 13 [(metadata) = { exit_code: 36 }];
+    ANALYSIS_QUERY_PREREQ_UNMET = 14 [(metadata) = { exit_code: 2 }];
+    QUERY_RESULTS_FLUSH_FAILURE = 15 [(metadata) = { exit_code: 36 }];
+    // Deprecated - folded into SYNTAX_ERROR.
+ DEPRECATED_UNCLOSED_QUOTATION_EXPRESSION_ERROR = 16 + [(metadata) = { exit_code: 2 }]; + VARIABLE_NAME_INVALID = 17 [(metadata) = { exit_code: 7 }]; + VARIABLE_UNDEFINED = 18 [(metadata) = { exit_code: 7 }]; + BUILDFILES_AND_LOADFILES_CANNOT_USE_OUTPUT_LOCATION_ERROR = 19 + [(metadata) = { exit_code: 2 }]; + BUILD_FILE_ERROR = 20 [(metadata) = { exit_code: 7 }]; + CYCLE = 21 [(metadata) = { exit_code: 7 }]; + UNIQUE_SKYKEY_THRESHOLD_EXCEEDED = 22 [(metadata) = { exit_code: 7 }]; + TARGET_NOT_IN_UNIVERSE_SCOPE = 23 [(metadata) = { exit_code: 2 }]; + INVALID_FULL_UNIVERSE_EXPRESSION = 24 [(metadata) = { exit_code: 7 }]; + UNIVERSE_SCOPE_LIMIT_EXCEEDED = 25 [(metadata) = { exit_code: 7 }]; + INVALIDATION_LIMIT_EXCEEDED = 26 [(metadata) = { exit_code: 7 }]; + OUTPUT_FORMAT_PREREQ_UNMET = 27 [(metadata) = { exit_code: 2 }]; + ARGUMENTS_MISSING = 28 [(metadata) = { exit_code: 7 }]; + RBUILDFILES_FUNCTION_REQUIRES_SKYQUERY = 29 [(metadata) = { exit_code: 7 }]; + FULL_TARGETS_NOT_SUPPORTED = 30 [(metadata) = { exit_code: 7 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_UNEXPECTED_TOKEN_ERROR = 31 [(metadata) = { exit_code: 2 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_INTEGER_LITERAL_MISSING = 32 [(metadata) = { exit_code: 2 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_INVALID_STARTING_CHARACTER_ERROR = 33 + [(metadata) = { exit_code: 2 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_PREMATURE_END_OF_INPUT_ERROR = 34 + [(metadata) = { exit_code: 2 }]; + // Indicates the user specified invalid query syntax. + SYNTAX_ERROR = 35 [(metadata) = { exit_code: 2 }]; + OUTPUT_FORMATTER_IO_EXCEPTION = 36 [(metadata) = { exit_code: 36 }]; + SKYQUERY_TRANSITIVE_TARGET_ERROR = 37 [(metadata) = { exit_code: 7 }]; + SKYQUERY_TARGET_EXCEPTION = 38 [(metadata) = { exit_code: 7 }]; + INVALID_LABEL_IN_TEST_SUITE = 39 [(metadata) = { exit_code: 7 }]; + // Indicates any usage of flags that must not be combined. 
+ ILLEGAL_FLAG_COMBINATION = 40 [(metadata) = { exit_code: 2 }]; + // Indicates a non-detailed exception that halted a query. This is a + // deficiency in Blaze/Bazel and code should be changed to attach a detailed + // exit code to this failure mode. + NON_DETAILED_ERROR = 41 [(metadata) = { exit_code: 1 }]; + + reserved 7 to 12; // For internal use + } + + Code code = 1; +} + +message LocalExecution { + enum Code { + LOCAL_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + LOCKFREE_OUTPUT_PREREQ_UNMET = 1 [(metadata) = { exit_code: 2 }]; + UNTRACKED_RESOURCE = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message ActionCache { + enum Code { + ACTION_CACHE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message FetchCommand { + enum Code { + FETCH_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + EXPRESSION_MISSING = 1 [(metadata) = { exit_code: 2 }]; + OPTIONS_INVALID = 2 [(metadata) = { exit_code: 2 }]; + QUERY_PARSE_ERROR = 3 [(metadata) = { exit_code: 2 }]; + QUERY_EVALUATION_ERROR = 4 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message SyncCommand { + enum Code { + SYNC_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + PACKAGE_LOOKUP_ERROR = 1 [(metadata) = { exit_code: 7 }]; + WORKSPACE_EVALUATION_ERROR = 2 [(metadata) = { exit_code: 7 }]; + REPOSITORY_FETCH_ERRORS = 3 [(metadata) = { exit_code: 7 }]; + REPOSITORY_NAME_INVALID = 4 [(metadata) = { exit_code: 7 }]; + } + + Code code = 1; +} + +message Sandbox { + enum Code { + SANDBOX_FAILURE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 36 }]; + EXECUTION_IO_EXCEPTION = 2 [(metadata) = { exit_code: 36 }]; + DOCKER_COMMAND_FAILURE = 3 [(metadata) = { exit_code: 1 }]; + NO_DOCKER_IMAGE = 4 [(metadata) = { exit_code: 1 }]; + DOCKER_IMAGE_PREPARATION_FAILURE = 5 [(metadata) = { exit_code: 1 }]; + 
BIND_MOUNT_ANALYSIS_FAILURE = 6 [(metadata) = { exit_code: 1 }]; + MOUNT_SOURCE_DOES_NOT_EXIST = 7 [(metadata) = { exit_code: 1 }]; + MOUNT_SOURCE_TARGET_TYPE_MISMATCH = 8 [(metadata) = { exit_code: 1 }]; + MOUNT_TARGET_DOES_NOT_EXIST = 9 [(metadata) = { exit_code: 1 }]; + SUBPROCESS_START_FAILED = 10 [(metadata) = { exit_code: 36 }]; + FORBIDDEN_INPUT = 11 [(metadata) = { exit_code: 1 }]; + COPY_INPUTS_IO_EXCEPTION = 12 [(metadata) = { exit_code: 36 }]; + COPY_OUTPUTS_IO_EXCEPTION = 13 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message IncludeScanning { + enum Code { + INCLUDE_SCANNING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INITIALIZE_INCLUDE_HINTS_ERROR = 1 [(metadata) = { exit_code: 36 }]; + SCANNING_IO_EXCEPTION = 2 [(metadata) = { exit_code: 36 }]; + INCLUDE_HINTS_FILE_NOT_IN_PACKAGE = 3 [(metadata) = { exit_code: 36 }]; + INCLUDE_HINTS_READ_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + ILLEGAL_ABSOLUTE_PATH = 5 [(metadata) = { exit_code: 1 }]; + // TODO(b/166268889): this code should be deprecated in favor of more finely + // resolved loading-phase codes. + PACKAGE_LOAD_FAILURE = 6 [(metadata) = { exit_code: 1 }]; + USER_PACKAGE_LOAD_FAILURE = 7 [(metadata) = { exit_code: 1 }]; + SYSTEM_PACKAGE_LOAD_FAILURE = 8 [(metadata) = { exit_code: 36 }]; + UNDIFFERENTIATED_PACKAGE_LOAD_FAILURE = 9 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; + PackageLoading.Code package_loading_code = 2; +} + +message TestCommand { + enum Code { + TEST_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NO_TEST_TARGETS = 1 [(metadata) = { exit_code: 4 }]; + TEST_WITH_NOANALYZE = 2 [(metadata) = { exit_code: 1 }]; + TESTS_FAILED = 3 [(metadata) = { exit_code: 3 }]; + } + + Code code = 1; +} + +message ActionQuery { + // All numerical exit code associations correspond to pre-existing Bazel + // behavior. 
These associations are suspicious:
+  //  - COMMAND_LINE_EXPANSION_FAILURE: this is associated with 2, the numerical
+  //    exit code for "bad Bazel command line", but is generated when an
+  //    action's command line fails to expand, which sounds similar but is
+  //    completely different.
+  //  - OUTPUT_FAILURE: this is associated with 6, an undocumented exit code.
+  //  - INVALID_AQUERY_EXPRESSION: this is associated with 1, which is not
+  //    documented for (a)query.
+  // TODO(b/138456686): Revise these after the (intentionally non-breaking)
+  // initial rollout of FailureDetail-based encoding.
+  enum Code {
+    ACTION_QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    COMMAND_LINE_EXPANSION_FAILURE = 1 [(metadata) = { exit_code: 2 }];
+    OUTPUT_FAILURE = 2 [(metadata) = { exit_code: 6 }];
+    COMMAND_LINE_EXPRESSION_MISSING = 3 [(metadata) = { exit_code: 2 }];
+    EXPRESSION_PARSE_FAILURE = 4 [(metadata) = { exit_code: 2 }];
+    SKYFRAME_STATE_WITH_COMMAND_LINE_EXPRESSION = 5
+        [(metadata) = { exit_code: 2 }];
+    INVALID_AQUERY_EXPRESSION = 6 [(metadata) = { exit_code: 1 }];
+    SKYFRAME_STATE_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }];
+    AQUERY_OUTPUT_TOO_BIG = 8 [(metadata) = { exit_code: 7 }];
+    ILLEGAL_PATTERN_SYNTAX = 9 [(metadata) = { exit_code: 2 }];
+    INCORRECT_ARGUMENTS = 10 [(metadata) = { exit_code: 2 }];
+    TOP_LEVEL_TARGETS_WITH_SKYFRAME_STATE_NOT_SUPPORTED = 11
+        [(metadata) = { exit_code: 2 }];
+    SKYFRAME_STATE_AFTER_EXECUTION = 12 [(metadata) = { exit_code: 1 }];
+    LABELS_FUNCTION_NOT_SUPPORTED = 13 [(metadata) = { exit_code: 2 }];
+    TEMPLATE_EXPANSION_FAILURE = 14 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+message TargetPatterns {
+  enum Code {
+    TARGET_PATTERNS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    TARGET_PATTERN_FILE_WITH_COMMAND_LINE_PATTERN = 1
+        [(metadata) = { exit_code: 2 }];
+    TARGET_PATTERN_FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 2 }];
+    TARGET_PATTERN_PARSE_FAILURE = 3 [(metadata) = { exit_code: 1 }];
+ 
PACKAGE_NOT_FOUND = 4 [(metadata) = { exit_code: 1 }]; + TARGET_FORMAT_INVALID = 5 [(metadata) = { exit_code: 1 }]; + ABSOLUTE_TARGET_PATTERN_INVALID = 6 [(metadata) = { exit_code: 1 }]; + CANNOT_DETERMINE_TARGET_FROM_FILENAME = 7 [(metadata) = { exit_code: 1 }]; + LABEL_SYNTAX_ERROR = 8 [(metadata) = { exit_code: 1 }]; + TARGET_CANNOT_BE_EMPTY_STRING = 9 [(metadata) = { exit_code: 1 }]; + PACKAGE_PART_CANNOT_END_IN_SLASH = 10 [(metadata) = { exit_code: 1 }]; + CYCLE = 11 [(metadata) = { exit_code: 1 }]; + CANNOT_PRELOAD_TARGET = 12 [(metadata) = { exit_code: 1 }]; + TARGETS_MISSING = 13 [(metadata) = { exit_code: 1 }]; + RECURSIVE_TARGET_PATTERNS_NOT_ALLOWED = 14 [(metadata) = { exit_code: 1 }]; + UP_LEVEL_REFERENCES_NOT_ALLOWED = 15 [(metadata) = { exit_code: 1 }]; + NEGATIVE_TARGET_PATTERN_NOT_ALLOWED = 16 [(metadata) = { exit_code: 1 }]; + TARGET_MUST_BE_A_FILE = 17 [(metadata) = { exit_code: 1 }]; + DEPENDENCY_NOT_FOUND = 18 [(metadata) = { exit_code: 1 }]; + PACKAGE_NAME_INVALID = 19 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message CleanCommand { + enum Code { + CLEAN_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + OUTPUT_SERVICE_CLEAN_FAILURE = 1 [(metadata) = { exit_code: 6 }]; + ACTION_CACHE_CLEAN_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + OUT_ERR_CLOSE_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + OUTPUT_BASE_DELETE_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + OUTPUT_BASE_TEMP_MOVE_FAILURE = 5 [(metadata) = { exit_code: 36 }]; + ASYNC_OUTPUT_BASE_DELETE_FAILURE = 6 [(metadata) = { exit_code: 6 }]; + EXECROOT_DELETE_FAILURE = 7 [(metadata) = { exit_code: 36 }]; + EXECROOT_TEMP_MOVE_FAILURE = 8 [(metadata) = { exit_code: 36 }]; + ASYNC_EXECROOT_DELETE_FAILURE = 9 [(metadata) = { exit_code: 6 }]; + ARGUMENTS_NOT_RECOGNIZED = 10 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message ConfigCommand { + enum Code { + CONFIG_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + TOO_MANY_CONFIG_IDS = 1 
[(metadata) = { exit_code: 2 }]; + CONFIGURATION_NOT_FOUND = 2 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message ConfigurableQuery { + enum Code { + CONFIGURABLE_QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + COMMAND_LINE_EXPRESSION_MISSING = 1 [(metadata) = { exit_code: 2 }]; + EXPRESSION_PARSE_FAILURE = 2 [(metadata) = { exit_code: 2 }]; + FILTERS_NOT_SUPPORTED = 3 [(metadata) = { exit_code: 2 }]; + BUILDFILES_FUNCTION_NOT_SUPPORTED = 4 [(metadata) = { exit_code: 2 }]; + SIBLINGS_FUNCTION_NOT_SUPPORTED = 5 [(metadata) = { exit_code: 2 }]; + VISIBLE_FUNCTION_NOT_SUPPORTED = 6 [(metadata) = { exit_code: 2 }]; + ATTRIBUTE_MISSING = 7 [(metadata) = { exit_code: 2 }]; + INCORRECT_CONFIG_ARGUMENT_ERROR = 8 [(metadata) = { exit_code: 2 }]; + TARGET_MISSING = 9 [(metadata) = { exit_code: 2 }]; + STARLARK_SYNTAX_ERROR = 10 [(metadata) = { exit_code: 2 }]; + STARLARK_EVAL_ERROR = 11 [(metadata) = { exit_code: 2 }]; + // Indicates failure to correctly define a format function + FORMAT_FUNCTION_ERROR = 12 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message DumpCommand { + enum Code { + DUMP_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NO_OUTPUT_SPECIFIED = 1 [(metadata) = { exit_code: 7 }]; + ACTION_CACHE_DUMP_FAILED = 2 [(metadata) = { exit_code: 7 }]; + COMMAND_LINE_EXPANSION_FAILURE = 3 [(metadata) = { exit_code: 7 }]; + ACTION_GRAPH_DUMP_FAILED = 4 [(metadata) = { exit_code: 7 }]; + STARLARK_HEAP_DUMP_FAILED = 5 [(metadata) = { exit_code: 8 }]; + reserved 6; // For internal use + SKYFRAME_MEMORY_DUMP_FAILED = 7 [(metadata) = { exit_code: 7 }]; + // deprecated, moved to the RemoteAnalysisCaching message. 
+ reserved 8; + } + + Code code = 1; +} + +message HelpCommand { + enum Code { + HELP_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MISSING_ARGUMENT = 1 [(metadata) = { exit_code: 2 }]; + COMMAND_NOT_FOUND = 2 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message MobileInstall { + enum Code { + MOBILE_INSTALL_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + CLASSIC_UNSUPPORTED = 1 [(metadata) = { exit_code: 2 }]; + NO_TARGET_SPECIFIED = 2 [(metadata) = { exit_code: 2 }]; + MULTIPLE_TARGETS_SPECIFIED = 3 [(metadata) = { exit_code: 2 }]; + TARGET_TYPE_INVALID = 4 [(metadata) = { exit_code: 6 }]; + NON_ZERO_EXIT = 5 [(metadata) = { exit_code: 6 }]; + ERROR_RUNNING_PROGRAM = 6 [(metadata) = { exit_code: 6 }]; + } + + Code code = 1; +} + +message ProfileCommand { + enum Code { + PROFILE_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + OLD_BINARY_FORMAT_UNSUPPORTED = 1 [(metadata) = { exit_code: 1 }]; + FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message RunCommand { + enum Code { + RUN_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NO_TARGET_SPECIFIED = 1 [(metadata) = { exit_code: 2 }]; + TOO_MANY_TARGETS_SPECIFIED = 2 [(metadata) = { exit_code: 2 }]; + TARGET_NOT_EXECUTABLE = 3 [(metadata) = { exit_code: 2 }]; + TARGET_BUILT_BUT_PATH_NOT_EXECUTABLE = 4 [(metadata) = { exit_code: 1 }]; + TARGET_BUILT_BUT_PATH_VALIDATION_FAILED = 5 + [(metadata) = { exit_code: 36 }]; + RUN_UNDER_TARGET_NOT_BUILT = 6 [(metadata) = { exit_code: 2 }]; + RUN_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }]; + TOO_MANY_TEST_SHARDS_OR_RUNS = 8 [(metadata) = { exit_code: 2 }]; + TEST_ENVIRONMENT_SETUP_FAILURE = 9 [(metadata) = { exit_code: 36 }]; + COMMAND_LINE_EXPANSION_FAILURE = 10 [(metadata) = { exit_code: 36 }]; + NO_SHELL_SPECIFIED = 11 [(metadata) = { exit_code: 2 }]; + SCRIPT_WRITE_FAILURE = 12 [(metadata) = { exit_code: 6 }]; + RUNFILES_DIRECTORIES_CREATION_FAILURE = 13 [(metadata) = { exit_code: 36 }]; + 
RUNFILES_SYMLINKS_CREATION_FAILURE = 14 [(metadata) = { exit_code: 36 }]; + TEST_ENVIRONMENT_SETUP_INTERRUPTED = 15 [(metadata) = { exit_code: 8 }]; + } + + Code code = 1; +} + +message VersionCommand { + enum Code { + VERSION_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NOT_AVAILABLE = 1 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message PrintActionCommand { + enum Code { + PRINT_ACTION_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + TARGET_NOT_FOUND = 1 [(metadata) = { exit_code: 1 }]; + COMMAND_LINE_EXPANSION_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + TARGET_KIND_UNSUPPORTED = 3 [(metadata) = { exit_code: 1 }]; + ACTIONS_NOT_FOUND = 4 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message WorkspaceStatus { + enum Code { + WORKSPACE_STATUS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NON_ZERO_EXIT = 1 [(metadata) = { exit_code: 1 }]; + ABNORMAL_TERMINATION = 2 [(metadata) = { exit_code: 1 }]; + EXEC_FAILED = 3 [(metadata) = { exit_code: 1 }]; + PARSE_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + VALIDATION_FAILURE = 5 [(metadata) = { exit_code: 1 }]; + CONTENT_UPDATE_IO_EXCEPTION = 6 [(metadata) = { exit_code: 1 }]; + STDERR_IO_EXCEPTION = 7 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message JavaCompile { + enum Code { + JAVA_COMPILE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + REDUCED_CLASSPATH_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + COMMAND_LINE_EXPANSION_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + JDEPS_READ_IO_EXCEPTION = 3 [(metadata) = { exit_code: 36 }]; + REDUCED_CLASSPATH_FALLBACK_CLEANUP_FAILURE = 4 + [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message ActionRewinding { + enum Code { + ACTION_REWINDING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + LOST_INPUT_TOO_MANY_TIMES = 1 [(metadata) = { exit_code: 1 }]; + REWIND_LOST_INPUTS_PREREQ_UNMET = 3 [(metadata) = { exit_code: 2 }]; + LOST_OUTPUT_TOO_MANY_TIMES = 4 [(metadata) = { exit_code: 1 
}]; + LOST_INPUT_REWINDING_DISABLED = 5 [(metadata) = { exit_code: 1 }]; + LOST_OUTPUT_REWINDING_DISABLED = 6 [(metadata) = { exit_code: 1 }]; + // Deprecated: attempting to rewind a source artifact is now a hard crash. + DEPRECATED_LOST_INPUT_IS_SOURCE = 2 + [(metadata) = { exit_code: 1 }, deprecated = true]; + } + + Code code = 1; +} + +message CppCompile { + enum Code { + CPP_COMPILE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + FIND_USED_HEADERS_IO_EXCEPTION = 1 [(metadata) = { exit_code: 36 }]; + COPY_OUT_ERR_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + D_FILE_READ_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + COMMAND_GENERATION_FAILURE = 4 [(metadata) = { exit_code: 1 }]; + MODULE_EXPANSION_TIMEOUT = 5 [(metadata) = { exit_code: 1 }]; + INCLUDE_PATH_OUTSIDE_EXEC_ROOT = 6 [(metadata) = { exit_code: 1 }]; + FAKE_COMMAND_GENERATION_FAILURE = 7 [(metadata) = { exit_code: 1 }]; + UNDECLARED_INCLUSIONS = 8 [(metadata) = { exit_code: 1 }]; + D_FILE_PARSE_FAILURE = 9 [(metadata) = { exit_code: 1 }]; + COVERAGE_NOTES_CREATION_FAILURE = 10 [(metadata) = { exit_code: 1 }]; + MODULE_EXPANSION_MISSING_DATA = 11 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message StarlarkAction { + enum Code { + STARLARK_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + UNUSED_INPUT_LIST_READ_FAILURE = 1 [(metadata) = { exit_code: 36 }]; + UNUSED_INPUT_LIST_FILE_NOT_FOUND = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message NinjaAction { + enum Code { + NINJA_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INVALID_DEPFILE_DECLARED_DEPENDENCY = 1 [(metadata) = { exit_code: 36 }]; + D_FILE_PARSE_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message DynamicExecution { + enum Code { + DYNAMIC_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + XCODE_RELATED_PREREQ_UNMET = 1 [(metadata) = { exit_code: 36 }]; + ACTION_LOG_MOVE_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + RUN_FAILURE = 3 [(metadata) = { 
exit_code: 1 }]; + NO_USABLE_STRATEGY_FOUND = 4 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message FailAction { + enum Code { + FAIL_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INTENTIONAL_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + INCORRECT_PYTHON_VERSION = 2 [(metadata) = { exit_code: 1 }]; + PROGUARD_SPECS_MISSING = 3 [(metadata) = { exit_code: 1 }]; + DYNAMIC_LINKING_NOT_SUPPORTED = 4 [(metadata) = { exit_code: 1 }]; + SOURCE_FILES_MISSING = 5 [(metadata) = { exit_code: 1 }]; + INCORRECT_TOOLCHAIN = 6 [(metadata) = { exit_code: 1 }]; + FRAGMENT_CLASS_MISSING = 7 [(metadata) = { exit_code: 1 }]; + reserved 8, 9; // For internal use + CANT_BUILD_INCOMPATIBLE_TARGET = 10 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message SymlinkAction { + enum Code { + SYMLINK_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + EXECUTABLE_INPUT_NOT_FILE = 1 [(metadata) = { exit_code: 1 }]; + EXECUTABLE_INPUT_IS_NOT = 2 [(metadata) = { exit_code: 1 }]; + EXECUTABLE_INPUT_CHECK_IO_EXCEPTION = 3 [(metadata) = { exit_code: 1 }]; + LINK_CREATION_IO_EXCEPTION = 4 [(metadata) = { exit_code: 1 }]; + LINK_TOUCH_IO_EXCEPTION = 5 [(metadata) = { exit_code: 1 }]; + LINK_LOG_IO_EXCEPTION = 6 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message CppLink { + enum Code { + CPP_LINK_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + COMMAND_GENERATION_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + FAKE_COMMAND_GENERATION_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message LtoAction { + enum Code { + LTO_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INVALID_ABSOLUTE_PATH_IN_IMPORTS = 1 [(metadata) = { exit_code: 1 }]; + MISSING_BITCODE_FILES = 2 [(metadata) = { exit_code: 1 }]; + IMPORTS_READ_IO_EXCEPTION = 3 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message TestAction { + enum Code { + TEST_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NO_KEEP_GOING_TEST_FAILURE 
= 1 [(metadata) = { exit_code: 1 }]; + LOCAL_TEST_PREREQ_UNMET = 2 [(metadata) = { exit_code: 1 }]; + COMMAND_LINE_EXPANSION_FAILURE = 3 [(metadata) = { exit_code: 1 }]; + DUPLICATE_CPU_TAGS = 4 [(metadata) = { exit_code: 1 }]; + INVALID_CPU_TAG = 5 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message Worker { + enum Code { + WORKER_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MULTIPLEXER_INSTANCE_REMOVAL_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + MULTIPLEXER_DOES_NOT_EXIST = 2 [(metadata) = { exit_code: 1 }]; + NO_TOOLS = 3 [(metadata) = { exit_code: 1 }]; + NO_FLAGFILE = 4 [(metadata) = { exit_code: 1 }]; + VIRTUAL_INPUT_MATERIALIZATION_FAILURE = 5 [(metadata) = { exit_code: 1 }]; + BORROW_FAILURE = 6 [(metadata) = { exit_code: 1 }]; + PREFETCH_FAILURE = 7 [(metadata) = { exit_code: 36 }]; + PREPARE_FAILURE = 8 [(metadata) = { exit_code: 1 }]; + REQUEST_FAILURE = 9 [(metadata) = { exit_code: 1 }]; + PARSE_RESPONSE_FAILURE = 10 [(metadata) = { exit_code: 1 }]; + NO_RESPONSE = 11 [(metadata) = { exit_code: 1 }]; + FINISH_FAILURE = 12 [(metadata) = { exit_code: 1 }]; + FORBIDDEN_INPUT = 13 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message Analysis { + enum Code { + ANALYSIS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + LOAD_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + // TODO(b/138456686): this code should be deprecated in favor of more finely + // resolved loading-phase codes. 
+ GENERIC_LOADING_PHASE_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + NOT_ALL_TARGETS_ANALYZED = 3 [(metadata) = { exit_code: 1 }]; + CYCLE = 4 [(metadata) = { exit_code: 1 }]; + PARAMETERIZED_TOP_LEVEL_ASPECT_INVALID = 5 [(metadata) = { exit_code: 1 }]; + ASPECT_LABEL_SYNTAX_ERROR = 6 [(metadata) = { exit_code: 1 }]; + ASPECT_PREREQ_UNMET = 7 [(metadata) = { exit_code: 1 }]; + ASPECT_NOT_FOUND = 8 [(metadata) = { exit_code: 1 }]; + ACTION_CONFLICT = 9 [(metadata) = { exit_code: 1 }]; + ARTIFACT_PREFIX_CONFLICT = 10 [(metadata) = { exit_code: 1 }]; + UNEXPECTED_ANALYSIS_EXCEPTION = 11 [(metadata) = { exit_code: 1 }]; + TARGETS_MISSING_ENVIRONMENTS = 12 [(metadata) = { exit_code: 1 }]; + INVALID_ENVIRONMENT = 13 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_MISSING_FROM_GROUPS = 14 [(metadata) = { exit_code: 1 }]; + EXEC_GROUP_MISSING = 15 [(metadata) = { exit_code: 1 }]; + INVALID_EXECUTION_PLATFORM = 16 [(metadata) = { exit_code: 1 }]; + ASPECT_CREATION_FAILED = 17 [(metadata) = { exit_code: 1 }]; + CONFIGURED_VALUE_CREATION_FAILED = 18 [(metadata) = { exit_code: 1 }]; + INCOMPATIBLE_TARGET_REQUESTED = 19 [(metadata) = { exit_code: 1 }]; + ANALYSIS_FAILURE_PROPAGATION_FAILED = 20 [(metadata) = { exit_code: 1 }]; + ANALYSIS_CACHE_DISCARDED = 21 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message PackageLoading { + enum Code { + PACKAGE_LOADING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + WORKSPACE_FILE_ERROR = 1 [(metadata) = { exit_code: 1 }]; + MAX_COMPUTATION_STEPS_EXCEEDED = 2 [(metadata) = { exit_code: 1 }]; + BUILD_FILE_MISSING = 3 [(metadata) = { exit_code: 1 }]; + REPOSITORY_MISSING = 4 [(metadata) = { exit_code: 1 }]; + PERSISTENT_INCONSISTENT_FILESYSTEM_ERROR = 5 + [(metadata) = { exit_code: 36 }]; + TRANSIENT_INCONSISTENT_FILESYSTEM_ERROR = 6 + [(metadata) = { exit_code: 36 }]; + INVALID_NAME = 7 [(metadata) = { exit_code: 1 }]; + // was: PRELUDE_FILE_READ_ERROR. 
Replaced by IMPORT_STARLARK_FILE_ERROR + // when the prelude was changed to be loaded as a Starlark module. + reserved 8; + EVAL_GLOBS_SYMLINK_ERROR = 9 [(metadata) = { exit_code: 1 }]; + IMPORT_STARLARK_FILE_ERROR = 10 [(metadata) = { exit_code: 1 }]; + PACKAGE_MISSING = 11 [(metadata) = { exit_code: 1 }]; + TARGET_MISSING = 12 [(metadata) = { exit_code: 1 }]; + NO_SUCH_THING = 13 [(metadata) = { exit_code: 1 }]; + GLOB_IO_EXCEPTION = 14 [(metadata) = { exit_code: 36 }]; + DUPLICATE_LABEL = 15 [(metadata) = { exit_code: 1 }]; + INVALID_PACKAGE_SPECIFICATION = 16 [(metadata) = { exit_code: 1 }]; + SYNTAX_ERROR = 17 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_IN_DIFFERENT_PACKAGE = 18 [(metadata) = { exit_code: 1 }]; + DEFAULT_ENVIRONMENT_UNDECLARED = 19 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_IN_MULTIPLE_GROUPS = 20 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_DOES_NOT_EXIST = 21 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_INVALID = 22 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_NOT_IN_GROUP = 23 [(metadata) = { exit_code: 1 }]; + PACKAGE_NAME_INVALID = 24 [(metadata) = { exit_code: 1 }]; + STARLARK_EVAL_ERROR = 25 [(metadata) = { exit_code: 1 }]; + LICENSE_PARSE_FAILURE = 26 [(metadata) = { exit_code: 1 }]; + DISTRIBUTIONS_PARSE_FAILURE = 27 [(metadata) = { exit_code: 1 }]; + LABEL_CROSSES_PACKAGE_BOUNDARY = 28 [(metadata) = { exit_code: 1 }]; + // Failure while evaluating or applying @_builtins injection. Since the + // builtins .bzl files are always packaged with Blaze in production, a + // failure here generally indicates a bug in Blaze. 
+ BUILTINS_INJECTION_FAILURE = 29 [(metadata) = { exit_code: 1 }]; + SYMLINK_CYCLE_OR_INFINITE_EXPANSION = 30 [(metadata) = { exit_code: 1 }]; + OTHER_IO_EXCEPTION = 31 [(metadata) = { exit_code: 36 }]; + BAD_REPO_FILE = 32 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message Toolchain { + enum Code { + TOOLCHAIN_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MISSING_PROVIDER = 1 [(metadata) = { exit_code: 1 }]; + INVALID_CONSTRAINT_VALUE = 2 [(metadata) = { exit_code: 1 }]; + INVALID_PLATFORM_VALUE = 3 [(metadata) = { exit_code: 1 }]; + INVALID_TOOLCHAIN = 4 [(metadata) = { exit_code: 1 }]; + NO_MATCHING_EXECUTION_PLATFORM = 5 [(metadata) = { exit_code: 1 }]; + NO_MATCHING_TOOLCHAIN = 6 [(metadata) = { exit_code: 1 }]; + INVALID_TOOLCHAIN_TYPE = 7 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message StarlarkLoading { + enum Code { + STARLARK_LOADING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + CYCLE = 1 [(metadata) = { exit_code: 1 }]; + COMPILE_ERROR = 2 [(metadata) = { exit_code: 1 }]; + PARSE_ERROR = 3 [(metadata) = { exit_code: 1 }]; + EVAL_ERROR = 4 [(metadata) = { exit_code: 1 }]; + CONTAINING_PACKAGE_NOT_FOUND = 5 [(metadata) = { exit_code: 1 }]; + PACKAGE_NOT_FOUND = 6 [(metadata) = { exit_code: 1 }]; + IO_ERROR = 7 [(metadata) = { exit_code: 1 }]; + LABEL_CROSSES_PACKAGE_BOUNDARY = 8 [(metadata) = { exit_code: 1 }]; + BUILTINS_ERROR = 9 [(metadata) = { exit_code: 1 }]; + VISIBILITY_ERROR = 10 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message ExternalDeps { + enum Code { + EXTERNAL_DEPS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MODULE_NOT_FOUND = 1 [(metadata) = { exit_code: 48 }]; + BAD_MODULE = 2 [(metadata) = { exit_code: 48 }]; + VERSION_RESOLUTION_ERROR = 3 [(metadata) = { exit_code: 48 }]; + INVALID_REGISTRY_URL = 4 [(metadata) = { exit_code: 48 }]; + ERROR_ACCESSING_REGISTRY = 5 [(metadata) = { exit_code: 32 }]; + INVALID_EXTENSION_IMPORT = 6 [(metadata) = { exit_code: 48 }]; + 
BAD_LOCKFILE = 7 [(metadata) = { exit_code: 48 }]; + } + + Code code = 1; +} + +message DiffAwareness { + enum Code { + DIFF_AWARENESS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + DIFF_STAT_FAILED = 1 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message ModCommand { + enum Code { + MOD_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MISSING_ARGUMENTS = 1 [(metadata) = { exit_code: 2 }]; + TOO_MANY_ARGUMENTS = 2 [(metadata) = { exit_code: 2 }]; + INVALID_ARGUMENTS = 3 [(metadata) = { exit_code: 2 }]; + BUILDOZER_FAILED = 4 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} diff --git a/app/bazel_event_publisher_proto/proto/google/api/annotations.proto b/app/bazel_event_publisher_proto/proto/google/api/annotations.proto new file mode 100644 index 000000000000..84c48164aa92 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. 
+ HttpRule http = 72295728; +} diff --git a/app/bazel_event_publisher_proto/proto/google/api/client.proto b/app/bazel_event_publisher_proto/proto/google/api/client.proto new file mode 100644 index 000000000000..7ba1db3da04f --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/api/client.proto @@ -0,0 +1,456 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/launch_stage.proto"; +import "google/protobuf/descriptor.proto"; +import "google/protobuf/duration.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "ClientProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. 
+ // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // * Adding this annotation to an unannotated method is backwards + // compatible. + // * Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // * Modifying or removing an existing method signature annotation is + // a breaking change. + // * Re-ordering existing method signature annotations is a breaking + // change. + repeated string method_signature = 1051; +} + +extend google.protobuf.ServiceOptions { + // The hostname for this service. + // This should be specified with no prefix or protocol. + // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + string default_host = 1049; + + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... + // } + string oauth_scopes = 1050; + + // The API version of this service, which should be sent by version-aware + // clients to the service. 
This allows services to abide by the schema and + // behavior of the service at the time this API version was deployed. + // The format of the API version must be treated as opaque by clients. + // Services may use a format with an apparent structure, but clients must + // not rely on this to determine components within an API version, or attempt + // to construct other valid API versions. Note that this is for upcoming + // functionality and may not be implemented for all services. + // + // Example: + // + // service Foo { + // option (google.api.api_version) = "v1_20230821_preview"; + // } + string api_version = 525000001; +} + +// Required information for every language. +message CommonLanguageSettings { + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + string reference_docs_uri = 1 [deprecated = true]; + + // The destination where API teams want this client library to be published. + repeated ClientLibraryDestination destinations = 2; + + // Configuration for which RPCs should be generated in the GAPIC client. + SelectiveGapicGeneration selective_gapic_generation = 3; +} + +// Details about how and where to publish client libraries. +message ClientLibrarySettings { + // Version of the API to apply these settings to. This is the full protobuf + // package for the API, ending in the version element. + // Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1". + string version = 1; + + // Launch stage of this version of the API. + LaunchStage launch_stage = 2; + + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + bool rest_numeric_enums = 3; + + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings java_settings = 21; + + // Settings for C++ client libraries. + CppSettings cpp_settings = 22; + + // Settings for PHP client libraries. 
+ PhpSettings php_settings = 23; + + // Settings for Python client libraries. + PythonSettings python_settings = 24; + + // Settings for Node client libraries. + NodeSettings node_settings = 25; + + // Settings for .NET client libraries. + DotnetSettings dotnet_settings = 26; + + // Settings for Ruby client libraries. + RubySettings ruby_settings = 27; + + // Settings for Go client libraries. + GoSettings go_settings = 28; +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +message Publishing { + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + repeated MethodSettings method_settings = 2; + + // Link to a *public* URI where users can report issues. Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + string new_issue_uri = 101; + + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + string documentation_uri = 102; + + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + string api_short_name = 103; + + // GitHub label to apply to issues and pull requests opened for this API. + string github_label = 104; + + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. + repeated string codeowner_github_teams = 105; + + // A prefix used in sample code when demarking regions to be included in + // documentation. + string doc_tag_prefix = 106; + + // For whom the client library is being published. + ClientLibraryOrganization organization = 107; + + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. 
Settings from earlier
+  // settings with the same version string are discarded.
+  repeated ClientLibrarySettings library_settings = 109;
+
+  // Optional link to proto reference documentation. Example:
+  // https://cloud.google.com/pubsub/lite/docs/reference/rpc
+  string proto_reference_documentation_uri = 110;
+
+  // Optional link to REST reference documentation. Example:
+  // https://cloud.google.com/pubsub/lite/docs/reference/rest
+  string rest_reference_documentation_uri = 111;
+}
+
+// Settings for Java client libraries.
+message JavaSettings {
+  // The package name to use in Java. Clobbers the java_package option
+  // set in the protobuf. This should be used **only** by APIs
+  // who have already set the language_settings.java.package_name" field
+  // in gapic.yaml. API teams should use the protobuf java_package option
+  // where possible.
+  //
+  // Example of a YAML configuration::
+  //
+  //    publishing:
+  //      java_settings:
+  //        library_package: com.google.cloud.pubsub.v1
+  string library_package = 1;
+
+  // Configure the Java class name to use instead of the service's for its
+  // corresponding generated GAPIC client. Keys are fully-qualified
+  // service names as they appear in the protobuf (including the full
+  // the language_settings.java.interface_names" field in gapic.yaml. API
+  // teams should otherwise use the service name as it appears in the
+  // protobuf.
+  //
+  // Example of a YAML configuration::
+  //
+  //    publishing:
+  //      java_settings:
+  //        service_class_names:
+  //          - google.pubsub.v1.Publisher: TopicAdmin
+  //          - google.pubsub.v1.Subscriber: SubscriptionAdmin
+  map<string, string> service_class_names = 2;
+
+  // Some settings.
+  CommonLanguageSettings common = 3;
+}
+
+// Settings for C++ client libraries.
+message CppSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Php client libraries.
+message PhpSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Python client libraries.
+message PythonSettings {
+  // Experimental features to be included during client library generation.
+  // These fields will be deprecated once the feature graduates and is enabled
+  // by default.
+  message ExperimentalFeatures {
+    // Enables generation of asynchronous REST clients if `rest` transport is
+    // enabled. By default, asynchronous REST clients will not be generated.
+    // This feature will be enabled by default 1 month after launching the
+    // feature in preview packages.
+    bool rest_async_io_enabled = 1;
+  }
+
+  // Some settings.
+  CommonLanguageSettings common = 1;
+
+  // Experimental features to be included during client library generation.
+  ExperimentalFeatures experimental_features = 2;
+}
+
+// Settings for Node client libraries.
+message NodeSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Dotnet client libraries.
+message DotnetSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+
+  // Map from original service names to renamed versions.
+  // This is used when the default generated types
+  // would cause a naming conflict. (Neither name is
+  // fully-qualified.)
+  // Example: Subscriber to SubscriberServiceApi.
+  map<string, string> renamed_services = 2;
+
+  // Map from full resource types to the effective short name
+  // for the resource. This is used when otherwise resource
+  // named from different services would cause naming collisions.
+  // Example entry:
+  // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset"
+  map<string, string> renamed_resources = 3;
+
+  // List of full resource types to ignore during generation.
+  // This is typically used for API-specific Location resources,
+  // which should be handled by the generator as if they were actually
+  // the common Location resources.
+ // Example entry: "documentai.googleapis.com/Location" + repeated string ignored_resources = 4; + + // Namespaces which must be aliased in snippets due to + // a known (but non-generator-predictable) naming collision + repeated string forced_namespace_aliases = 5; + + // Method signatures (in the form "service.method(signature)") + // which are provided separately, so shouldn't be generated. + // Snippets *calling* these methods are still generated, however. + repeated string handwritten_signatures = 6; +} + +// Settings for Ruby client libraries. +message RubySettings { + // Some settings. + CommonLanguageSettings common = 1; +} + +// Settings for Go client libraries. +message GoSettings { + // Some settings. + CommonLanguageSettings common = 1; +} + +// Describes the generator configuration for a method. +message MethodSettings { + // Describes settings to use when generating API methods that use the + // long-running operation pattern. + // All default values below are from those used in the client library + // generators (e.g. + // [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). + message LongRunning { + // Initial delay after which the first poll request will be made. + // Default value: 5 seconds. + google.protobuf.Duration initial_poll_delay = 1; + + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + float poll_delay_multiplier = 2; + + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + google.protobuf.Duration max_poll_delay = 3; + + // Total polling timeout. + // Default value: 5 minutes. + google.protobuf.Duration total_poll_timeout = 4; + } + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. 
+ // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... + string selector = 1; + + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_settings: + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes + LongRunning long_running = 2; + + // List of top-level fields of the request message, that should be + // automatically populated by the client libraries based on their + // (google.api.field_info).format. Currently supported format: UUID4. + // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id + repeated string auto_populated_fields = 3; +} + +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +enum ClientLibraryOrganization { + // Not useful. + CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED = 0; + + // Google Cloud Platform Org. + CLOUD = 1; + + // Ads (Advertising) Org. + ADS = 2; + + // Photos Org. + PHOTOS = 3; + + // Street View Org. + STREET_VIEW = 4; + + // Shopping Org. + SHOPPING = 5; + + // Geo Org. + GEO = 6; + + // Generative AI - https://developers.generativeai.google + GENERATIVE_AI = 7; +} + +// To where should client libraries be published? +enum ClientLibraryDestination { + // Client libraries will neither be generated nor published to package + // managers. 
+ CLIENT_LIBRARY_DESTINATION_UNSPECIFIED = 0; + + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + GITHUB = 10; + + // Publish the library to package managers like nuget.org and npmjs.com. + PACKAGE_MANAGER = 20; +} + +// This message is used to configure the generation of a subset of the RPCs in +// a service for client libraries. +message SelectiveGapicGeneration { + // An allowlist of the fully qualified names of RPCs that should be included + // on public client surfaces. + repeated string methods = 1; +} diff --git a/app/bazel_event_publisher_proto/proto/google/api/field_behavior.proto b/app/bazel_event_publisher_proto/proto/google/api/field_behavior.proto new file mode 100644 index 000000000000..2865ba053739 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/api/field_behavior.proto @@ -0,0 +1,104 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "FieldBehaviorProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.FieldOptions { + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. 
+ // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + repeated google.api.FieldBehavior field_behavior = 1052 [packed = false]; +} + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +enum FieldBehavior { + // Conventional default for enums. Do not use this. + FIELD_BEHAVIOR_UNSPECIFIED = 0; + + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + OPTIONAL = 1; + + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + REQUIRED = 2; + + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + OUTPUT_ONLY = 3; + + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + INPUT_ONLY = 4; + + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. 
+ IMMUTABLE = 5; + + // Denotes that a (repeated) field is an unordered list. + // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. Additionally, the list's order may or may not be stable. + UNORDERED_LIST = 6; + + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + NON_EMPTY_DEFAULT = 7; + + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + IDENTIFIER = 8; +} diff --git a/app/bazel_event_publisher_proto/proto/google/api/http.proto b/app/bazel_event_publisher_proto/proto/google/api/http.proto new file mode 100644 index 000000000000..e3270371d445 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/api/http.proto @@ -0,0 +1,371 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. 
Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. 
+// For example:
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           get:"/v1/messages/{message_id}"
+//         };
+//       }
+//     }
+//     message GetMessageRequest {
+//       message SubMessage {
+//         string subfield = 1;
+//       }
+//       string message_id = 1; // Mapped to URL path.
+//       int64 revision = 2;    // Mapped to URL query parameter `revision`.
+//       SubMessage sub = 3;    // Mapped to URL query parameter `sub.subfield`.
+//     }
+//
+// This enables a HTTP JSON to RPC mapping as below:
+//
+// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo`
+// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub:
+// SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to URL query parameters must have a
+// primitive type or a repeated primitive type or a non-repeated message type.
+// In the case of a repeated type, the parameter can be repeated in the URL
+// as `...?param=A&param=B`. In the case of a message type, each field of the
+// message is mapped to a separate parameter, such as
+// `...?foo.a=A&foo.b=B&foo.c=C`.
+//
+// For HTTP methods that allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//     service Messaging {
+//       rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           patch: "/v1/messages/{message_id}"
+//           body: "message"
+//         };
+//       }
+//     }
+//     message UpdateMessageRequest {
+//       string message_id = 1; // mapped to the URL
+//       Message message = 2;   // mapped to the body
+//     }
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// protos JSON encoding:
+//
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!"
})` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` +// +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. 
They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They +// are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL +// query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP +// request body, all +// fields are passed via URL path and URL query parameters. +// +// Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. 
Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// The following example selects a gRPC method and applies an `HttpRule` to it: +// +// http: +// rules: +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). 
+// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +message HttpRule { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax + // details. + string selector = 1; + + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + oneof pattern { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + string get = 2; + + // Maps to HTTP PUT. Used for replacing a resource. + string put = 3; + + // Maps to HTTP POST. Used for creating a resource or performing an action. + string post = 4; + + // Maps to HTTP DELETE. Used for deleting a resource. 
+ string delete = 5; + + // Maps to HTTP PATCH. Used for updating a resource. + string patch = 6; + + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + CustomHttpPattern custom = 8; + } + + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + string body = 7; + + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + string response_body = 12; + + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. + string kind = 1; + + // The path matched by this custom verb. 
+ string path = 2; +} diff --git a/app/bazel_event_publisher_proto/proto/google/api/launch_stage.proto b/app/bazel_event_publisher_proto/proto/google/api/launch_stage.proto new file mode 100644 index 000000000000..9863fc23d422 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/api/launch_stage.proto @@ -0,0 +1,72 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option go_package = "google.golang.org/genproto/googleapis/api;api"; +option java_multiple_files = true; +option java_outer_classname = "LaunchStageProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). +enum LaunchStage { + // Do not use this default value. + LAUNCH_STAGE_UNSPECIFIED = 0; + + // The feature is not yet implemented. Users can not use it. + UNIMPLEMENTED = 6; + + // Prelaunch features are hidden from users and are only visible internally. + PRELAUNCH = 7; + + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. 
+ EARLY_ACCESS = 1; + + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + ALPHA = 2; + + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + BETA = 3; + + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + GA = 4; + + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. 
+ DEPRECATED = 5; +} diff --git a/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_events.proto b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_events.proto new file mode 100644 index 000000000000..43cf5e275fb9 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_events.proto @@ -0,0 +1,187 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.devtools.build.v1; + +import "google/devtools/build/v1/build_status.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/devtools/build/v1;build"; +option java_multiple_files = true; +option java_outer_classname = "BuildEventProto"; +option java_package = "com.google.devtools.build.v1"; +option php_namespace = "Google\\Cloud\\Build\\V1"; + +// An event representing some state change that occurred in the build. This +// message does not include field for uniquely identifying an event. +message BuildEvent { + // Notification that the build system has attempted to run the build tool. + message InvocationAttemptStarted { + // The number of the invocation attempt, starting at 1 and increasing by 1 + // for each new attempt. Can be used to determine if there is a later + // invocation attempt replacing the current one a client is processing. 
+ int64 attempt_number = 1; + + // Arbitrary details about the invocation attempt. + google.protobuf.Any details = 2; + } + + // Notification that an invocation attempt has finished. + message InvocationAttemptFinished { + // Final status of the invocation. + BuildStatus invocation_status = 3; + + // Arbitrary details about the invocation attempt. + google.protobuf.Any details = 4; + } + + // Notification that the build request is enqueued. + message BuildEnqueued { + // Additional details about the Build. + google.protobuf.Any details = 1; + } + + // Notification that the build request has finished, and no further + // invocations will occur. Note that this applies to the entire Build. + // Individual invocations trigger InvocationFinished when they finish. + message BuildFinished { + // Final status of the build. + BuildStatus status = 1; + + // Additional details about the Build. + google.protobuf.Any details = 2; + } + + // Textual output written to standard output or standard error. + message ConsoleOutput { + // The output stream type. + ConsoleOutputStream type = 1; + + // The output stream content. + oneof output { + // Regular UTF-8 output; normal text. + string text_output = 2; + + // Used if the output is not UTF-8 text (for example, a binary proto). + bytes binary_output = 3; + } + } + + // Notification of the end of a build event stream published by a build + // component other than CONTROLLER (See StreamId.BuildComponents). + message BuildComponentStreamFinished { + // How did the event stream finish. + enum FinishType { + // Unknown or unspecified; callers should never set this value. + FINISH_TYPE_UNSPECIFIED = 0; + + // Set by the event publisher to indicate a build event stream is + // finished. + FINISHED = 1; + + // Set by the WatchBuild RPC server when the publisher of a build event + // stream stops publishing events without publishing a + // BuildComponentStreamFinished event whose type equals FINISHED. 
+ EXPIRED = 2; + } + + // How the event stream finished. + FinishType type = 1; + } + + // This should be precisely the time when this event happened, and not when + // the event proto was created or sent. + google.protobuf.Timestamp event_time = 1; + + // ////////////////////////////////////////////////////////////////////////// + // Events that indicate a state change of a build request in the build + // queue. + oneof event { + // An invocation attempt has started. + InvocationAttemptStarted invocation_attempt_started = 51; + + // An invocation attempt has finished. + InvocationAttemptFinished invocation_attempt_finished = 52; + + // The build is enqueued. + BuildEnqueued build_enqueued = 53; + + // The build has finished. Set when the build is terminated. + BuildFinished build_finished = 55; + + // An event containing printed text. + ConsoleOutput console_output = 56; + + // Indicates the end of a build event stream (with the same StreamId) from + // a build component executing the requested build task. + // *** This field does not indicate the WatchBuild RPC is finished. *** + BuildComponentStreamFinished component_stream_finished = 59; + + // Structured build event generated by Bazel about its execution progress. + google.protobuf.Any bazel_event = 60; + + // An event that contains supplemental tool-specific information about + // build execution. + google.protobuf.Any build_execution_event = 61; + + // An event that contains supplemental tool-specific information about + // source fetching. + google.protobuf.Any source_fetch_event = 62; + } +} + +// Unique identifier for a build event stream. +message StreamId { + // Which build component generates this event stream. Each build component + // may generate one event stream. + enum BuildComponent { + // Unknown or unspecified; callers should never set this value. + UNKNOWN_COMPONENT = 0; + + // A component that coordinates builds. 
+ CONTROLLER = 1; + + // A component that runs executables needed to complete a build. + WORKER = 2; + + // A component that builds something. + TOOL = 3; + } + + // The id of a Build message. + string build_id = 1; + + // The unique invocation ID within this build. + // It should be the same as {invocation} (below) during the migration. + string invocation_id = 6; + + // The component that emitted this event. + BuildComponent component = 3; +} + +// The type of console output stream. +enum ConsoleOutputStream { + // Unspecified or unknown. + UNKNOWN = 0; + + // Normal output stream. + STDOUT = 1; + + // Error output stream. + STDERR = 2; +} diff --git a/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_status.proto b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_status.proto new file mode 100644 index 000000000000..93a525f131ca --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/build_status.proto @@ -0,0 +1,77 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.devtools.build.v1; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/devtools/build/v1;build"; +option java_multiple_files = true; +option java_outer_classname = "BuildStatusProto"; +option java_package = "com.google.devtools.build.v1"; +option php_namespace = "Google\\Cloud\\Build\\V1"; + +// Status used for both invocation attempt and overall build completion. +message BuildStatus { + // The end result of the Build. + enum Result { + // Unspecified or unknown. + UNKNOWN_STATUS = 0; + + // Build was successful and tests (if requested) all pass. + COMMAND_SUCCEEDED = 1; + + // Build error and/or test failure. + COMMAND_FAILED = 2; + + // Unable to obtain a result due to input provided by the user. + USER_ERROR = 3; + + // Unable to obtain a result due to a failure within the build system. + SYSTEM_ERROR = 4; + + // Build required too many resources, such as build tool RAM. + RESOURCE_EXHAUSTED = 5; + + // An invocation attempt time exceeded its deadline. + INVOCATION_DEADLINE_EXCEEDED = 6; + + // Build request time exceeded the request_deadline + REQUEST_DEADLINE_EXCEEDED = 8; + + // The build was cancelled by a call to CancelBuild. + CANCELLED = 7; + } + + // The end result. + Result result = 1; + + // Final invocation ID of the build, if there was one. + // This field is only set on a status in BuildFinished event. + string final_invocation_id = 3; + + // Build tool exit code. Integer value returned by the executed build tool. + // Might not be available in some cases, e.g., a build timeout. + google.protobuf.Int32Value build_tool_exit_code = 4; + + // Human-readable error message. Do not use for programmatic purposes. + string error_message = 5; + + // Fine-grained diagnostic information to complement the status. 
+ google.protobuf.Any details = 2; +} diff --git a/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/publish_build_event.proto b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/publish_build_event.proto new file mode 100644 index 000000000000..641ba5af2e9a --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/google/devtools/build/v1/publish_build_event.proto @@ -0,0 +1,187 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.devtools.build.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/devtools/build/v1/build_events.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/devtools/build/v1;build"; +option java_multiple_files = true; +option java_outer_classname = "BackendProto"; +option java_package = "com.google.devtools.build.v1"; +option php_namespace = "Google\\Cloud\\Build\\V1"; + +// A service for publishing BuildEvents. BuildEvents are generated by Build +// Systems to record actions taken during a Build. Events occur in streams, +// are identified by a StreamId, and ordered by sequence number in a stream. +// +// A Build may contain several streams of BuildEvents, depending on the systems +// that are involved in the Build. 
Some BuildEvents are used to declare the +// beginning and end of major portions of a Build; these are called +// LifecycleEvents, and are used (for example) to indicate the beginning or end +// of a Build, and the beginning or end of an Invocation attempt (there can be +// more than 1 Invocation in a Build if, for example, a failure occurs somewhere +// and it needs to be retried). +// +// Other, build-tool events represent actions taken by the Build tool, such as +// target objects produced via compilation, tests run, et cetera. There could be +// more than one build tool stream for an invocation attempt of a build. +service PublishBuildEvent { + option (google.api.default_host) = "buildeventservice.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Publish a build event stating the new state of a build (typically from the + // build queue). The BuildEnqueued event must be published before all other + // events for the same build ID. + // + // The backend will persist the event and deliver it to registered frontend + // jobs immediately without batching. + // + // The commit status of the request is reported by the RPC's util_status() + // function. The error code is the canonical error code defined in + // //util/task/codes.proto. + rpc PublishLifecycleEvent(PublishLifecycleEventRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1/projects/{project_id=*}/lifecycleEvents:publish" + body: "*" + additional_bindings { post: "/v1/lifecycleEvents:publish" body: "*" } + }; + } + + // Publish build tool events belonging to the same stream to a backend job + // using bidirectional streaming. 
+ rpc PublishBuildToolEventStream(stream PublishBuildToolEventStreamRequest) + returns (stream PublishBuildToolEventStreamResponse) { + option (google.api.http) = { + post: "/v1/projects/{project_id=*}/events:publish" + body: "*" + additional_bindings { post: "/v1/events:publish" body: "*" } + }; + option (google.api.method_signature) = + "ordered_build_event,notification_keywords,project_id"; + } +} + +// Publishes 'lifecycle events' that update the high-level state of a build: +// - BuildEnqueued: When a build is scheduled. +// - InvocationAttemptStarted: When work for a build starts; there can be +// multiple invocations for a build (e.g. retries). +// - InvocationAttemptCompleted: When work for a build finishes. +// - BuildFinished: When a build is finished. +message PublishLifecycleEventRequest { + // The service level of the build request. Backends only uses this value when + // the BuildEnqueued event is published to determine what level of service + // this build should receive. + enum ServiceLevel { + // Non-interactive builds can tolerate longer event latencies. This is the + // default ServiceLevel if callers do not specify one. + NONINTERACTIVE = 0; + + // The events of an interactive build should be delivered with low latency. + INTERACTIVE = 1; + } + + // The interactivity of this build. + ServiceLevel service_level = 1; + + // Required. The lifecycle build event. If this is a build tool event, the RPC + // will fail with INVALID_REQUEST. + OrderedBuildEvent build_event = 2 [(google.api.field_behavior) = REQUIRED]; + + // If the next event for this build or invocation (depending on the event + // type) hasn't been published after this duration from when {build_event} + // is written to BES, consider this stream expired. If this field is not set, + // BES backend will use its own default value. + google.protobuf.Duration stream_timeout = 3; + + // Additional information about a build request. 
These are define by the event + // publishers, and the Build Event Service does not validate or interpret + // them. They are used while notifying internal systems of new builds and + // invocations if the OrderedBuildEvent.event type is + // BuildEnqueued/InvocationAttemptStarted. + repeated string notification_keywords = 4; + + // Required. The project this build is associated with. + // This should match the project used for the initial call to + // PublishLifecycleEvent (containing a BuildEnqueued message). + string project_id = 6 [(google.api.field_behavior) = REQUIRED]; + + // Whether to require a previously received matching parent lifecycle event + // for the current request's event before continuing processing. + // - InvocationAttemptStarted and BuildFinished events require a BuildEnqueued + // parent event. + // - InvocationAttemptFinished events require an InvocationAttemptStarted + // parent event. + bool check_preceding_lifecycle_events_present = 7; +} + +// States which event has been committed. Any failure to commit will cause +// RPC errors, hence not recorded by this proto. +message PublishBuildToolEventStreamResponse { + // The stream that contains this event. + StreamId stream_id = 1; + + // The sequence number of this event that has been committed. + int64 sequence_number = 2; +} + +// Build event with contextual information about the stream it belongs to and +// its position in that stream. +message OrderedBuildEvent { + // Which build event stream this event belongs to. + StreamId stream_id = 1; + + // The position of this event in the stream. The sequence numbers for a build + // event stream should be a sequence of consecutive natural numbers starting + // from one. (1, 2, 3, ...) + int64 sequence_number = 2; + + // The actual event. + BuildEvent event = 3; +} + +// Streaming request message for PublishBuildToolEventStream. +message PublishBuildToolEventStreamRequest { + // Required. The build event with position info. 
+ // New publishing clients should use this field rather than the 3 above. + OrderedBuildEvent ordered_build_event = 4 + [(google.api.field_behavior) = REQUIRED]; + + // The keywords to be attached to the notification which notifies the start + // of a new build event stream. BES only reads this field when sequence_number + // or ordered_build_event.sequence_number is 1 in this message. If this field + // is empty, BES will not publish notification messages for this stream. + repeated string notification_keywords = 5; + + // Required. The project this build is associated with. + // This should match the project used for the initial call to + // PublishLifecycleEvent (containing a BuildEnqueued message). + string project_id = 6 [(google.api.field_behavior) = REQUIRED]; + + // Whether to require a previously received matching InvocationAttemptStarted + // event before continuing event processing for the event in the current + // request. BES only performs this check for events with sequence_number 1 + // i.e. the first event in the stream. + bool check_preceding_lifecycle_events_present = 7; +} diff --git a/app/bazel_event_publisher_proto/proto/invocation_policy.proto b/app/bazel_event_publisher_proto/proto/invocation_policy.proto new file mode 100644 index 000000000000..a6820f0af580 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/invocation_policy.proto @@ -0,0 +1,207 @@ +// Copyright 2015 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package blaze.invocation_policy; + +import "strategy_policy.proto"; + +// option java_api_version = 2; +option java_package = "com.google.devtools.build.lib.runtime.proto"; + +// The --invocation_policy flag takes a base64-encoded binary-serialized or text +// formatted InvocationPolicy message. +message InvocationPolicy { + // Order matters. + // After expanding policies on expansion flags or flags with implicit + // requirements, only the final policy on a specific flag will be enforced + // onto the user's command line. + repeated FlagPolicy flag_policies = 1; + + optional blaze.strategy_policy.StrategyPolicy strategy_policy = 2; +} + +// A policy for controlling the value of a flag. +message FlagPolicy { + // The name of the flag to enforce this policy on. + // + // Note that this should be the full name of the flag, not the abbreviated + // name of the flag. If the user specifies the abbreviated name of a flag, + // that flag will be matched using its full name. + // + // The "no" prefix will not be parsed, so for boolean flags, use + // the flag's full name and explicitly set it to true or false. + optional string flag_name = 1; + + // If set, this flag policy is applied only if one of the given commands or a + // command that inherits from one of the given commands is being run. For + // instance, if "build" is one of the commands here, then this policy will + // apply to any command that inherits from build, such as info, coverage, or + // test. If empty, this flag policy is applied for all commands. This allows + // the policy setter to add all policies to the proto without having to + // determine which Bazel command the user is actually running. Additionally, + // Bazel allows multiple flags to be defined by the same name, and the + // specific flag definition is determined by the command. 
+ repeated string commands = 2; + + oneof operation { + SetValue set_value = 3; + UseDefault use_default = 4; + DisallowValues disallow_values = 5; + AllowValues allow_values = 6; + } +} + +message SetValue { + // Use this value for the specified flag, overriding any default or user-set + // value (unless behavior = APPEND for repeatable flags). + // + // This field is repeated for repeatable flags. It is an error to set + // multiple values for a flag that is not actually a repeatable flag. + // This requires at least 1 value, if even the empty string. + // + // If the flag allows multiple values, all of its values are replaced with the + // value or values from the policy (i.e., no diffing or merging is performed), + // unless behavior = APPEND (see below). + // + // Note that some flags are tricky. For example, some flags look like boolean + // flags, but are actually Void expansion flags that expand into other flags. + // The Bazel flag parser will accept "--void_flag=false", but because + // the flag is Void, the "=false" is ignored. It can get even trickier, like + // "--novoid_flag" which is also an expansion flag with the type Void whose + // name is explicitly "novoid_flag" and which expands into other flags that + // are the opposite of "--void_flag". For expansion flags, it's best to + // explicitly override the flags they expand into. + // + // Other flags may be differently tricky: A flag could have a converter that + // converts some string to a list of values, but that flag may not itself have + // allowMultiple set to true. + // + // An example is "--test_tag_filters": this flag sets its converter to + // CommaSeparatedOptionListConverter, but does not set allowMultiple to true. + // So "--test_tag_filters=foo,bar" results in ["foo", "bar"], however + // "--test_tag_filters=foo --test_tag_filters=bar" results in just ["bar"] + // since the 2nd value overrides the 1st. 
+ // + // Similarly, "--test_tag_filters=foo,bar --test_tag_filters=baz,qux" results + // in ["baz", "qux"]. For flags like these, the policy should specify + // "foo,bar" instead of separately specifying "foo" and "bar" so that the + // converter is appropriately invoked. + // + // Note that the opposite is not necessarily + // true: for a flag that specifies allowMultiple=true, "--flag=foo,bar" + // may fail to parse or result in an unexpected value. + repeated string flag_value = 1; + + // Obsolete overridable and append fields. + reserved 2, 3; + + enum Behavior { + UNDEFINED = 0; + // Change the flag value but allow it to be overridden by explicit settings + // from command line/config expansion/rc files. + // Matching old flag values: append = false, overridable = true. + ALLOW_OVERRIDES = 1; + // Append a new value for a repeatable flag, leave old values and allow + // further overrides. + // Matching old flag values: append = true, overridable = false. + APPEND = 2; + // Set a final value of the flag. Any overrides provided by the user for + // this flag will be ignored. + // Matching old flag values: append = false, overridable = false. + FINAL_VALUE_IGNORE_OVERRIDES = 3; + } + + // Defines how invocation policy should interact with user settings for the + // same flag. + optional Behavior behavior = 4; +} + +message UseDefault { + // Use the default value of the flag, as defined by Bazel (or equivalently, do + // not allow the user to set this flag). + // + // Note on implementation: UseDefault sets the default by clearing the flag, + // so that when the value is requested and no flag is found, the flag parser + // returns the default. This is mostly relevant for expansion flags: it will + // erase user values in *all* flags that the expansion flag expands to. Only + // use this on expansion flags if this is acceptable behavior. 
Since the last + // policy wins, later policies on this same flag will still remove the + // expanded UseDefault, so there is a way around, but it's really best not to + // use this on expansion flags at all. +} + +message DisallowValues { + // Obsolete new_default_value field. + reserved 2; + + // It is an error for the user to use any of these values (that is, the Bazel + // command will fail), unless new_value or use_default is set. + // + // For repeatable flags, if any one of the values in the flag matches a value + // in the list of disallowed values, an error is thrown. + // + // Care must be taken for flags with complicated converters. For example, + // it's possible for a repeated flag to be of type List>, so that + // "--foo=a,b --foo=c,d" results in foo=[["a","b"], ["c", "d"]]. In this case, + // it is not possible to disallow just "b", nor will ["b", "a"] match, nor + // will ["b", "c"] (but ["a", "b"] will still match). + repeated string disallowed_values = 1; + + oneof replacement_value { + // If set and if the value of the flag is disallowed (including the default + // value of the flag if the user doesn't specify a value), use this value as + // the value of the flag instead of raising an error. This does not apply to + // repeatable flags and is ignored if the flag is a repeatable flag. + string new_value = 3; + + // If set and if the value of the flag is disallowed, use the default value + // of the flag instead of raising an error. Unlike new_value, this works for + // repeatable flags, but note that the default value for repeatable flags is + // always empty. + // + // Note that it is an error to disallow the default value of the flag and + // to set use_default, unless the flag is a repeatable flag where the + // default value is always the empty list. + UseDefault use_default = 4; + } +} + +message AllowValues { + // Obsolete new_default_value field. 
+ reserved 2; + + // It is an error for the user to use any value not in this list, unless + // new_value or use_default is set. + repeated string allowed_values = 1; + + oneof replacement_value { + // If set and if the value of the flag is disallowed (including the default + // value of the flag if the user doesn't specify a value), use this value as + // the value of the flag instead of raising an error. This does not apply to + // repeatable flags and is ignored if the flag is a repeatable flag. + string new_value = 3; + + // If set and if the value of the flag is disallowed, use the default value + // of the flag instead of raising an error. Unlike new_value, this works for + // repeatable flags, but note that the default value for repeatable flags is + // always empty. + // + // Note that it is an error to disallow the default value of the flag and + // to set use_default, unless the flag is a repeatable flag where the + // default value is always the empty list. + UseDefault use_default = 4; + } +} diff --git a/app/bazel_event_publisher_proto/proto/option_filters.proto b/app/bazel_event_publisher_proto/proto/option_filters.proto new file mode 100644 index 000000000000..629e006888a3 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/option_filters.proto @@ -0,0 +1,61 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+syntax = "proto3"; + +package options; + +// option java_api_version = 2; +option java_package = "com.google.devtools.common.options.proto"; + +// IMPORTANT NOTE: These two enums must be kept in sync with their Java +// equivalents in src/main/java/com/google/devtools/common/options. +// Changing this proto has specific compatibility requirements, please see the +// Java documentation for details. + +// Docs in java enum. +enum OptionEffectTag { + // This option's effect or intent is unknown. + UNKNOWN = 0; + + // This flag has literally no effect. + NO_OP = 1; + + LOSES_INCREMENTAL_STATE = 2; + CHANGES_INPUTS = 3; + AFFECTS_OUTPUTS = 4; + BUILD_FILE_SEMANTICS = 5; + BAZEL_INTERNAL_CONFIGURATION = 6; + LOADING_AND_ANALYSIS = 7; + EXECUTION = 8; + HOST_MACHINE_RESOURCE_OPTIMIZATIONS = 9; + EAGERNESS_TO_EXIT = 10; + BAZEL_MONITORING = 11; + TERMINAL_OUTPUT = 12; + ACTION_COMMAND_LINES = 13; + TEST_RUNNER = 14; +} + +// Docs in java enum. +enum OptionMetadataTag { + EXPERIMENTAL = 0; + INCOMPATIBLE_CHANGE = 1; + DEPRECATED = 2; + HIDDEN = 3; + INTERNAL = 4; + reserved "TRIGGERED_BY_ALL_INCOMPATIBLE_CHANGES"; + reserved 5; + reserved "EXPLICIT_IN_OUTPUT_PATH"; + reserved 6; + IMMUTABLE = 7; +} diff --git a/app/bazel_event_publisher_proto/proto/package_load_metrics.proto b/app/bazel_event_publisher_proto/proto/package_load_metrics.proto new file mode 100644 index 000000000000..e27ecceaf445 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/package_load_metrics.proto @@ -0,0 +1,44 @@ +// Copyright 2020 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +syntax = "proto2"; + +package devtools.build.lib.packages.metrics; + +import "google/protobuf/duration.proto"; + +option java_package = "com.google.devtools.build.lib.packages.metrics"; +option java_multiple_files = true; + +// Message used to concisely report all package metrics. +message PackageLoadMetrics { + // Name of the package. + optional string name = 1; + + // Wall-time duration it took to construct the package. + optional google.protobuf.Duration load_duration = 2; + + // Number of targets created in the package. + optional uint64 num_targets = 3; + + // Number of Starlark computation steps required to create the package. + optional uint64 computation_steps = 4; + + // Number of transitive Starlark load()s required to create the package. + optional uint64 num_transitive_loads = 5; + + // Numeric value given to the memory and general accounting costs associated + // with a loaded package. Values are an approximate but reasonable proxy for + // the real storage costs of a package. + optional uint64 package_overhead = 6; +} diff --git a/app/bazel_event_publisher_proto/proto/strategy_policy.proto b/app/bazel_event_publisher_proto/proto/strategy_policy.proto new file mode 100644 index 000000000000..0f58c9b81f35 --- /dev/null +++ b/app/bazel_event_publisher_proto/proto/strategy_policy.proto @@ -0,0 +1,67 @@ +// Copyright 2022 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package blaze.strategy_policy; + +option java_multiple_files = true; +// option java_api_version = 2; +option java_package = "com.google.devtools.build.lib.runtime.proto"; + +// Provides control over what strategies (local, remote, etc) may be used. +// +// An empty policies (e.g. unset) implies no enforcement, anything is allowed. +// +// Policies are enforced against both user-provided values (flags) and +// application-internal defaults. The latter is useful for guarding against +// unexpectedly hard-coded defaults. +// +// Sample usage to allow everything to execute remotely, while only allowing +// genrules to execute locally: +// +// strategy_policy { +// mnemonic_policy { +// default_allowlist: ["remote"] +// strategy_allowlist: [ +// { mnemonic: "Genrule" strategy: ["local"] } +// ] +// } +// } +message StrategyPolicy { + // Controls per-mnemonic policies for regular spawn/action execution. Relevant + // command-line flags this controls include --strategy and --genrule_strategy. + optional MnemonicPolicy mnemonic_policy = 1; + + // Controls per-mnemonic policies for the remote execution leg of dynamic + // execution. Relevant flag is --dynamic_remote_strategy. + optional MnemonicPolicy dynamic_remote_policy = 2; + + // Controls per-mnemonic policies for the local execution leg of dynamic + // execution. Relevant flag is --dynamic_local_strategy. + optional MnemonicPolicy dynamic_local_policy = 3; +} + +message MnemonicPolicy { + // Default allowed strategies for mnemonics not present in `strategy` list. 
+ repeated string default_allowlist = 1; + + repeated StrategiesForMnemonic strategy_allowlist = 2; +} + +// Per-mnemonic allowlist settings. +message StrategiesForMnemonic { + optional string mnemonic = 1; + repeated string strategy = 2; +} diff --git a/app/bazel_event_publisher_proto/src/lib.rs b/app/bazel_event_publisher_proto/src/lib.rs new file mode 100644 index 000000000000..e34b7627fb57 --- /dev/null +++ b/app/bazel_event_publisher_proto/src/lib.rs @@ -0,0 +1,61 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under both the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree and the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. + */ + +#![feature(error_generic_member_access)] + +pub mod blaze { + tonic::include_proto!("blaze"); + pub mod invocation_policy { + tonic::include_proto!("blaze.invocation_policy"); + } + pub mod strategy_policy { + tonic::include_proto!("blaze.strategy_policy"); + } +} + +pub mod build_event_stream { + tonic::include_proto!("build_event_stream"); +} + +pub mod command_line { + tonic::include_proto!("command_line"); +} + +pub mod devtools { + pub mod build { + pub mod lib { + pub mod packages { + pub mod metrics { + tonic::include_proto!("devtools.build.lib.packages.metrics"); + } + } + } + } +} + +pub mod failure_details { + tonic::include_proto!("failure_details"); +} + +pub mod google { + pub mod api { + tonic::include_proto!("google.api"); + } + pub mod devtools { + pub mod build { + pub mod v1 { + tonic::include_proto!("google.devtools.build.v1"); + } + } + } +} + +pub mod options { + tonic::include_proto!("options"); +} From 81b1763306b18b6ff27f470eb2a0daa8efa0b778 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Fri, 15 Nov 2024 11:53:17 +0100 Subject: [PATCH 04/11] Implement RemoteEventSink to publish to Bazel BES Repurposes the RemoteEventSink used to connect to Scribe 
within Meta internal Buck2 and connects it to a Bazel Build Event Service over gRPC instead. Adds a translation layer between Buck2 build events and Bazel build events. --- Cargo.toml | 1 + app/buck2_events/Cargo.toml | 8 + app/buck2_events/src/sink/remote.rs | 282 +++++++++++++++++++++++++++- 3 files changed, 285 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 29710b502384..4a56c9eff0be 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -131,6 +131,7 @@ async-compression = { version = "0.4.1", features = ["tokio", "gzip", "zstd"] } async-condvar-fair = { version = "1.0", features = ["parking_lot_0_11", "tokio"] } async-recursion = "1.0" async-scoped = { version = "0.9", features = ["use-tokio"] } +async-stream = "0.3.6" async-trait = "0.1.24" atomic = "0.5.1" backtrace = "0.3.51" diff --git a/app/buck2_events/Cargo.toml b/app/buck2_events/Cargo.toml index 83ac27882934..0e2cb6c2bb72 100644 --- a/app/buck2_events/Cargo.toml +++ b/app/buck2_events/Cargo.toml @@ -7,6 +7,7 @@ version = "0.1.0" [dependencies] anyhow = { workspace = true } +async-stream = { workspace = true } crossbeam-channel = { workspace = true } derive_more = { workspace = true } futures = { workspace = true } @@ -14,10 +15,14 @@ hostname = { workspace = true } is_proc_translated = { workspace = true } once_cell = { workspace = true } pin-project = { workspace = true } +prost = { workspace = true } +prost-types = { workspace = true } serde = { workspace = true } smallvec = { workspace = true } sys-info = { workspace = true } +tonic = { workspace = true } tokio = { workspace = true } +tokio-stream = { workspace = true } uuid = { workspace = true } allocative = { workspace = true } @@ -25,10 +30,13 @@ dupe = { workspace = true } fbinit = { workspace = true } gazebo = { workspace = true } +bazel_event_publisher_proto = { workspace = true } + buck2_build_info = { workspace = true } buck2_cli_proto = { workspace = true } buck2_core = { workspace = true } buck2_data = { workspace = true } 
+buck2_event_publisher_proto = { workspace = true } buck2_error = { workspace = true } buck2_util = { workspace = true } buck2_wrapper_common = { workspace = true } diff --git a/app/buck2_events/src/sink/remote.rs b/app/buck2_events/src/sink/remote.rs index a4d99c8bc652..8b82818b5cc4 100644 --- a/app/buck2_events/src/sink/remote.rs +++ b/app/buck2_events/src/sink/remote.rs @@ -334,7 +334,41 @@ mod fbcode { #[cfg(not(fbcode_build))] mod fbcode { + use std::collections::HashMap; use std::sync::Arc; + use std::thread::JoinHandle; + + use anyhow::Context; + + use async_stream::stream; + + use bazel_event_publisher_proto::build_event_stream; + use bazel_event_publisher_proto::google::devtools::build::v1; + use buck2_data; + use buck2_data::BuildCommandStart; + use buck2_util::future::try_join_all; + use futures::stream; + + use futures::Stream; + use futures::StreamExt; + use tonic::transport::Channel; + use tonic::Request; + + use tokio::runtime::Builder; + use tokio::sync::mpsc; + use tokio::sync::mpsc::UnboundedReceiver; + use tokio::sync::mpsc::UnboundedSender; + + use tokio_stream::wrappers::UnboundedReceiverStream; + + use bazel_event_publisher_proto::google::devtools::build::v1::OrderedBuildEvent; + use bazel_event_publisher_proto::google::devtools::build::v1::publish_build_event_client::PublishBuildEventClient; + use bazel_event_publisher_proto::google::devtools::build::v1::PublishBuildToolEventStreamRequest; + use bazel_event_publisher_proto::google::devtools::build::v1::StreamId; + + use prost; + use prost::Message; + use prost_types; use crate::BuckEvent; use crate::Event; @@ -342,15 +376,238 @@ mod fbcode { use crate::EventSinkStats; use crate::EventSinkWithStats; - pub enum RemoteEventSink {} + pub struct RemoteEventSink { + _handler: JoinHandle<()>, + send: UnboundedSender>, + } + + async fn connect_build_event_server() -> anyhow::Result> { + let uri = std::env::var("BES_URI")?.parse()?; + let channel = Channel::builder(uri); + // TODO: enable TLS and 
handle API token + // let tls_config = ClientTlsConfig::new(); + // channel = channel.tls_config(tls_config)?; + channel + .connect() + .await + .context("connecting to Bazel event stream gRPC server")?; + let client = PublishBuildEventClient::connect(channel) + .await + .context("creating Bazel event stream gRPC client")?; + Ok(client) + } + + fn buck_to_bazel_events>(events: S) -> impl Stream { + stream! { + for await event in events { + println!("EVENT {:?} {:?}", event.event.trace_id, event); + match event.data() { + buck2_data::buck_event::Data::SpanStart(start) => { + println!("START {:?}", start); + match start.data.as_ref() { + None => {}, + Some(buck2_data::span_start_event::Data::Command(command)) => { + match command.data.as_ref() { + None => {}, + Some(buck2_data::command_start::Data::Build(BuildCommandStart {})) => { + let bes_event = build_event_stream::BuildEvent { + id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::Started(build_event_stream::build_event_id::BuildStartedId {})) }), + children: vec![], + last_message: false, + payload: Some(build_event_stream::build_event::Payload::Started(build_event_stream::BuildStarted { + uuid: event.event.trace_id.clone(), + start_time_millis: 0, + start_time: Some(event.timestamp().into()), + build_tool_version: "BUCK2".to_owned(), + options_description: "UNKNOWN".to_owned(), + command: "build".to_owned(), + working_directory: "UNKNOWN".to_owned(), + workspace_directory: "UNKNOWN".to_owned(), + server_pid: std::process::id() as i64, + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + }; + }, + Some(_) => {}, + } + }, + Some(_) => {}, + } + }, + buck2_data::buck_event::Data::SpanEnd(end) => { + println!("END {:?}", end); 
+ match end.data.as_ref() { + None => {}, + Some(buck2_data::span_end_event::Data::Command(command)) => { + match command.data.as_ref() { + None => {}, + Some(buck2_data::command_end::Data::Build(_build)) => { + let bes_event = build_event_stream::BuildEvent { + id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::BuildFinished(build_event_stream::build_event_id::BuildFinishedId {})) }), + children: vec![], + last_message: true, + payload: Some(build_event_stream::build_event::Payload::Finished(build_event_stream::BuildFinished { + overall_success: command.is_success, + exit_code: Some( + if command.is_success { + build_event_stream::build_finished::ExitCode { + name: "SUCCESS".to_owned(), + code: 0, + } + } else { + build_event_stream::build_finished::ExitCode { + name: "FAILURE".to_owned(), + code: 1, + } + }), + finish_time_millis: 0, + finish_time: Some(event.timestamp().into()), + anomaly_report: None, + // TODO: convert Buck2 ErrorReport + failure_detail: None, + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + }; + break; + }, + Some(_) => {}, + } + }, + Some(_) => {}, + } + }, + buck2_data::buck_event::Data::Instant(instant) => { + println!("INST {:?}", instant); + }, + buck2_data::buck_event::Data::Record(record) => { + println!("REC {:?}", record); + }, + } + } + } + } + + fn stream_build_tool_events>(trace_id: String, events: S) -> impl Stream { + stream::iter(1..) 
+ .zip(events) + .map(move |(sequence_number, event)| { + PublishBuildToolEventStreamRequest { + check_preceding_lifecycle_events_present: false, + notification_keywords: vec![], + ordered_build_event: Some(OrderedBuildEvent { + stream_id: Some(StreamId { + build_id: trace_id.clone(), + invocation_id: trace_id.clone(), + component: 0, + }), + sequence_number, + event: Some(event), + }), + project_id: "12341234".to_owned(), // TODO: needed + } + }) + } + + async fn event_sink_loop(recv: UnboundedReceiver>) -> anyhow::Result<()> { + let mut handlers: HashMap, tokio::task::JoinHandle>)> = HashMap::new(); + let client = connect_build_event_server().await?; + let mut recv = UnboundedReceiverStream::new(recv) + .flat_map(|v|stream::iter(v)); + while let Some(event) = recv.next().await { + let dbg_trace_id = event.event.trace_id.clone(); + println!("event_sink_loop event {:?}", &dbg_trace_id); + if let Some((send, _)) = handlers.get(&event.event.trace_id) { + println!("event_sink_loop redirect {:?}", &dbg_trace_id); + send.send(event).unwrap_or_else(|e| println!("build event send failed {:?}", e)); + } else { + println!("event_sink_loop new handler {:?}", event.event.trace_id); + let (send, recv) = mpsc::unbounded_channel::(); + let mut client = client.clone(); + let dbg_trace_id = dbg_trace_id.clone(); + let trace_id = event.event.trace_id.clone(); + let handler = tokio::spawn(async move { + let recv = UnboundedReceiverStream::new(recv); + let request = Request::new(stream_build_tool_events(trace_id, buck_to_bazel_events(recv))); + println!("new handler request {:?}", &dbg_trace_id); + let response = client.publish_build_tool_event_stream(request).await?; + println!("new handler response {:?}", &dbg_trace_id); + let mut inbound = response.into_inner(); + while let Some(ack) = inbound.message().await? { + // TODO: Handle ACKs properly and add retry. 
+ println!("ACK {:?}", ack); + } + Ok(()) + }); + handlers.insert(event.event.trace_id.to_owned(), (send, handler)); + } + } + println!("event_sink_loop recv CLOSED"); + // TODO: handle closure and retry. + // close send handles and await all handlers. + let handlers: Vec>> = handlers.into_values().map(|(_, handler)|handler).collect(); + // TODO: handle retry. + try_join_all(handlers).await?.into_iter().collect::>>()?; + Ok(()) + } impl RemoteEventSink { - pub async fn send_now(&self, _event: BuckEvent) {} - pub async fn send_messages_now(&self, _events: Vec) {} + pub fn new() -> anyhow::Result { + let (send, recv) = mpsc::unbounded_channel::>(); + let handler = std::thread::Builder::new() + .name("buck-event-producer".to_owned()) + .spawn({ + move || { + let runtime = Builder::new_current_thread().enable_all().build().unwrap(); + runtime.block_on(event_sink_loop(recv)).unwrap(); + } + }).context("spawning buck-event-producer thread")?; + Ok(RemoteEventSink { + _handler: handler, + send, + }) + } + pub async fn send_now(&self, event: BuckEvent) { + self.send_messages_now(vec![event]).await; + } + pub async fn send_messages_now(&self, events: Vec) { + // TODO: does this make sense for BES? If so, implement send now variant. + if let Err(err) = self.send.send(events) { + // TODO: proper error handling + dbg!(err); + } + } + pub fn offer(&self, event: BuckEvent) { + if let Err(err) = self.send.send(vec![event]) { + // TODO: proper error handling + dbg!(err); + } + } } impl EventSink for RemoteEventSink { - fn send(&self, _event: Event) {} + fn send(&self, event: Event) { + match event { + Event::Buck(event) => { + self.offer(event); + } + Event::CommandResult(..) => {}, + Event::PartialResult(..) 
=> {}, + } + } } impl EventSinkWithStats for RemoteEventSink { @@ -359,7 +616,20 @@ mod fbcode { } fn stats(&self) -> EventSinkStats { - match *self {} + EventSinkStats { + successes: 0, + failures_invalid_request: 0, + failures_unauthorized: 0, + failures_rate_limited: 0, + failures_pushed_back: 0, + failures_enqueue_failed: 0, + failures_internal_error: 0, + failures_timed_out: 0, + failures_unknown: 0, + buffered: 0, + dropped: 0, + bytes_written: 0, + } } } } @@ -393,7 +663,7 @@ fn new_remote_event_sink_if_fbcode( retry_attempts, message_batch_size, ); - Ok(None) + Ok(Some(RemoteEventSink::new()?)) } } From 5b8a6ef2aaca6e21f611186c1035835f4df39c25 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 20 Nov 2024 14:54:54 +0100 Subject: [PATCH 05/11] Publish target, action, and failure information BuildBuddy uses pattern expand messages to collect the set of targets to report on. Buck2 does not have a direct correspondance to the target completed message, for now we collect all targets for which actions are completed and then emit a series of target completed events before we close the stream. Stdout and stderr are only included inline in Buck2 and not as CAS items, so we forward them as inline data. Failure details are used by BuildBuddy to display build errors in the UI, so we emit these. 
--- app/buck2_events/src/sink/remote.rs | 202 ++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) diff --git a/app/buck2_events/src/sink/remote.rs b/app/buck2_events/src/sink/remote.rs index 8b82818b5cc4..e991af9c2a1c 100644 --- a/app/buck2_events/src/sink/remote.rs +++ b/app/buck2_events/src/sink/remote.rs @@ -343,6 +343,8 @@ mod fbcode { use async_stream::stream; use bazel_event_publisher_proto::build_event_stream; + use bazel_event_publisher_proto::build_event_stream::build_event_id; + use bazel_event_publisher_proto::build_event_stream::BuildEventId; use bazel_event_publisher_proto::google::devtools::build::v1; use buck2_data; use buck2_data::BuildCommandStart; @@ -398,6 +400,7 @@ mod fbcode { } fn buck_to_bazel_events>(events: S) -> impl Stream { + let mut target_actions: HashMap<(String, String), Vec<(BuildEventId, bool)>> = HashMap::new(); stream! { for await event in events { println!("EVENT {:?} {:?}", event.event.trace_id, event); @@ -438,6 +441,65 @@ mod fbcode { Some(_) => {}, } }, + Some(buck2_data::span_start_event::Data::Analysis(analysis)) => { + let label = match analysis.target.as_ref() { + None => None, + Some(buck2_data::analysis_start::Target::StandardTarget(label)) => + label.label.as_ref().map(|label| format!("{}:{}", label.package, label.name)), + Some(buck2_data::analysis_start::Target::AnonTarget(_anon)) => None, // TODO + Some(buck2_data::analysis_start::Target::DynamicLambda(_owner)) => None, // TODO + }; + match label { + None => {}, + Some(label) => { + let bes_event = build_event_stream::BuildEvent { + id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::TargetConfigured(build_event_id::TargetConfiguredId { + label: label.clone(), + aspect: "".to_owned(), + })) }), + children: vec![], + last_message: false, + payload: Some(build_event_stream::build_event::Payload::Configured(bazel_event_publisher_proto::build_event_stream::TargetConfigured { + target_kind: "UNKNOWN".to_owned(), + 
test_size: 0, + tag: vec![], + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + }; + + let bes_event = build_event_stream::BuildEvent { + id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::Pattern(build_event_id::PatternExpandedId { + pattern: vec![label.clone()], + })) }), + children: vec![ + build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::TargetConfigured(bazel_event_publisher_proto::build_event_stream::build_event_id::TargetConfiguredId { + label: label, + aspect: "".to_owned(), + }))}, + ], + last_message: false, + payload: Some(build_event_stream::build_event::Payload::Expanded(bazel_event_publisher_proto::build_event_stream::PatternExpanded { + test_suite_expansions: vec![], + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + }; + }, + } + }, Some(_) => {}, } }, @@ -449,6 +511,41 @@ mod fbcode { match command.data.as_ref() { None => {}, Some(buck2_data::command_end::Data::Build(_build)) => { + // flush the target completed map. 
+ for ((label, config), actions) in target_actions.into_iter() { + let success = actions.iter().all(|(_, success)| *success); + let children: Vec<_> = actions.into_iter().map(|(id, _)| id).collect(); + let bes_event = build_event_stream::BuildEvent { + id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::TargetCompleted(build_event_id::TargetCompletedId { + label: label, + configuration: Some(build_event_id::ConfigurationId { id: config }), + aspect: "".to_owned(), + })) }), + children: children, + last_message: false, + payload: Some(build_event_stream::build_event::Payload::Completed(build_event_stream::TargetComplete { + success: success, + target_kind: "".to_owned(), + test_size: 0, + output_group: vec![], + important_output: vec![], + directory_output: vec![], + tag: vec![], + test_timeout_seconds: 0, + test_timeout: None, + failure_detail: None, + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + }; + } + let bes_event = build_event_stream::BuildEvent { id: Some(build_event_stream::BuildEventId { id: Some(build_event_stream::build_event_id::Id::BuildFinished(build_event_stream::build_event_id::BuildFinishedId {})) }), children: vec![], @@ -487,6 +584,111 @@ mod fbcode { Some(_) => {}, } }, + Some(buck2_data::span_end_event::Data::ActionExecution(action)) => { + let configuration = match &action.key { + None => None, + Some(key) => match &key.owner { + None => None, + Some(owner) => match owner { + buck2_data::action_key::Owner::TargetLabel(target) => target.configuration.clone(), + buck2_data::action_key::Owner::TestTargetLabel(test) => test.configuration.clone(), + buck2_data::action_key::Owner::LocalResourceSetup(resource) => resource.configuration.clone(), + 
buck2_data::action_key::Owner::AnonTarget(_anon) => None, // TODO: execution configuration? + buck2_data::action_key::Owner::BxlKey(_bxl) => None, + }, + }, + }.map(|configuration| build_event_id::ConfigurationId { id: configuration.full_name.clone() }); + let label = match &action.key { + None => None, + Some(key) => match &key.owner { + None => None, + Some(owner) => match owner { + buck2_data::action_key::Owner::TargetLabel(target) => target.label.clone(), + buck2_data::action_key::Owner::TestTargetLabel(test) => test.label.clone(), + buck2_data::action_key::Owner::LocalResourceSetup(resource) => resource.label.clone(), + buck2_data::action_key::Owner::AnonTarget(anon) => anon.name.clone(), + buck2_data::action_key::Owner::BxlKey(_bxl) => None, // TODO: handle bxl + }, + }, + }.map(|label| format!("{}:{}", label.package, label.name)); + let action_id = BuildEventId {id: Some(build_event_id::Id::ActionCompleted(build_event_id::ActionCompletedId { + configuration: configuration.clone(), + label: label.clone().unwrap_or("UNKOWN".to_owned()), + primary_output: "UNKNOWN".to_owned(), + }))}; + let mnemonic = action.name.as_ref().map(|name| name.category.clone()).unwrap_or("UNKNOWN".to_owned()); + let success = !action.failed; + let last_command_details = action.commands.last().and_then(|command| command.details.as_ref()); + let command_line: Vec = match last_command_details.and_then(|command| command.command_kind.as_ref()).and_then(|kind| kind.command.as_ref()) { + None => vec![], + Some(buck2_data::command_execution_kind::Command::LocalCommand(command)) => command.argv.clone(), + Some(_) => vec![], // TODO: handle remote, worker, and other commands + }; + let exit_code = last_command_details.and_then(|details| details.signed_exit_code).unwrap_or(0); + let stdout = last_command_details.map(|details| details.stdout.clone()); + let stderr = last_command_details.map(|details| details.stderr.clone()); + let stdout_file = stdout.map(|stdout| 
bazel_event_publisher_proto::build_event_stream::File { + path_prefix: vec![], + name: "stdout".to_owned(), + digest: "".to_owned(), + length: stdout.len() as i64, + file: Some(bazel_event_publisher_proto::build_event_stream::file::File::Contents(stdout.into())), + }); + let stderr_file = stderr.clone().map(|stderr| bazel_event_publisher_proto::build_event_stream::File { + path_prefix: vec![], + name: "stderr".to_owned(), + digest: "".to_owned(), + length: stderr.len() as i64, + file: Some(bazel_event_publisher_proto::build_event_stream::file::File::Contents(stderr.into())), + }); + let start_time = last_command_details.and_then(|details| details.metadata.as_ref().and_then(|metadata| metadata.start_time.clone())); + //let wall_time = last_command_details.and_then(|details| details.metadata.as_ref().and_then(|metadata| metadata.wall_time.clone())); + //let end_time = ...; // TODO: add start_time and wall_time + match (label.as_ref(), configuration.as_ref()) { + (Some(label), Some(configuration)) => { + target_actions + .entry((label.clone(), configuration.id.clone())) + .or_default() + .push((action_id.clone(), success)); + }, + _ => {}, + } + let failure_detail = if success { None } else { + Some(bazel_event_publisher_proto::failure_details::FailureDetail { + message: stderr.unwrap_or("UNKNOWN".to_owned()), + category: None, // TODO + }) + }; + let bes_event = build_event_stream::BuildEvent { + id: Some(action_id), + children: vec![], + last_message: false, + payload: Some(build_event_stream::build_event::Payload::Action(build_event_stream::ActionExecuted { + success: success, + r#type: mnemonic, + exit_code: exit_code, + stdout: stdout_file, + stderr: stderr_file, + label: "".to_owned(), + configuration: None, + primary_output: None, + command_line: command_line, + action_metadata_logs: vec![], + failure_detail: failure_detail, + start_time: start_time, // TODO: should we deduct queue time? 
+ end_time: None, + strategy_details: vec![], + })), + }; + let bazel_event = v1::build_event::Event::BazelEvent(prost_types::Any { + type_url: "type.googleapis.com/build_event_stream.BuildEvent".to_owned(), + value: bes_event.encode_to_vec(), + }); + yield v1::BuildEvent { + event_time: Some(event.timestamp().into()), + event: Some(bazel_event), + }; + }, Some(_) => {}, } }, From 09c7f44734a420e9d5fb3a7e27b3adb8dd023756 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 18 Dec 2024 14:49:37 +0100 Subject: [PATCH 06/11] Enable TLS if requested --- app/buck2_events/src/sink/remote.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/app/buck2_events/src/sink/remote.rs b/app/buck2_events/src/sink/remote.rs index e991af9c2a1c..1f284f905dbe 100644 --- a/app/buck2_events/src/sink/remote.rs +++ b/app/buck2_events/src/sink/remote.rs @@ -354,6 +354,7 @@ mod fbcode { use futures::Stream; use futures::StreamExt; use tonic::transport::Channel; + use tonic::transport::channel::ClientTlsConfig; use tonic::Request; use tokio::runtime::Builder; @@ -385,10 +386,19 @@ mod fbcode { async fn connect_build_event_server() -> anyhow::Result> { let uri = std::env::var("BES_URI")?.parse()?; - let channel = Channel::builder(uri); - // TODO: enable TLS and handle API token - // let tls_config = ClientTlsConfig::new(); - // channel = channel.tls_config(tls_config)?; + let mut channel = Channel::builder(uri); + let tls_config = ClientTlsConfig::new(); + { + let tls_setting = std::env::var("BES_TLS").unwrap_or("0".to_owned()); + match tls_setting.as_str() { + "1" | "true" => { + channel = channel.tls_config(tls_config)?; + }, + _ => {}, + } + } + // TODO: parse PEM + // TODO: handle API token channel .connect() .await From f994822b916374da5ef4225221a01fc35c253c25 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 18 Dec 2024 15:02:42 +0100 Subject: [PATCH 07/11] Copy InjectHeadersInterceptor --- app/buck2_events/Cargo.toml | 1 + 
app/buck2_events/src/sink/remote.rs | 113 ++++++++++++++++++++++++++++ 2 files changed, 114 insertions(+) diff --git a/app/buck2_events/Cargo.toml b/app/buck2_events/Cargo.toml index 0e2cb6c2bb72..fb26a6b860d7 100644 --- a/app/buck2_events/Cargo.toml +++ b/app/buck2_events/Cargo.toml @@ -17,6 +17,7 @@ once_cell = { workspace = true } pin-project = { workspace = true } prost = { workspace = true } prost-types = { workspace = true } +regex = { workspace = true } serde = { workspace = true } smallvec = { workspace = true } sys-info = { workspace = true } diff --git a/app/buck2_events/src/sink/remote.rs b/app/buck2_events/src/sink/remote.rs index 1f284f905dbe..c0e57aee504a 100644 --- a/app/buck2_events/src/sink/remote.rs +++ b/app/buck2_events/src/sink/remote.rs @@ -335,9 +335,12 @@ mod fbcode { #[cfg(not(fbcode_build))] mod fbcode { use std::collections::HashMap; + use std::env::VarError; + use std::str::FromStr; use std::sync::Arc; use std::thread::JoinHandle; + use allocative::Allocative; use anyhow::Context; use async_stream::stream; @@ -349,10 +352,17 @@ mod fbcode { use buck2_data; use buck2_data::BuildCommandStart; use buck2_util::future::try_join_all; + use dupe::Dupe; use futures::stream; + use once_cell::sync::Lazy; use futures::Stream; use futures::StreamExt; + use tonic::metadata; + use tonic::metadata::MetadataKey; + use tonic::metadata::MetadataValue; + use tonic::service::interceptor::InterceptedService; + use tonic::service::Interceptor; use tonic::transport::Channel; use tonic::transport::channel::ClientTlsConfig; use tonic::Request; @@ -373,6 +383,8 @@ mod fbcode { use prost::Message; use prost_types; + use regex::Regex; + use crate::BuckEvent; use crate::Event; use crate::EventSink; @@ -384,6 +396,107 @@ mod fbcode { send: UnboundedSender>, } + // TODO[AH] re-use definitions from REOSS crate. 
+ #[derive(Clone, Debug, Default, Allocative)] + pub struct HttpHeader { + pub key: String, + pub value: String, + } + + impl FromStr for HttpHeader { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let mut iter = s.split(':'); + match (iter.next(), iter.next(), iter.next()) { + (Some(key), Some(value), None) => Ok(Self { + key: key.trim().to_owned(), + value: value.trim().to_owned(), + }), + _ => Err(anyhow::anyhow!( + "Invalid header (expect exactly one `:`): `{}`", + s + )), + } + } + } + + /// Replace occurrences of $FOO in a string with the value of the env var $FOO. + fn substitute_env_vars(s: &str) -> anyhow::Result { + substitute_env_vars_impl(s, |v| std::env::var(v)) + } + + fn substitute_env_vars_impl( + s: &str, + getter: impl Fn(&str) -> Result, + ) -> anyhow::Result { + static ENV_REGEX: Lazy = Lazy::new(|| Regex::new("\\$[a-zA-Z_][a-zA-Z_0-9]*").unwrap()); + + let mut out = String::with_capacity(s.len()); + let mut last_idx = 0; + + for mat in ENV_REGEX.find_iter(s) { + out.push_str(&s[last_idx..mat.start()]); + let var = &mat.as_str()[1..]; + let val = getter(var).with_context(|| format!("Error substituting `{}`", mat.as_str()))?; + out.push_str(&val); + last_idx = mat.end(); + } + + if last_idx < s.len() { + out.push_str(&s[last_idx..s.len()]); + } + + Ok(out) + } + + #[derive(Clone, Dupe)] + struct InjectHeadersInterceptor { + headers: Arc, MetadataValue)>>, + } + + impl InjectHeadersInterceptor { + pub fn new(headers: &[HttpHeader]) -> anyhow::Result { + let headers = headers + .iter() + .map(|h| { + // This means we can't have `$` in a header key or value, which isn't great. On the + // flip side, env vars are good for things like credentials, which those headers + // are likely to contain. In time, we should allow escaping. 
+ let key = substitute_env_vars(&h.key)?; + let value = substitute_env_vars(&h.value)?; + + let key = MetadataKey::::from_bytes(key.as_bytes()) + .with_context(|| format!("Invalid key in header: `{}: {}`", key, value))?; + + let value = MetadataValue::try_from(&value) + .with_context(|| format!("Invalid value in header: `{}: {}`", key, value))?; + + anyhow::Ok((key, value)) + }) + .collect::>() + .context("Error converting headers")?; + + Ok(Self { + headers: Arc::new(headers), + }) + } + } + + impl Interceptor for InjectHeadersInterceptor { + fn call( + &mut self, + mut request: tonic::Request<()>, + ) -> Result, tonic::Status> { + for (k, v) in self.headers.iter() { + request.metadata_mut().insert(k.clone(), v.clone()); + } + Ok(request) + } + } + + type GrpcService = InterceptedService; + async fn connect_build_event_server() -> anyhow::Result> { let uri = std::env::var("BES_URI")?.parse()?; let mut channel = Channel::builder(uri); From 5f2cdb7a1cbbdee5820607fa532d6a745885529d Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 18 Dec 2024 15:43:26 +0100 Subject: [PATCH 08/11] Forward BES HTTP headers --- app/buck2_events/src/sink/remote.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/app/buck2_events/src/sink/remote.rs b/app/buck2_events/src/sink/remote.rs index c0e57aee504a..4f7bb3d8002a 100644 --- a/app/buck2_events/src/sink/remote.rs +++ b/app/buck2_events/src/sink/remote.rs @@ -497,7 +497,7 @@ mod fbcode { type GrpcService = InterceptedService; - async fn connect_build_event_server() -> anyhow::Result> { + async fn connect_build_event_server() -> anyhow::Result> { let uri = std::env::var("BES_URI")?.parse()?; let mut channel = Channel::builder(uri); let tls_config = ClientTlsConfig::new(); @@ -511,14 +511,19 @@ mod fbcode { } } // TODO: parse PEM - // TODO: handle API token - channel + let endpoint = channel .connect() .await .context("connecting to Bazel event stream gRPC server")?; - let client = 
PublishBuildEventClient::connect(channel) - .await - .context("creating Bazel event stream gRPC client")?; + let mut headers = vec![]; + for hdr in std::env::var("BES_HEADERS").unwrap_or("".to_owned()).split(",") { + let hdr = hdr.trim(); + if !hdr.is_empty() { + headers.push(HttpHeader::from_str(hdr)?); + } + }; + let interceptor = InjectHeadersInterceptor::new(&headers)?; + let client = PublishBuildEventClient::with_interceptor(endpoint, interceptor); Ok(client) } From 5eb819a963d23dbb6a56249e676a557ce756fd82 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 18 Dec 2024 15:55:46 +0100 Subject: [PATCH 09/11] reduce debug noise --- app/buck2_events/src/sink/remote.rs | 30 ++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/app/buck2_events/src/sink/remote.rs b/app/buck2_events/src/sink/remote.rs index 4f7bb3d8002a..15a1292f498e 100644 --- a/app/buck2_events/src/sink/remote.rs +++ b/app/buck2_events/src/sink/remote.rs @@ -531,10 +531,10 @@ mod fbcode { let mut target_actions: HashMap<(String, String), Vec<(BuildEventId, bool)>> = HashMap::new(); stream! 
{ for await event in events { - println!("EVENT {:?} {:?}", event.event.trace_id, event); + //println!("EVENT {:?} {:?}", event.event.trace_id, event); match event.data() { buck2_data::buck_event::Data::SpanStart(start) => { - println!("START {:?}", start); + //println!("START {:?}", start); match start.data.as_ref() { None => {}, Some(buck2_data::span_start_event::Data::Command(command)) => { @@ -632,7 +632,7 @@ mod fbcode { } }, buck2_data::buck_event::Data::SpanEnd(end) => { - println!("END {:?}", end); + //println!("END {:?}", end); match end.data.as_ref() { None => {}, Some(buck2_data::span_end_event::Data::Command(command)) => { @@ -820,11 +820,11 @@ mod fbcode { Some(_) => {}, } }, - buck2_data::buck_event::Data::Instant(instant) => { - println!("INST {:?}", instant); + buck2_data::buck_event::Data::Instant(_instant) => { + //println!("INST {:?}", instant); }, - buck2_data::buck_event::Data::Record(record) => { - println!("REC {:?}", record); + buck2_data::buck_event::Data::Record(_record) => { + //println!("REC {:?}", record); }, } } @@ -859,12 +859,12 @@ mod fbcode { .flat_map(|v|stream::iter(v)); while let Some(event) = recv.next().await { let dbg_trace_id = event.event.trace_id.clone(); - println!("event_sink_loop event {:?}", &dbg_trace_id); + //println!("event_sink_loop event {:?}", &dbg_trace_id); if let Some((send, _)) = handlers.get(&event.event.trace_id) { - println!("event_sink_loop redirect {:?}", &dbg_trace_id); + //println!("event_sink_loop redirect {:?}", &dbg_trace_id); send.send(event).unwrap_or_else(|e| println!("build event send failed {:?}", e)); } else { - println!("event_sink_loop new handler {:?}", event.event.trace_id); + //println!("event_sink_loop new handler {:?}", event.event.trace_id); let (send, recv) = mpsc::unbounded_channel::(); let mut client = client.clone(); let dbg_trace_id = dbg_trace_id.clone(); @@ -872,20 +872,20 @@ mod fbcode { let handler = tokio::spawn(async move { let recv = UnboundedReceiverStream::new(recv); let 
request = Request::new(stream_build_tool_events(trace_id, buck_to_bazel_events(recv))); - println!("new handler request {:?}", &dbg_trace_id); + println!("BES request {:?}", &dbg_trace_id); let response = client.publish_build_tool_event_stream(request).await?; - println!("new handler response {:?}", &dbg_trace_id); + println!("BES response {:?}", &dbg_trace_id); let mut inbound = response.into_inner(); - while let Some(ack) = inbound.message().await? { + while let Some(_ack) = inbound.message().await? { // TODO: Handle ACKs properly and add retry. - println!("ACK {:?}", ack); + //println!("ACK {:?}", ack); } Ok(()) }); handlers.insert(event.event.trace_id.to_owned(), (send, handler)); } } - println!("event_sink_loop recv CLOSED"); + //println!("event_sink_loop recv CLOSED"); // TODO: handle closure and retry. // close send handles and await all handlers. let handlers: Vec>> = handlers.into_values().map(|(_, handler)|handler).collect(); From bbc66b1fc425502bf270cf05ac33abd1733270be Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Thu, 19 Dec 2024 15:50:30 +0100 Subject: [PATCH 10/11] Handle missing BES configuration gracefully --- app/buck2_events/src/sink/remote.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/app/buck2_events/src/sink/remote.rs b/app/buck2_events/src/sink/remote.rs index 15a1292f498e..3e26f0064051 100644 --- a/app/buck2_events/src/sink/remote.rs +++ b/app/buck2_events/src/sink/remote.rs @@ -993,7 +993,10 @@ fn new_remote_event_sink_if_fbcode( retry_attempts, message_batch_size, ); - Ok(Some(RemoteEventSink::new()?)) + match std::env::var("BES_URI") { + Ok(_) => Ok(Some(RemoteEventSink::new()?)), + _ => Ok(None), + } } } From 115d389f67bed225f7a5a0faafce958c23be1676 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Thu, 19 Dec 2024 16:02:17 +0100 Subject: [PATCH 11/11] Configure a BES URL through BES_RESULT env-var --- app/buck2_events/src/sink/remote.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 
deletions(-) diff --git a/app/buck2_events/src/sink/remote.rs b/app/buck2_events/src/sink/remote.rs index 3e26f0064051..975d657c31cb 100644 --- a/app/buck2_events/src/sink/remote.rs +++ b/app/buck2_events/src/sink/remote.rs @@ -857,8 +857,9 @@ mod fbcode { let client = connect_build_event_server().await?; let mut recv = UnboundedReceiverStream::new(recv) .flat_map(|v|stream::iter(v)); + let result_uri = std::env::var("BES_RESULT").ok(); while let Some(event) = recv.next().await { - let dbg_trace_id = event.event.trace_id.clone(); + //let dbg_trace_id = event.event.trace_id.clone(); //println!("event_sink_loop event {:?}", &dbg_trace_id); if let Some((send, _)) = handlers.get(&event.event.trace_id) { //println!("event_sink_loop redirect {:?}", &dbg_trace_id); @@ -867,19 +868,26 @@ mod fbcode { //println!("event_sink_loop new handler {:?}", event.event.trace_id); let (send, recv) = mpsc::unbounded_channel::(); let mut client = client.clone(); - let dbg_trace_id = dbg_trace_id.clone(); + let result_uri = result_uri.clone(); + //let dbg_trace_id = dbg_trace_id.clone(); let trace_id = event.event.trace_id.clone(); let handler = tokio::spawn(async move { let recv = UnboundedReceiverStream::new(recv); - let request = Request::new(stream_build_tool_events(trace_id, buck_to_bazel_events(recv))); - println!("BES request {:?}", &dbg_trace_id); + let request = Request::new(stream_build_tool_events(trace_id.clone(), buck_to_bazel_events(recv))); + if let Some(result_uri) = result_uri.as_ref() { + println!("BES results: {}{}", &result_uri, &trace_id); + } + //println!("BES request {:?}", &dbg_trace_id); let response = client.publish_build_tool_event_stream(request).await?; - println!("BES response {:?}", &dbg_trace_id); + //println!("BES response {:?}", &dbg_trace_id); let mut inbound = response.into_inner(); while let Some(_ack) = inbound.message().await? { // TODO: Handle ACKs properly and add retry. 
//println!("ACK {:?}", ack); } + if let Some(result_uri) = result_uri.as_ref() { + println!("BES results: {}{}", &result_uri, &trace_id); + } Ok(()) }); handlers.insert(event.event.trace_id.to_owned(), (send, handler));