Commit
self-profile: Switch to new approach for event_id generation that enables query-invocation-specific event_ids.
michaelwoerister committed Dec 18, 2019
1 parent 19bd934 commit 603892d
Showing 9 changed files with 254 additions and 104 deletions.
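
At a high level, the commit separates event recording from string allocation: while queries run, each self-profiling event is tagged only with a numeric id derived from the query invocation's `DepNodeIndex` (wrapped as a `QueryInvocationId`), and the mapping from those ids to human-readable strings is emitted once, after the last query has run. The sketch below models that two-phase idea with simplified stand-in types; `Event`, `resolve_event_ids`, and the id-to-name map are illustrative, not the actual rustc or measureme API.

```rust
use std::collections::HashMap;

// Stand-in for rustc's `QueryInvocationId`: one value per executed query.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct QueryInvocationId(u32);

// While the compiler runs, events only carry the numeric id (cheap to record).
struct Event {
    event_id: QueryInvocationId,
    duration_nanos: u64,
}

// After compilation, every id recorded in the query caches is pointed at a
// string -- currently just the query name, later possibly the query key too.
fn resolve_event_ids(
    events: &[Event],
    id_to_name: &HashMap<QueryInvocationId, &'static str>,
) -> Vec<(&'static str, u64)> {
    events
        .iter()
        .map(|e| (*id_to_name.get(&e.event_id).unwrap_or(&"<unknown>"), e.duration_nanos))
        .collect()
}

fn main() {
    let mut id_to_name = HashMap::new();
    id_to_name.insert(QueryInvocationId(0), "type_of");
    id_to_name.insert(QueryInvocationId(1), "optimized_mir");

    let events = vec![
        Event { event_id: QueryInvocationId(0), duration_nanos: 1_200 },
        Event { event_id: QueryInvocationId(1), duration_nanos: 54_000 },
    ];

    assert_eq!(resolve_event_ids(&events, &id_to_name)[0].0, "type_of");
}
```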
4 changes: 2 additions & 2 deletions Cargo.lock
@@ -2021,9 +2021,9 @@ dependencies = [

[[package]]
name = "measureme"
version = "0.5.0"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c420bbc064623934620b5ab2dc0cf96451b34163329e82f95e7fa1b7b99a6ac8"
checksum = "36dcc09c1a633097649f7d48bde3d8a61d2a43c01ce75525e31fbbc82c0fccf4"
dependencies = [
"byteorder",
"memmap",
2 changes: 1 addition & 1 deletion src/librustc/Cargo.toml
@@ -37,6 +37,6 @@ byteorder = { version = "1.3" }
chalk-engine = { version = "0.9.0", default-features=false }
rustc_fs_util = { path = "../librustc_fs_util" }
smallvec = { version = "1.0", features = ["union", "may_dangle"] }
measureme = "0.5"
measureme = "0.6.0"
rustc_error_codes = { path = "../librustc_error_codes" }
rustc_session = { path = "../librustc_session" }
27 changes: 24 additions & 3 deletions src/librustc/dep_graph/graph.rs
@@ -3,9 +3,10 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::vec::{Idx, IndexVec};
use smallvec::SmallVec;
+ use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sync::{Lrc, Lock, AtomicU32, AtomicU64, Ordering};
use rustc_data_structures::sharded::{self, Sharded};
- use std::sync::atomic::Ordering::SeqCst;
+ use std::sync::atomic::Ordering::Relaxed;
use std::env;
use std::hash::Hash;
use std::collections::hash_map::Entry;
@@ -25,6 +26,12 @@ use super::prev::PreviousDepGraph;
#[derive(Clone)]
pub struct DepGraph {
data: Option<Lrc<DepGraphData>>,

+ /// This field is used for assigning DepNodeIndices when running in
+ /// non-incremental mode. Even in non-incremental mode we make sure that
+ /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
+ /// ID is used for self-profiling.
+ virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
@@ -35,6 +42,13 @@ impl DepNodeIndex {
pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
}

+ impl std::convert::From<DepNodeIndex> for QueryInvocationId {
+ #[inline]
+ fn from(dep_node_index: DepNodeIndex) -> Self {
+ QueryInvocationId(dep_node_index.as_u32())
+ }
+ }

#[derive(PartialEq)]
pub enum DepNodeColor {
Red,
@@ -103,12 +117,14 @@ impl DepGraph {
previous: prev_graph,
colors: DepNodeColorMap::new(prev_graph_node_count),
})),
+ virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
}
}

pub fn new_disabled() -> DepGraph {
DepGraph {
data: None,
+ virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
}
}

@@ -319,7 +335,7 @@ impl DepGraph {

(result, dep_node_index)
} else {
- (task(cx, arg), DepNodeIndex::INVALID)
+ (task(cx, arg), self.next_virtual_depnode_index())
}
}

@@ -354,7 +370,7 @@ impl DepGraph {
.complete_anon_task(dep_kind, task_deps);
(result, dep_node_index)
} else {
- (op(), DepNodeIndex::INVALID)
+ (op(), self.next_virtual_depnode_index())
}
}

@@ -877,6 +893,11 @@ impl DepGraph {
}
}
}

+ fn next_virtual_depnode_index(&self) -> DepNodeIndex {
+ let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
+ DepNodeIndex::from_u32(index)
+ }
}

/// A "work product" is an intermediate result that we save into the
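
`next_virtual_depnode_index` above is a plain atomic counter, and `Relaxed` ordering is sufficient because the only property needed is that every call returns a distinct index; no other memory accesses have to be ordered around it. Below is a standalone sketch of the same pattern (ordinary Rust, not rustc internals) that checks the handed-out ids stay unique across threads.

```rust
use std::sync::atomic::{AtomicU32, Ordering::Relaxed};
use std::sync::Arc;
use std::thread;

fn main() {
    // Shared counter, playing the role of `virtual_dep_node_index`.
    let counter = Arc::new(AtomicU32::new(0));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                // `fetch_add(1, Relaxed)` is an atomic read-modify-write, so every
                // call still returns a distinct value; Relaxed only gives up
                // ordering guarantees relative to *other* memory accesses, which
                // an id generator does not need.
                (0..1_000).map(|_| counter.fetch_add(1, Relaxed)).collect::<Vec<u32>>()
            })
        })
        .collect();

    let mut ids: Vec<u32> = handles.into_iter().flat_map(|h| h.join().unwrap()).collect();
    ids.sort();
    ids.dedup();
    assert_eq!(ids.len(), 4_000); // all 4 * 1000 ids are unique
}
```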
4 changes: 2 additions & 2 deletions src/librustc/ty/query/config.rs
@@ -3,7 +3,7 @@ use crate::dep_graph::{DepKind, DepNode};
use crate::hir::def_id::{CrateNum, DefId};
use crate::ty::TyCtxt;
use crate::ty::query::queries;
- use crate::ty::query::{Query, QueryName};
+ use crate::ty::query::{Query};
use crate::ty::query::QueryCache;
use crate::ty::query::plumbing::CycleError;
use rustc_data_structures::profiling::ProfileCategory;
@@ -20,7 +20,7 @@ use crate::ich::StableHashingContext;
// FIXME(eddyb) false positive, the lifetime parameter is used for `Key`/`Value`.
#[allow(unused_lifetimes)]
pub trait QueryConfig<'tcx> {
- const NAME: QueryName;
+ const NAME: &'static str;
const CATEGORY: ProfileCategory;

type Key: Eq + Hash + Clone + Debug;
117 changes: 61 additions & 56 deletions src/librustc/ty/query/plumbing.rs
@@ -104,7 +104,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
if let Some((_, value)) =
lock.results.raw_entry().from_key_hashed_nocheck(key_hash, key)
{
- tcx.prof.query_cache_hit(Q::NAME);
+ tcx.prof.query_cache_hit(value.index.into());
let result = (value.value.clone(), value.index);
#[cfg(debug_assertions)]
{
@@ -356,7 +356,7 @@ impl<'tcx> TyCtxt<'tcx> {
#[inline(never)]
pub(super) fn get_query<Q: QueryDescription<'tcx>>(self, span: Span, key: Q::Key) -> Q::Value {
debug!("ty::query::get_query<{}>(key={:?}, span={:?})",
- Q::NAME.as_str(),
+ Q::NAME,
key,
span);

@@ -378,7 +378,7 @@ impl<'tcx> TyCtxt<'tcx> {

if Q::ANON {

- let prof_timer = self.prof.query_provider(Q::NAME);
+ let prof_timer = self.prof.query_provider();

let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
self.start_query(job.job.clone(), diagnostics, |tcx| {
@@ -388,7 +388,7 @@ impl<'tcx> TyCtxt<'tcx> {
})
});

- drop(prof_timer);
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());

self.dep_graph.read_index(dep_node_index);

@@ -445,8 +445,9 @@ impl<'tcx> TyCtxt<'tcx> {
// First we try to load the result from the on-disk cache.
let result = if Q::cache_on_disk(self, key.clone(), None) &&
self.sess.opts.debugging_opts.incremental_queries {
- let _prof_timer = self.prof.incr_cache_loading(Q::NAME);
+ let prof_timer = self.prof.incr_cache_loading();
let result = Q::try_load_from_disk(self, prev_dep_node_index);
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());

// We always expect to find a cached result for things that
// can be forced from `DepNode`.
@@ -465,13 +466,15 @@ impl<'tcx> TyCtxt<'tcx> {
} else {
// We could not load a result from the on-disk cache, so
// recompute.
- let _prof_timer = self.prof.query_provider(Q::NAME);
+ let prof_timer = self.prof.query_provider();

// The dep-graph for this computation is already in-place.
let result = self.dep_graph.with_ignore(|| {
Q::compute(self, key)
});

+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());

result
};

@@ -534,7 +537,7 @@ impl<'tcx> TyCtxt<'tcx> {
- dep-node: {:?}",
key, dep_node);

- let prof_timer = self.prof.query_provider(Q::NAME);
+ let prof_timer = self.prof.query_provider();

let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
self.start_query(job.job.clone(), diagnostics, |tcx| {
@@ -554,7 +557,7 @@ impl<'tcx> TyCtxt<'tcx> {
})
});

- drop(prof_timer);
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());

if unlikely!(!diagnostics.is_empty()) {
if dep_node.kind != crate::dep_graph::DepKind::Null {
@@ -586,17 +589,19 @@ impl<'tcx> TyCtxt<'tcx> {

let dep_node = Q::to_dep_node(self, &key);

- if self.dep_graph.try_mark_green_and_read(self, &dep_node).is_none() {
- // A None return from `try_mark_green_and_read` means that this is either
- // a new dep node or that the dep node has already been marked red.
- // Either way, we can't call `dep_graph.read()` as we don't have the
- // DepNodeIndex. We must invoke the query itself. The performance cost
- // this introduces should be negligible as we'll immediately hit the
- // in-memory cache, or another query down the line will.
-
- let _ = self.get_query::<Q>(DUMMY_SP, key);
- } else {
- self.prof.query_cache_hit(Q::NAME);
+ match self.dep_graph.try_mark_green_and_read(self, &dep_node) {
+ None => {
+ // A None return from `try_mark_green_and_read` means that this is either
+ // a new dep node or that the dep node has already been marked red.
+ // Either way, we can't call `dep_graph.read()` as we don't have the
+ // DepNodeIndex. We must invoke the query itself. The performance cost
+ // this introduces should be negligible as we'll immediately hit the
+ // in-memory cache, or another query down the line will.
+ let _ = self.get_query::<Q>(DUMMY_SP, key);
+ }
+ Some((_, dep_node_index)) => {
+ self.prof.query_cache_hit(dep_node_index.into());
+ }
+ }
}

@@ -713,6 +718,42 @@ macro_rules! define_queries_inner {
}
}

+ /// All self-profiling events generated by the query engine use
+ /// virtual `StringId`s for their `event_id`s. This method makes all
+ /// those virtual `StringId`s point to actual strings.
+ ///
+ /// If we are recording only summary data, the ids will point to
+ /// just the query names. If we are recording query keys too, we
+ /// allocate the corresponding strings here. (The latter is not yet
+ /// implemented.)
+ pub fn allocate_self_profile_query_strings(
+ &self,
+ profiler: &rustc_data_structures::profiling::SelfProfiler
+ ) {
+ // Walk the entire query cache and allocate the appropriate
+ // string representation. Each cache entry is uniquely
+ // identified by its dep_node_index.
+ $({
+ let query_name_string_id =
+ profiler.get_or_alloc_cached_string(stringify!($name));
+
+ let result_cache = self.$name.lock_shards();
+
+ for shard in result_cache.iter() {
+ let query_invocation_ids = shard
+ .results
+ .values()
+ .map(|v| v.index)
+ .map(|dep_node_index| dep_node_index.into());
+
+ profiler.bulk_map_query_invocation_id_to_single_string(
+ query_invocation_ids,
+ query_name_string_id
+ );
+ }
+ })*
+ }

#[cfg(parallel_compiler)]
pub fn collect_active_jobs(&self) -> Vec<Lrc<QueryJob<$tcx>>> {
let mut jobs = Vec::new();
@@ -830,36 +871,6 @@ macro_rules! define_queries_inner {
}
}

- #[allow(nonstandard_style)]
- #[derive(Clone, Copy)]
- pub enum QueryName {
- $($name),*
- }
-
- impl rustc_data_structures::profiling::QueryName for QueryName {
- fn discriminant(self) -> std::mem::Discriminant<QueryName> {
- std::mem::discriminant(&self)
- }
-
- fn as_str(self) -> &'static str {
- QueryName::as_str(&self)
- }
- }
-
- impl QueryName {
- pub fn register_with_profiler(
- profiler: &rustc_data_structures::profiling::SelfProfiler,
- ) {
- $(profiler.register_query_name(QueryName::$name);)*
- }
-
- pub fn as_str(&self) -> &'static str {
- match self {
- $(QueryName::$name => stringify!($name),)*
- }
- }
- }

#[allow(nonstandard_style)]
#[derive(Clone, Debug)]
pub enum Query<$tcx> {
@@ -900,12 +911,6 @@ macro_rules! define_queries_inner {
$(Query::$name(key) => key.default_span(tcx),)*
}
}

- pub fn query_name(&self) -> QueryName {
- match self {
- $(Query::$name(key) => QueryName::$name,)*
- }
- }
}

impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
@@ -940,7 +945,7 @@ macro_rules! define_queries_inner {
type Key = $K;
type Value = $V;

- const NAME: QueryName = QueryName::$name;
+ const NAME: &'static str = stringify!($name);
const CATEGORY: ProfileCategory = $category;
}

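
A simplified model of what the generated `allocate_self_profile_query_strings` body above does may help: the query name is interned once per query, and every invocation id found in that query's sharded result cache is then pointed at that single string. `Profiler`, `StringId`, and the shard layout below are stand-ins, not the real rustc or measureme types.

```rust
use std::collections::HashMap;

#[derive(Clone, Copy)]
struct StringId(u32);

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct QueryInvocationId(u32);

#[derive(Default)]
struct Profiler {
    strings: Vec<String>,
    event_id_map: HashMap<QueryInvocationId, StringId>,
}

impl Profiler {
    // Intern a string once and reuse its id: the query name is allocated a
    // single time per query, no matter how many invocations there were.
    fn get_or_alloc_cached_string(&mut self, s: &str) -> StringId {
        if let Some(pos) = self.strings.iter().position(|x| x == s) {
            return StringId(pos as u32);
        }
        self.strings.push(s.to_string());
        StringId((self.strings.len() - 1) as u32)
    }

    // Point many invocation ids at one string -- the "summary data" mode
    // described in the doc comment of the new method.
    fn bulk_map_query_invocation_id_to_single_string(
        &mut self,
        ids: impl Iterator<Item = QueryInvocationId>,
        s: StringId,
    ) {
        for id in ids {
            self.event_id_map.insert(id, s);
        }
    }
}

fn main() {
    let mut profiler = Profiler::default();

    // Pretend these invocation ids were collected from one query's result
    // cache shards (each is the `DepNodeIndex` of a cache entry).
    let shards: Vec<Vec<QueryInvocationId>> =
        vec![vec![QueryInvocationId(3), QueryInvocationId(7)], vec![QueryInvocationId(11)]];

    let name_id = profiler.get_or_alloc_cached_string("typeck_tables_of");
    for shard in &shards {
        profiler.bulk_map_query_invocation_id_to_single_string(shard.iter().copied(), name_id);
    }

    assert_eq!(profiler.event_id_map.len(), 3);
}
```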
15 changes: 12 additions & 3 deletions src/librustc_codegen_ssa/base.rs
@@ -502,7 +502,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>(

ongoing_codegen.codegen_finished(tcx);

- assert_and_save_dep_graph(tcx);
+ finalize_tcx(tcx);

ongoing_codegen.check_for_errors(tcx.sess);

@@ -647,7 +647,8 @@ pub fn codegen_crate<B: ExtraBackendMethods>(

ongoing_codegen.check_for_errors(tcx.sess);

- assert_and_save_dep_graph(tcx);
+ finalize_tcx(tcx);

ongoing_codegen.into_inner()
}

@@ -698,14 +699,22 @@ impl<B: ExtraBackendMethods> Drop for AbortCodegenOnDrop<B> {
}
}

- fn assert_and_save_dep_graph(tcx: TyCtxt<'_>) {
+ fn finalize_tcx(tcx: TyCtxt<'_>) {
time(tcx.sess,
"assert dep graph",
|| ::rustc_incremental::assert_dep_graph(tcx));

time(tcx.sess,
"serialize dep graph",
|| ::rustc_incremental::save_dep_graph(tcx));

+ // We assume that no queries are run past here. If there are new queries
+ // after this point, they'll show up as "<unknown>" in self-profiling data.
+ tcx.prof.with_profiler(|profiler| {
+ let _prof_timer =
+ tcx.prof.generic_activity("self_profile_alloc_query_strings");
+ tcx.queries.allocate_self_profile_query_strings(profiler);
+ });
}

impl CrateInfo {
2 changes: 1 addition & 1 deletion src/librustc_data_structures/Cargo.toml
@@ -26,7 +26,7 @@ rustc-hash = "1.0.1"
smallvec = { version = "1.0", features = ["union", "may_dangle"] }
rustc_index = { path = "../librustc_index", package = "rustc_index" }
bitflags = "1.2.1"
measureme = "0.5"
measureme = "0.6.0"

[dependencies.parking_lot]
version = "0.9"
