Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use more fine grained locks for the dep graph #63756

Merged
merged 1 commit into from
Oct 17, 2019
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
132 changes: 72 additions & 60 deletions src/librustc/dep_graph/graph.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,8 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::vec::{Idx, IndexVec};
use smallvec::SmallVec;
use rustc_data_structures::sync::{Lrc, Lock, AtomicU32, Ordering};
use rustc_data_structures::sync::{Lrc, Lock, AtomicU32, AtomicU64, Ordering};
use std::sync::atomic::Ordering::SeqCst;
use std::env;
use std::hash::Hash;
use std::collections::hash_map::Entry;
Expand Down Expand Up @@ -53,7 +54,7 @@ struct DepGraphData {
/// tracking. The `current` field is the dependency graph of only the
/// current compilation session: We don't merge the previous dep-graph into
/// current one anymore.
current: Lock<CurrentDepGraph>,
current: CurrentDepGraph,

/// The dep-graph from the previous compilation session. It contains all
/// nodes and edges as well as all fingerprints of nodes that have them.
Expand Down Expand Up @@ -95,7 +96,7 @@ impl DepGraph {
data: Some(Lrc::new(DepGraphData {
previous_work_products: prev_work_products,
dep_node_debug: Default::default(),
current: Lock::new(CurrentDepGraph::new(prev_graph_node_count)),
current: CurrentDepGraph::new(prev_graph_node_count),
emitting_diagnostics: Default::default(),
emitting_diagnostics_cond_var: Condvar::new(),
previous: prev_graph,
Expand All @@ -117,13 +118,12 @@ impl DepGraph {
}

pub fn query(&self) -> DepGraphQuery {
let current_dep_graph = self.data.as_ref().unwrap().current.borrow();
let nodes: Vec<_> = current_dep_graph.data.iter().map(|n| n.node).collect();
let data = self.data.as_ref().unwrap().current.data.lock();
let nodes: Vec<_> = data.iter().map(|n| n.node).collect();
let mut edges = Vec::new();
for (from, edge_targets) in current_dep_graph.data.iter()
.map(|d| (d.node, &d.edges)) {
for (from, edge_targets) in data.iter().map(|d| (d.node, &d.edges)) {
for &edge_target in edge_targets.iter() {
let to = current_dep_graph.data[edge_target].node;
let to = data[edge_target].node;
edges.push((from, to));
}
}
Expand Down Expand Up @@ -202,7 +202,7 @@ impl DepGraph {
read_set: Default::default(),
}),
|data, key, fingerprint, task| {
data.borrow_mut().complete_task(key, task.unwrap(), fingerprint)
data.complete_task(key, task.unwrap(), fingerprint)
},
hash_result)
}
Expand All @@ -223,7 +223,7 @@ impl DepGraph {
self.with_task_impl(key, cx, input, true, identity_fn,
|_| None,
|data, key, fingerprint, _| {
data.borrow_mut().alloc_node(key, SmallVec::new(), fingerprint)
data.alloc_node(key, SmallVec::new(), fingerprint)
},
hash_result::<R>)
}
Expand All @@ -236,7 +236,7 @@ impl DepGraph {
no_tcx: bool,
task: fn(C, A) -> R,
create_task: fn(DepNode) -> Option<TaskDeps>,
finish_task_and_alloc_depnode: fn(&Lock<CurrentDepGraph>,
finish_task_and_alloc_depnode: fn(&CurrentDepGraph,
DepNode,
Fingerprint,
Option<TaskDeps>) -> DepNodeIndex,
Expand Down Expand Up @@ -350,7 +350,6 @@ impl DepGraph {
(r, task_deps.into_inner())
});
let dep_node_index = data.current
.borrow_mut()
.complete_anon_task(dep_kind, task_deps);
(result, dep_node_index)
} else {
Expand All @@ -374,18 +373,17 @@ impl DepGraph {
self.with_task_impl(key, cx, arg, false, task,
|_| None,
|data, key, fingerprint, _| {
let mut current = data.borrow_mut();
current.alloc_node(key, smallvec![], fingerprint)
data.alloc_node(key, smallvec![], fingerprint)
},
hash_result)
}

#[inline]
pub fn read(&self, v: DepNode) {
if let Some(ref data) = self.data {
let current = data.current.borrow_mut();
if let Some(&dep_node_index) = current.node_to_node_index.get(&v) {
std::mem::drop(current);
let map = data.current.node_to_node_index.lock();
if let Some(dep_node_index) = map.get(&v).copied() {
std::mem::drop(map);
data.read_index(dep_node_index);
} else {
bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
Expand All @@ -406,8 +404,8 @@ impl DepGraph {
.as_ref()
.unwrap()
.current
.borrow_mut()
.node_to_node_index
.lock()
.get(dep_node)
.cloned()
.unwrap()
Expand All @@ -416,16 +414,16 @@ impl DepGraph {
#[inline]
pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
if let Some(ref data) = self.data {
data.current.borrow_mut().node_to_node_index.contains_key(dep_node)
data.current.node_to_node_index.lock().contains_key(dep_node)
} else {
false
}
}

#[inline]
pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
let current = self.data.as_ref().expect("dep graph enabled").current.borrow_mut();
current.data[dep_node_index].fingerprint
let data = self.data.as_ref().expect("dep graph enabled").current.data.lock();
data[dep_node_index].fingerprint
}

pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
Expand Down Expand Up @@ -479,32 +477,29 @@ impl DepGraph {

pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
if cfg!(debug_assertions) {
let current_dep_graph = self.data.as_ref().unwrap().current.borrow();
let current_dep_graph = &self.data.as_ref().unwrap().current;

Some((current_dep_graph.total_read_count,
current_dep_graph.total_duplicate_read_count))
Some((current_dep_graph.total_read_count.load(SeqCst),
current_dep_graph.total_duplicate_read_count.load(SeqCst)))
} else {
None
}
}

pub fn serialize(&self) -> SerializedDepGraph {
let current_dep_graph = self.data.as_ref().unwrap().current.borrow();
let data = self.data.as_ref().unwrap().current.data.lock();

let fingerprints: IndexVec<SerializedDepNodeIndex, _> =
current_dep_graph.data.iter().map(|d| d.fingerprint).collect();
data.iter().map(|d| d.fingerprint).collect();
let nodes: IndexVec<SerializedDepNodeIndex, _> =
current_dep_graph.data.iter().map(|d| d.node).collect();
data.iter().map(|d| d.node).collect();

let total_edge_count: usize = current_dep_graph.data.iter()
.map(|d| d.edges.len())
.sum();
let total_edge_count: usize = data.iter().map(|d| d.edges.len()).sum();

let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
let mut edge_list_data = Vec::with_capacity(total_edge_count);

for (current_dep_node_index, edges) in current_dep_graph.data.iter_enumerated()
.map(|(i, d)| (i, &d.edges)) {
for (current_dep_node_index, edges) in data.iter_enumerated().map(|(i, d)| (i, &d.edges)) {
let start = edge_list_data.len() as u32;
// This should really just be a memcpy :/
edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
Expand Down Expand Up @@ -600,7 +595,7 @@ impl DepGraph {

#[cfg(not(parallel_compiler))]
{
debug_assert!(!data.current.borrow().node_to_node_index.contains_key(dep_node));
debug_assert!(!data.current.node_to_node_index.lock().contains_key(dep_node));
debug_assert!(data.colors.get(prev_dep_node_index).is_none());
}

Expand Down Expand Up @@ -733,15 +728,13 @@ impl DepGraph {
// There may be multiple threads trying to mark the same dep node green concurrently

let dep_node_index = {
let mut current = data.current.borrow_mut();

// Copy the fingerprint from the previous graph,
// so we don't have to recompute it
let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);

// We allocating an entry for the node in the current dependency graph and
// adding all the appropriate edges imported from the previous graph
current.intern_node(*dep_node, current_deps, fingerprint)
data.current.intern_node(*dep_node, current_deps, fingerprint)
};

// ... emitting any stored diagnostic ...
Expand Down Expand Up @@ -917,9 +910,27 @@ struct DepNodeData {
fingerprint: Fingerprint,
}

/// `CurrentDepGraph` stores the dependency graph for the current session.
/// It will be populated as we run queries or tasks.
///
/// The nodes in it are identified by an index (`DepNodeIndex`).
/// The data for each node is stored in its `DepNodeData`, found in the `data` field.
///
/// We never remove nodes from the graph: they are only added.
///
/// This struct uses two locks internally. The `data` and `node_to_node_index` fields are
/// locked separately. Operations that take a `DepNodeIndex` typically just access
/// the data field.
///
/// The only operation that must manipulate both locks is adding new nodes, in which case
/// we first acquire the `node_to_node_index` lock and then, once a new node is to be inserted,
/// acquire the lock on `data.`
pub(super) struct CurrentDepGraph {
data: IndexVec<DepNodeIndex, DepNodeData>,
node_to_node_index: FxHashMap<DepNode, DepNodeIndex>,
data: Lock<IndexVec<DepNodeIndex, DepNodeData>>,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The comment below is great -- but it'd be good to document just a bit more (this is pre-existing, as the code wasn't all that thoroughly commented before =)

I'd say something like:


Encodes the rustc dependency graph. The nodes are identified by an index (DepNodeIndex); the data for each node is stored in its DepNodeData, found in the data field.

We never remove nodes from the graph: they are only added. Most of the time, the node is mapped 1-to-1 with some DepNode, which basically identifies a query. When such nodes are allocated, we add the DepNode into the node_to_node_index map and allocate a fresh node index.

This struct uses two locks internally: the data and node_to_node_index field are locked separately. Operations that begin with a DepNodeIndex typically just access the data field.

The only operation that must manipulate both fields is adding new nodes, in which case we first acquire the node_to_node_index lock and then, once a new node is to be inserted, acquire the lock on data.

(We also have a few fields that use atomics: these are simple counters that are used for profiling and debugging and are not used with debug_assertions.)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I added that, but removed the incorrect parts =P

node_to_node_index: Lock<FxHashMap<DepNode, DepNodeIndex>>,
Zoxc marked this conversation as resolved.
Show resolved Hide resolved

/// Used to trap when a specific edge is added to the graph.
/// This is used for debug purposes and is only active with `debug_assertions`.
#[allow(dead_code)]
forbidden_edge: Option<EdgeFilter>,

Expand All @@ -936,8 +947,10 @@ pub(super) struct CurrentDepGraph {
/// the `DepGraph` is created.
anon_id_seed: Fingerprint,

total_read_count: u64,
total_duplicate_read_count: u64,
/// These are simple counters that are for profiling and
/// debugging and only active with `debug_assertions`.
total_read_count: AtomicU64,
total_duplicate_read_count: AtomicU64,
}

impl CurrentDepGraph {
Expand Down Expand Up @@ -971,28 +984,28 @@ impl CurrentDepGraph {
let new_node_count_estimate = (prev_graph_node_count * 102) / 100 + 200;

CurrentDepGraph {
data: IndexVec::with_capacity(new_node_count_estimate),
node_to_node_index: FxHashMap::with_capacity_and_hasher(
data: Lock::new(IndexVec::with_capacity(new_node_count_estimate)),
node_to_node_index: Lock::new(FxHashMap::with_capacity_and_hasher(
new_node_count_estimate,
Default::default(),
),
)),
anon_id_seed: stable_hasher.finish(),
forbidden_edge,
total_read_count: 0,
total_duplicate_read_count: 0,
total_read_count: AtomicU64::new(0),
total_duplicate_read_count: AtomicU64::new(0),
}
}

fn complete_task(
&mut self,
&self,
node: DepNode,
task_deps: TaskDeps,
fingerprint: Fingerprint
) -> DepNodeIndex {
self.alloc_node(node, task_deps.reads, fingerprint)
}

fn complete_anon_task(&mut self, kind: DepKind, task_deps: TaskDeps) -> DepNodeIndex {
fn complete_anon_task(&self, kind: DepKind, task_deps: TaskDeps) -> DepNodeIndex {
debug_assert!(!kind.is_eval_always());

let mut hasher = StableHasher::new();
Expand All @@ -1017,28 +1030,27 @@ impl CurrentDepGraph {
}

fn alloc_node(
&mut self,
&self,
dep_node: DepNode,
edges: SmallVec<[DepNodeIndex; 8]>,
fingerprint: Fingerprint
) -> DepNodeIndex {
debug_assert!(!self.node_to_node_index.contains_key(&dep_node));
debug_assert!(!self.node_to_node_index.lock().contains_key(&dep_node));
self.intern_node(dep_node, edges, fingerprint)
}

fn intern_node(
&mut self,
&self,
dep_node: DepNode,
edges: SmallVec<[DepNodeIndex; 8]>,
fingerprint: Fingerprint
) -> DepNodeIndex {
debug_assert_eq!(self.node_to_node_index.len(), self.data.len());

match self.node_to_node_index.entry(dep_node) {
match self.node_to_node_index.lock().entry(dep_node) {
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
let dep_node_index = DepNodeIndex::new(self.data.len());
self.data.push(DepNodeData {
let mut data = self.data.lock();
let dep_node_index = DepNodeIndex::new(data.len());
data.push(DepNodeData {
node: dep_node,
edges,
fingerprint
Expand All @@ -1057,17 +1069,17 @@ impl DepGraphData {
if let Some(task_deps) = icx.task_deps {
let mut task_deps = task_deps.lock();
if cfg!(debug_assertions) {
self.current.lock().total_read_count += 1;
self.current.total_read_count.fetch_add(1, SeqCst);
}
if task_deps.read_set.insert(source) {
task_deps.reads.push(source);

#[cfg(debug_assertions)]
{
if let Some(target) = task_deps.node {
let graph = self.current.lock();
if let Some(ref forbidden_edge) = graph.forbidden_edge {
let source = graph.data[source].node;
let data = self.current.data.lock();
if let Some(ref forbidden_edge) = self.current.forbidden_edge {
let source = data[source].node;
if forbidden_edge.test(&source, &target) {
bug!("forbidden edge {:?} -> {:?} created",
source,
Expand All @@ -1077,7 +1089,7 @@ impl DepGraphData {
}
}
} else if cfg!(debug_assertions) {
self.current.lock().total_duplicate_read_count += 1;
self.current.total_duplicate_read_count.fetch_add(1, SeqCst);
}
}
})
Expand Down