Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 7591ef0

Browse files
committed Jan 29, 2021
quinn-proto: change defragmentation strategy
1 parent b87741c commit 7591ef0

File tree

1 file changed

+51
-15
lines changed

1 file changed

+51
-15
lines changed
 

‎quinn-proto/src/connection/assembler.rs

+51-15
Original file line number · Diff line number · Diff line change
@@ -94,7 +94,19 @@ impl Assembler {
9494
// counter to the new number of chunks left in the heap so that we can decide
9595
// when to defragment the queue again if necessary.
9696
fn defragment(&mut self) {
97-
let buffered = self.data.iter().map(|c| c.bytes.len()).sum::<usize>();
97+
let high_utilization_over_allocation = self
98+
.data
99+
.iter()
100+
.filter(|b| b.high_utilization())
101+
.map(|b| b.over_allocation)
102+
.sum::<usize>();
103+
let include_highly_utilized = high_utilization_over_allocation > DEFRAGMENTATION_THRESHOLD;
104+
let buffered = self
105+
.data
106+
.iter()
107+
.filter(|b| b.should_defragment(include_highly_utilized))
108+
.map(|b| b.bytes.len())
109+
.sum::<usize>();
98110
let mut buffer = BytesMut::with_capacity(buffered);
99111
let mut offset = self
100112
.data
@@ -106,20 +118,24 @@ impl Assembler {
106118
let new = BinaryHeap::with_capacity(self.data.len());
107119
let old = mem::replace(&mut self.data, new);
108120
for chunk in old.into_sorted_vec().into_iter().rev() {
109-
let end = offset + (buffer.len() as u64);
110-
if let Some(overlap) = end.checked_sub(chunk.offset) {
111-
if let Some(bytes) = chunk.bytes.get(overlap as usize..) {
112-
buffer.extend_from_slice(bytes);
121+
if chunk.should_defragment(include_highly_utilized) {
122+
let end = offset + (buffer.len() as u64);
123+
if let Some(overlap) = end.checked_sub(chunk.offset) {
124+
if let Some(bytes) = chunk.bytes.get(overlap as usize..) {
125+
buffer.extend_from_slice(bytes);
126+
}
127+
} else {
128+
let bytes = buffer.split().freeze();
129+
self.data.push(Buffer {
130+
offset,
131+
bytes,
132+
over_allocation: 0,
133+
});
134+
offset = chunk.offset;
135+
buffer.extend_from_slice(&chunk.bytes);
113136
}
114137
} else {
115-
let bytes = buffer.split().freeze();
116-
self.data.push(Buffer {
117-
offset,
118-
bytes,
119-
over_allocation: 0,
120-
});
121-
offset = chunk.offset;
122-
buffer.extend_from_slice(&chunk.bytes);
138+
self.data.push(chunk);
123139
}
124140
}
125141

@@ -129,7 +145,11 @@ impl Assembler {
129145
bytes,
130146
over_allocation: 0,
131147
});
132-
self.over_allocation = 0;
148+
self.over_allocation = if include_highly_utilized {
149+
0
150+
} else {
151+
high_utilization_over_allocation
152+
};
133153
}
134154

135155
pub(crate) fn insert(&mut self, mut offset: u64, mut bytes: Bytes, mut allocation_size: usize) {
@@ -170,7 +190,7 @@ impl Assembler {
170190
// of memory allocated. In a worst case scenario like 32 1-byte chunks,
171191
// each one from a ~1000-byte datagram, this limits us to having a
172192
// maximum pathological over-allocation of about 32k bytes.
173-
if self.over_allocation > 32 * 1024 {
193+
if self.over_allocation > DEFRAGMENTATION_THRESHOLD {
174194
self.defragment()
175195
}
176196
}
@@ -208,6 +228,8 @@ impl Assembler {
208228
}
209229
}
210230

231+
/// Number of over-allocated bytes tolerated before `defragment` runs; also
/// used inside `defragment` to decide whether highly utilized chunks must be
/// merged as well. 32 KiB bounds the worst-case pathological over-allocation
/// (e.g. many 1-byte chunks each pinning a ~1000-byte datagram allocation).
const DEFRAGMENTATION_THRESHOLD: usize = 32 * 1024;
232+
211233
/// A chunk of data from the receive stream
212234
#[non_exhaustive]
213235
#[derive(Debug, PartialEq)]
@@ -231,6 +253,20 @@ struct Buffer {
231253
over_allocation: usize,
232254
}
233255

256+
impl Buffer {
257+
fn high_utilization(&self) -> bool {
258+
self.bytes.len() >= self.over_allocation
259+
}
260+
261+
fn should_defragment(&self, include_highly_utilized: bool) -> bool {
262+
if include_highly_utilized {
263+
self.over_allocation > 0
264+
} else {
265+
!self.high_utilization()
266+
}
267+
}
268+
}
269+
234270
impl Ord for Buffer {
235271
// Invert ordering based on offset (max-heap, min offset first),
236272
// prioritize longer chunks at the same offset.

0 commit comments

Comments
 (0)
Please sign in to comment.