Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Event monitor: Batch together all events from all transactions included in a block #958

Merged
merged 8 commits into from
May 21, 2021
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 20 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,24 @@

## Unreleased

> Nothing yet,
### FEATURES

> Nothing

### IMPROVEMENTS

- [ibc-relayer]
- Bulk events from all transactions included in a block ([#957])

### BUG FIXES

> Nothing

### BREAKING CHANGES

> Nothing

[#957]: https://github.com/informalsystems/ibc-rs/issues/957

## v0.3.1
*May 14th, 2021*
Expand Down Expand Up @@ -32,6 +49,8 @@ as well as support Protobuf-encoded keys.

### BREAKING CHANGES

> Nothing


[#875]: https://github.com/informalsystems/ibc-rs/issues/875
[#920]: https://github.com/informalsystems/ibc-rs/issues/920
Expand Down
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions relayer/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ tonic = "0.4"
dirs-next = "2.0.0"
dyn-clone = "1.0.3"
retry = { version = "1.2.1", default-features = false }
async-stream = "0.3.1"

[dependencies.tendermint]
version = "=0.19.0"
Expand Down
93 changes: 58 additions & 35 deletions relayer/src/event/monitor.rs
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
use std::sync::Arc;

use crossbeam_channel as channel;
use futures::stream::StreamExt;
use futures::{stream::select_all, Stream};
use itertools::Itertools;
use futures::{
pin_mut,
stream::{self, select_all, StreamExt},
Stream,
};
use thiserror::Error;
use tokio::task::JoinHandle;
use tokio::{runtime::Runtime as TokioRuntime, sync::mpsc};
use tracing::{error, info, trace};
use tracing::{debug, error, info, trace};

use tendermint_rpc::{
event::Event as RpcEvent,
Expand All @@ -18,7 +20,10 @@ use tendermint_rpc::{

use ibc::{events::IbcEvent, ics02_client::height::Height, ics24_host::identifier::ChainId};

use crate::util::retry::{retry_with_index, RetryResult};
use crate::util::{
retry::{retry_with_index, RetryResult},
stream::group_while,
};

mod retry_strategy {
use crate::util::retry::clamp_total;
Expand Down Expand Up @@ -276,24 +281,30 @@ impl EventMonitor {

/// Event monitor loop
pub fn run(mut self) {
info!(chain.id = %self.chain_id, "starting event monitor");
debug!(chain.id = %self.chain_id, "starting event monitor");

let rt = self.rt.clone();

// Take ownership of the subscriptions
let subscriptions =
std::mem::replace(&mut self.subscriptions, Box::new(futures::stream::empty()));

// Convert the stream of RPC events into a stream of event batches.
let batches = stream_batches(subscriptions, self.chain_id.clone());

// Needed to be able to poll the stream
pin_mut!(batches);

loop {
let result = rt.block_on(async {
tokio::select! {
Some(event) = self.subscriptions.next() => {
event
.map_err(Error::NextEventBatchFailed)
.and_then(|e| self.collect_events(e))
},
Some(batch) = batches.next() => Ok(batch),
Some(e) = self.rx_err.recv() => Err(Error::WebSocketDriver(e)),
}
});

match result {
Ok(batches) => self.process_batches(batches).unwrap_or_else(|e| {
Ok(batch) => self.process_batch(batch).unwrap_or_else(|e| {
error!("failed to process event batch: {}", e);
}),
Err(e) => {
Expand All @@ -307,31 +318,43 @@ impl EventMonitor {
}

/// Collect the IBC events from the subscriptions
fn process_batches(&self, batches: Vec<EventBatch>) -> Result<()> {
for batch in batches {
self.tx_batch
.send(Ok(batch))
.map_err(|_| Error::ChannelSendFailed)?;
}
/// Forward a single event batch to subscribers over the `tx_batch` channel.
///
/// Fails with `Error::ChannelSendFailed` when the receiving end of the
/// channel has been dropped.
fn process_batch(&self, batch: EventBatch) -> Result<()> {
    self.tx_batch
        .send(Ok(batch))
        .map_err(|_| Error::ChannelSendFailed)?;

    Ok(())
}
}

/// Collect the IBC events from the subscriptions
fn collect_events(&mut self, event: RpcEvent) -> Result<Vec<EventBatch>> {
let ibc_events = crate::event::rpc::get_all_events(&self.chain_id, event)
.map_err(Error::CollectEventsFailed)?;

let events_by_height = ibc_events.into_iter().into_group_map();
let batches = events_by_height
.into_iter()
.map(|(height, events)| EventBatch {
chain_id: self.chain_id.clone(),
height,
events,
})
.collect();

Ok(batches)
}
/// Extract the IBC events carried by a single RPC event, as a stream of
/// `(height, event)` pairs.
///
/// NOTE(review): extraction failures are silently discarded here
/// (`unwrap_or_default`), producing an empty stream for that RPC event.
fn collect_events(chain_id: &ChainId, event: RpcEvent) -> impl Stream<Item = (Height, IbcEvent)> {
    let ibc_events = crate::event::rpc::get_all_events(chain_id, event);
    stream::iter(ibc_events.unwrap_or_default())
}

/// Turn a stream of RPC events into a stream of event batches, where each
/// batch gathers all the IBC events extracted for a single height.
///
/// RPC errors on the subscription stream are skipped; consecutive events
/// sharing the same height are grouped into one `EventBatch`.
fn stream_batches(
    subscriptions: Box<SubscriptionStream>,
    chain_id: ChainId,
) -> impl Stream<Item = EventBatch> {
    let id = chain_id.clone();

    // Drop failed RPC events and flatten each remaining one into its
    // `(height, ibc_event)` pairs.
    let events = subscriptions
        .filter_map(|rpc_event| async { rpc_event.ok() })
        .flat_map(move |rpc_event| collect_events(&id, rpc_event));

    // Group consecutive pairs by height, then turn each group into a batch.
    group_while(events, |(h0, _), (h1, _)| h0 == h1).map(move |group| {
        // SAFETY: `group_while` never yields an empty group.
        let height = group
            .first()
            .map(|(h, _)| *h)
            .expect("internal error: found empty group");

        EventBatch {
            height,
            chain_id: chain_id.clone(),
            events: group.into_iter().map(|(_, event)| event).collect(),
        }
    })
}
1 change: 1 addition & 0 deletions relayer/src/util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,4 @@ pub use recv_multiple::{recv_multiple, try_recv_multiple};
pub mod iter;
pub mod retry;
pub mod sled;
pub mod stream;
74 changes: 74 additions & 0 deletions relayer/src/util/stream.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
use async_stream::stream;
use futures::stream::Stream;

/// Group consecutive items of the input stream into `Vec`s, starting a new
/// group whenever `group_these` returns `false` for two adjacent items.
///
/// Every yielded group is non-empty, and concatenating all groups in order
/// yields back the input stream's items.
///
/// ## Example
///
/// ```rust,ignore
/// let input = stream::iter(vec![0, 0, 0, 1, 1, 2, 3, 3, 3, 3]);
/// let output = group_while(input, |a, b| a == b).collect::<Vec<_>>().await;
/// assert_eq!(output, vec![vec![0, 0, 0], vec![1, 1], vec![2], vec![3, 3, 3, 3]]);
/// ```
pub fn group_while<A, S, F>(input: S, group_these: F) -> impl Stream<Item = Vec<A>>
where
    S: Stream<Item = A>,
    F: Fn(&A, &A) -> bool + 'static,
{
    // `cur` is the most recent item seen; `group` holds the items before it
    // that belong to the same group. The open group is thus `group + [cur]`.
    struct State<A> {
        cur: A,
        group: Vec<A>,
    }

    stream! {
        let mut state = None;

        for await x in input {
            match &mut state {
                // First item: it opens the first group.
                None => {
                    state = Some(State { cur: x, group: vec![] });
                },
                // `x` belongs to the open group: shift `cur` into the group.
                Some(state) if group_these(&state.cur, &x) => {
                    let prev = std::mem::replace(&mut state.cur, x);
                    state.group.push(prev);
                },
                // `x` opens a new group: close and yield the current one.
                Some(state) => {
                    let cur = std::mem::replace(&mut state.cur, x);
                    state.group.push(cur);
                    let group = std::mem::take(&mut state.group);
                    yield group;
                }
            }
        }

        // Flush the last (possibly only) group, if the input was non-empty.
        if let Some(State { cur, mut group }) = state {
            group.push(cur);
            yield group;
        }
    }
}

#[cfg(test)]
mod tests {
    use super::group_while;
    use futures::{executor::block_on, stream, StreamExt};

    /// Consecutive equal elements are grouped together, with group
    /// boundaries exactly where adjacent elements differ.
    #[test]
    fn group_while_non_empty() {
        let items = stream::iter(vec![1, 1, 2, 3, 3, 3, 4, 5, 5]);
        let grouped = block_on(group_while(items, |a, b| a == b).collect::<Vec<_>>());

        assert_eq!(
            grouped,
            vec![vec![1, 1], vec![2], vec![3, 3, 3], vec![4], vec![5, 5]]
        );
    }

    /// An empty input stream produces no groups at all.
    #[test]
    fn group_while_empty() {
        let empty = stream::iter(Vec::<i32>::new());
        let grouped = block_on(group_while(empty, |a, b| a == b).collect::<Vec<_>>());

        assert_eq!(grouped, Vec::<Vec<i32>>::new());
    }
}