diff --git a/crates/re_data_store/examples/memory_usage.rs b/crates/re_data_store/examples/memory_usage.rs index fae5be192c39..27e50c6a49bb 100644 --- a/crates/re_data_store/examples/memory_usage.rs +++ b/crates/re_data_store/examples/memory_usage.rs @@ -48,7 +48,7 @@ fn live_bytes() -> usize { // ---------------------------------------------------------------------------- -use re_log_types::{entity_path, DataRow, RecordingId, RecordingType, RowId}; +use re_log_types::{entity_path, DataRow, RowId, StoreId, StoreKind}; fn main() { log_messages(); @@ -91,7 +91,7 @@ fn log_messages() { const NUM_POINTS: usize = 1_000; - let recording_id = RecordingId::random(RecordingType::Data); + let store_id = StoreId::random(StoreKind::Recording); let timeline = Timeline::new_sequence("frame_nr"); let mut time_point = TimePoint::default(); time_point.insert(timeline, TimeInt::from(0)); @@ -118,7 +118,7 @@ fn log_messages() { ); let table_bytes = live_bytes() - used_bytes_start; let log_msg = Box::new(LogMsg::ArrowMsg( - recording_id.clone(), + store_id.clone(), table.to_arrow_msg().unwrap(), )); let log_msg_bytes = live_bytes() - used_bytes_start; @@ -143,10 +143,7 @@ fn log_messages() { .into_table(), ); let table_bytes = live_bytes() - used_bytes_start; - let log_msg = Box::new(LogMsg::ArrowMsg( - recording_id, - table.to_arrow_msg().unwrap(), - )); + let log_msg = Box::new(LogMsg::ArrowMsg(store_id, table.to_arrow_msg().unwrap())); let log_msg_bytes = live_bytes() - used_bytes_start; println!("Arrow payload containing a Pos2 uses {table_bytes} bytes in RAM"); let encoded = encode_log_msg(&log_msg); diff --git a/crates/re_data_store/src/instance_path.rs b/crates/re_data_store/src/instance_path.rs index dd4d9b4ae86b..dbdc9532265c 100644 --- a/crates/re_data_store/src/instance_path.rs +++ b/crates/re_data_store/src/instance_path.rs @@ -2,7 +2,7 @@ use std::hash::Hash; use re_log_types::{EntityPath, EntityPathHash, InstanceKey}; -use crate::log_db::EntityDb; +use crate::store_db::EntityDb; // ---------------------------------------------------------------------------- diff --git a/crates/re_data_store/src/lib.rs b/crates/re_data_store/src/lib.rs index 1b9c48780344..1f17ab8975ff 100644 --- a/crates/re_data_store/src/lib.rs +++ b/crates/re_data_store/src/lib.rs @@ -9,13 +9,13 @@ mod editable_auto_value; pub mod entity_properties; pub mod entity_tree; mod instance_path; -pub mod log_db; +pub mod store_db; mod util; pub use entity_properties::*; pub use entity_tree::*; pub use instance_path::*; -pub use log_db::LogDb; +pub use store_db::StoreDb; pub use util::*; #[cfg(feature = "serde")] diff --git a/crates/re_data_store/src/log_db.rs b/crates/re_data_store/src/store_db.rs similarity index 88% rename from crates/re_data_store/src/log_db.rs rename to crates/re_data_store/src/store_db.rs index 1352773c71a0..d08cd6fec0b5 100644 --- a/crates/re_data_store/src/log_db.rs +++ b/crates/re_data_store/src/store_db.rs @@ -4,9 +4,9 @@ use nohash_hasher::IntMap; use re_arrow_store::{DataStoreConfig, TimeInt}; use re_log_types::{ - ArrowMsg, Component as _, ComponentPath, DataCell, DataRow, DataTable, EntityPath, - EntityPathHash, EntityPathOpMsg, InstanceKey, LogMsg, PathOp, RecordingId, RecordingInfo, - RecordingType, RowId, SetRecordingInfo, TimePoint, Timeline, + ApplicationId, ArrowMsg, Component as _, ComponentPath, DataCell, DataRow, DataTable, + EntityPath, EntityPathHash, EntityPathOpMsg, InstanceKey, LogMsg, PathOp, RowId, SetStoreInfo, + StoreId, StoreInfo, StoreKind, TimePoint, Timeline, }; use crate::{Error, 
TimesPerTimeline};
@@ -171,27 +171,27 @@ impl EntityDb {
 
 // ----------------------------------------------------------------------------
 
 /// An in-memory database built from a stream of [`LogMsg`]es.
-pub struct LogDb {
-    /// The [`RecordingId`] for this log.
-    recording_id: RecordingId,
+pub struct StoreDb {
+    /// The [`StoreId`] for this log.
+    store_id: StoreId,
 
     /// All [`EntityPathOpMsg`]s ever received.
     entity_op_msgs: BTreeMap<RowId, EntityPathOpMsg>,
 
-    /// Set by whomever created this [`LogDb`].
+    /// Set by whomever created this [`StoreDb`].
     pub data_source: Option<re_smart_channel::Source>,
 
-    /// Comes in a special message, [`LogMsg::SetRecordingInfo`].
-    recording_msg: Option<SetRecordingInfo>,
+    /// Comes in a special message, [`LogMsg::SetStoreInfo`].
+    recording_msg: Option<SetStoreInfo>,
 
     /// Where we store the entities.
     pub entity_db: EntityDb,
 }
 
-impl LogDb {
-    pub fn new(recording_id: RecordingId) -> Self {
+impl StoreDb {
+    pub fn new(store_id: StoreId) -> Self {
         Self {
-            recording_id,
+            store_id,
             entity_op_msgs: Default::default(),
             data_source: None,
             recording_msg: None,
@@ -199,20 +199,24 @@ impl LogDb {
         }
     }
 
-    pub fn recording_msg(&self) -> Option<&SetRecordingInfo> {
+    pub fn recording_msg(&self) -> Option<&SetStoreInfo> {
         self.recording_msg.as_ref()
     }
 
-    pub fn recording_info(&self) -> Option<&RecordingInfo> {
+    pub fn store_info(&self) -> Option<&StoreInfo> {
         self.recording_msg().map(|msg| &msg.info)
     }
 
-    pub fn recording_type(&self) -> RecordingType {
-        self.recording_id.variant
+    pub fn app_id(&self) -> Option<&ApplicationId> {
+        self.store_info().map(|ri| &ri.application_id)
     }
 
-    pub fn recording_id(&self) -> &RecordingId {
-        &self.recording_id
+    pub fn store_kind(&self) -> StoreKind {
+        self.store_id.kind
+    }
+
+    pub fn store_id(&self) -> &StoreId {
+        &self.store_id
     }
 
     pub fn timelines(&self) -> impl ExactSizeIterator<Item = &Timeline> {
@@ -239,8 +243,10 @@ impl LogDb {
     pub fn add(&mut self, msg: &LogMsg) -> Result<(), Error> {
         re_tracing::profile_function!();
 
+        debug_assert_eq!(msg.store_id(), self.store_id());
+
         match &msg {
-            LogMsg::SetRecordingInfo(msg) => self.add_begin_recording_msg(msg),
+            LogMsg::SetStoreInfo(msg) => self.add_begin_recording_msg(msg),
             LogMsg::EntityPathOpMsg(_, msg) => {
                 let EntityPathOpMsg {
                     row_id,
@@ -256,11 +262,11 @@ impl LogDb {
         Ok(())
     }
 
-    pub fn add_begin_recording_msg(&mut self, msg: &SetRecordingInfo) {
+    pub fn add_begin_recording_msg(&mut self, msg: &SetStoreInfo) {
         self.recording_msg = Some(msg.clone());
     }
 
-    /// Returns an iterator over all [`EntityPathOpMsg`]s that have been written to this `LogDb`.
+    /// Returns an iterator over all [`EntityPathOpMsg`]s that have been written to this `StoreDb`.
     pub fn iter_entity_op_msgs(&self) -> impl Iterator<Item = &EntityPathOpMsg> {
         self.entity_op_msgs.values()
     }
@@ -287,7 +293,7 @@
         let cutoff_times = self.entity_db.data_store.oldest_time_per_timeline();
 
         let Self {
-            recording_id: _,
+            store_id: _,
             entity_op_msgs,
             data_source: _,
             recording_msg: _,
diff --git a/crates/re_data_store/src/util.rs b/crates/re_data_store/src/util.rs
index 6ba7686b045a..75f164aa411f 100644
--- a/crates/re_data_store/src/util.rs
+++ b/crates/re_data_store/src/util.rs
@@ -3,7 +3,7 @@ use re_log_types::{
     DataRow, DeserializableComponent, EntityPath, RowId, SerializableComponent, TimePoint,
     Timeline,
 };
-use crate::LogDb;
+use crate::StoreDb;
 
 // ----------------------------------------------------------------------------
 
@@ -60,7 +60,7 @@ where
 
 /// Store a single value for a given [`re_log_types::Component`].
pub fn store_one_component<C: SerializableComponent>(
-    log_db: &mut LogDb,
+    store_db: &mut StoreDb,
     entity_path: &EntityPath,
     timepoint: &TimePoint,
     component: C,
@@ -74,7 +74,7 @@ pub fn store_one_component<C: SerializableComponent>(
     );
     row.compute_all_size_bytes();
 
-    match log_db.entity_db.try_add_data_row(&row) {
+    match store_db.entity_db.try_add_data_row(&row) {
         Ok(()) => {}
         Err(err) => {
             re_log::warn_once!(
diff --git a/crates/re_data_ui/src/annotation_context.rs b/crates/re_data_ui/src/annotation_context.rs
index a5207bd3341c..3dcc49afa2b4 100644
--- a/crates/re_data_ui/src/annotation_context.rs
+++ b/crates/re_data_ui/src/annotation_context.rs
@@ -78,7 +78,7 @@ fn annotation_info(
     keypoint_id: &re_components::KeypointId,
 ) -> Option<AnnotationInfo> {
     let class_id = ctx
-        .log_db
+        .store_db
         .entity_db
         .data_store
         .query_latest_component::<ClassId>(entity_path, query)?;
diff --git a/crates/re_data_ui/src/component_path.rs b/crates/re_data_ui/src/component_path.rs
index 2bb0eefefdcb..c9ba8e8395ac 100644
--- a/crates/re_data_ui/src/component_path.rs
+++ b/crates/re_data_ui/src/component_path.rs
@@ -11,7 +11,7 @@ impl DataUi for ComponentPath {
         verbosity: UiVerbosity,
         query: &re_arrow_store::LatestAtQuery,
     ) {
-        let store = &ctx.log_db.entity_db.data_store;
+        let store = &ctx.store_db.entity_db.data_store;
 
         if let Some((_, component_data)) = re_query::get_component_with_instances(
             store,
diff --git a/crates/re_data_ui/src/instance_path.rs b/crates/re_data_ui/src/instance_path.rs
index 2f3ac4e93dc5..2db8df3a327c 100644
--- a/crates/re_data_ui/src/instance_path.rs
+++ b/crates/re_data_ui/src/instance_path.rs
@@ -17,7 +17,7 @@ impl DataUi for InstancePath {
         verbosity: UiVerbosity,
         query: &re_arrow_store::LatestAtQuery,
     ) {
-        let store = &ctx.log_db.entity_db.data_store;
+        let store = &ctx.store_db.entity_db.data_store;
 
         let Some(mut components) = store.all_components(&query.timeline, &self.entity_path)
         else {
             ui.label(format!("No components in entity {}", self.entity_path));
diff --git a/crates/re_data_ui/src/log_msg.rs b/crates/re_data_ui/src/log_msg.rs
index 1f2cfc8712e5..a6d4e31455db 100644
--- a/crates/re_data_ui/src/log_msg.rs
+++ b/crates/re_data_ui/src/log_msg.rs
@@ -1,4 +1,4 @@
-use re_log_types::{ArrowMsg, DataTable, EntityPathOpMsg, LogMsg, RecordingInfo, SetRecordingInfo};
+use re_log_types::{ArrowMsg, DataTable, EntityPathOpMsg, LogMsg, SetStoreInfo, StoreInfo};
 use re_viewer_context::{UiVerbosity, ViewerContext};
 
 use super::DataUi;
@@ -13,14 +13,14 @@ impl DataUi for LogMsg {
         query: &re_arrow_store::LatestAtQuery,
     ) {
         match self {
-            LogMsg::SetRecordingInfo(msg) => msg.data_ui(ctx, ui, verbosity, query),
+            LogMsg::SetStoreInfo(msg) => msg.data_ui(ctx, ui, verbosity, query),
             LogMsg::EntityPathOpMsg(_, msg) => msg.data_ui(ctx, ui, verbosity, query),
             LogMsg::ArrowMsg(_, msg) => msg.data_ui(ctx, ui, verbosity, query),
         }
     }
 }
 
-impl DataUi for SetRecordingInfo {
+impl DataUi for SetStoreInfo {
     fn data_ui(
         &self,
         _ctx: &mut ViewerContext<'_>,
@@ -28,15 +28,15 @@ impl DataUi for SetRecordingInfo {
         _verbosity: UiVerbosity,
         _query: &re_arrow_store::LatestAtQuery,
     ) {
-        ui.code("SetRecordingInfo");
-        let SetRecordingInfo { row_id: _, info } = self;
-        let RecordingInfo {
+        ui.code("SetStoreInfo");
+        let SetStoreInfo { row_id: _, info } = self;
+        let StoreInfo {
             application_id,
-            recording_id,
+            store_id,
             started,
-            recording_source,
+            store_source,
             is_official_example,
-            recording_type,
+            store_kind,
         } = info;
 
         egui::Grid::new("fields").num_columns(2).show(ui, |ui| {
@@ -44,24 +44,24 @@ impl DataUi for SetRecordingInfo {
             ui.label(application_id.to_string());
             ui.end_row();
 
-            ui.monospace("recording_id:");
-            ui.label(format!("{recording_id:?}"));
+            ui.monospace("store_id:");
+            ui.label(format!("{store_id:?}"));
             ui.end_row();
 
             ui.monospace("started:");
             ui.label(started.format());
             ui.end_row();
 
-            ui.monospace("recording_source:");
-            ui.label(format!("{recording_source}"));
+            ui.monospace("store_source:");
+            ui.label(format!("{store_source}"));
             ui.end_row();
 
             ui.monospace("is_official_example:");
             ui.label(format!("{is_official_example}"));
             ui.end_row();
 
-            ui.monospace("recording_type:");
-            ui.label(format!("{recording_type}"));
+            ui.monospace("store_kind:");
+            ui.label(format!("{store_kind}"));
             ui.end_row();
         });
     }
diff --git a/crates/re_log_encoding/benches/msg_encode_benchmark.rs b/crates/re_log_encoding/benches/msg_encode_benchmark.rs
index 6c51867b0adf..6d4171256cba 100644
--- a/crates/re_log_encoding/benches/msg_encode_benchmark.rs
+++ b/crates/re_log_encoding/benches/msg_encode_benchmark.rs
@@ -7,7 +7,7 @@ static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
 
 use re_components::datagen::{build_frame_nr, build_some_colors, build_some_point2d};
 use re_log_types::{
-    entity_path, DataRow, DataTable, Index, LogMsg, RecordingId, RecordingType, RowId, TableId,
+    entity_path, DataRow, DataTable, Index, LogMsg, RowId, StoreId, StoreKind, TableId,
 };
 
 use criterion::{criterion_group, criterion_main, Criterion};
@@ -44,10 +44,10 @@ fn decode_log_msgs(mut bytes: &[u8]) -> Vec<LogMsg> {
     messages
 }
 
-fn generate_messages(recording_id: &RecordingId, tables: &[DataTable]) -> Vec<LogMsg> {
+fn generate_messages(store_id: &StoreId, tables: &[DataTable]) -> Vec<LogMsg> {
     tables
         .iter()
-        .map(|table| LogMsg::ArrowMsg(recording_id.clone(), table.to_arrow_msg().unwrap()))
+        .map(|table| LogMsg::ArrowMsg(store_id.clone(), table.to_arrow_msg().unwrap()))
         .collect()
 }
 
@@ -83,7 +83,7 @@ fn mono_points_arrow(c: &mut Criterion) {
     }
 
     {
-        let recording_id = RecordingId::random(RecordingType::Data);
+        let store_id = StoreId::random(StoreKind::Recording);
         let mut group = c.benchmark_group("mono_points_arrow");
         group.throughput(criterion::Throughput::Elements(NUM_POINTS as _));
         group.bench_function("generate_message_bundles", |b| {
@@ -91,14 +91,14 @@ fn mono_points_arrow(c: &mut Criterion) {
         });
         let tables = generate_tables();
         group.bench_function("generate_messages", |b| {
-            b.iter(|| generate_messages(&recording_id, &tables));
+            b.iter(|| generate_messages(&store_id, &tables));
         });
-        let messages = generate_messages(&recording_id, &tables);
+        let messages = generate_messages(&store_id, &tables);
         group.bench_function("encode_log_msg", |b| {
             b.iter(|| encode_log_msgs(&messages));
         });
         group.bench_function("encode_total", |b| {
-            b.iter(|| encode_log_msgs(&generate_messages(&recording_id, &generate_tables())));
+            b.iter(|| encode_log_msgs(&generate_messages(&store_id, &generate_tables())));
         });
 
         let encoded = encode_log_msgs(&messages);
@@ -139,7 +139,7 @@ fn mono_points_arrow_batched(c: &mut Criterion) {
     }
 
     {
-        let recording_id = RecordingId::random(RecordingType::Data);
+        let store_id = StoreId::random(StoreKind::Recording);
         let mut group = c.benchmark_group("mono_points_arrow_batched");
         group.throughput(criterion::Throughput::Elements(NUM_POINTS as _));
         group.bench_function("generate_message_bundles", |b| {
@@ -147,14 +147,14 @@ fn mono_points_arrow_batched(c: &mut Criterion) {
         });
         let tables = [generate_table()];
         group.bench_function("generate_messages", |b| {
-            b.iter(|| generate_messages(&recording_id, &tables));
+            b.iter(|| generate_messages(&store_id, &tables));
         });
-        let messages = 
generate_messages(&recording_id, &tables); + let messages = generate_messages(&store_id, &tables); group.bench_function("encode_log_msg", |b| { b.iter(|| encode_log_msgs(&messages)); }); group.bench_function("encode_total", |b| { - b.iter(|| encode_log_msgs(&generate_messages(&recording_id, &[generate_table()]))); + b.iter(|| encode_log_msgs(&generate_messages(&store_id, &[generate_table()]))); }); let encoded = encode_log_msgs(&messages); @@ -196,7 +196,7 @@ fn batch_points_arrow(c: &mut Criterion) { } { - let recording_id = RecordingId::random(RecordingType::Data); + let store_id = StoreId::random(StoreKind::Recording); let mut group = c.benchmark_group("batch_points_arrow"); group.throughput(criterion::Throughput::Elements(NUM_POINTS as _)); group.bench_function("generate_message_bundles", |b| { @@ -204,14 +204,14 @@ fn batch_points_arrow(c: &mut Criterion) { }); let tables = generate_tables(); group.bench_function("generate_messages", |b| { - b.iter(|| generate_messages(&recording_id, &tables)); + b.iter(|| generate_messages(&store_id, &tables)); }); - let messages = generate_messages(&recording_id, &tables); + let messages = generate_messages(&store_id, &tables); group.bench_function("encode_log_msg", |b| { b.iter(|| encode_log_msgs(&messages)); }); group.bench_function("encode_total", |b| { - b.iter(|| encode_log_msgs(&generate_messages(&recording_id, &generate_tables()))); + b.iter(|| encode_log_msgs(&generate_messages(&store_id, &generate_tables()))); }); let encoded = encode_log_msgs(&messages); diff --git a/crates/re_log_encoding/src/decoder.rs b/crates/re_log_encoding/src/decoder.rs index c9fcdb628be9..d2d9fd114775 100644 --- a/crates/re_log_encoding/src/decoder.rs +++ b/crates/re_log_encoding/src/decoder.rs @@ -162,22 +162,22 @@ impl Iterator for Decoder { #[test] fn test_encode_decode() { use re_log_types::{ - ApplicationId, LogMsg, RecordingId, RecordingInfo, RecordingSource, RecordingType, RowId, - SetRecordingInfo, Time, + ApplicationId, LogMsg, RowId, SetStoreInfo, StoreId, StoreInfo, StoreKind, StoreSource, + Time, }; - let messages = vec![LogMsg::SetRecordingInfo(SetRecordingInfo { + let messages = vec![LogMsg::SetStoreInfo(SetStoreInfo { row_id: RowId::random(), - info: RecordingInfo { + info: StoreInfo { application_id: ApplicationId("test".to_owned()), - recording_id: RecordingId::random(RecordingType::Data), + store_id: StoreId::random(StoreKind::Recording), is_official_example: true, started: Time::now(), - recording_source: RecordingSource::RustSdk { + store_source: StoreSource::RustSdk { rustc_version: String::new(), llvm_version: String::new(), }, - recording_type: re_log_types::RecordingType::Data, + store_kind: re_log_types::StoreKind::Recording, }, })]; diff --git a/crates/re_log_types/src/lib.rs b/crates/re_log_types/src/lib.rs index 79bbf97e4847..0bf8eb04ce1a 100644 --- a/crates/re_log_types/src/lib.rs +++ b/crates/re_log_types/src/lib.rs @@ -85,64 +85,67 @@ macro_rules! impl_into_enum { // ---------------------------------------------------------------------------- -/// What type of `Recording` this is. +/// What kind of Store this is. /// -/// `Data` recordings contain user-data logged via `log_` API calls. +/// `Recording` stores contain user-data logged via `log_` API calls. /// -/// In the future, `Blueprint` recordings describe how that data is laid out +/// In the future, `Blueprint` stores describe how that data is laid out /// in the viewer, though this is not currently supported. 
 ///
-/// Both of these types can go over the same stream and be stored in the
+/// Both of these kinds can go over the same stream and be stored in the
 /// same datastore, but the viewer wants to treat them very differently.
 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
 #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
-pub enum RecordingType {
+pub enum StoreKind {
     /// A recording of user-data.
-    Data,
+    Recording,
 
-    /// Not currently used: recording data associated with the blueprint state.
+    /// Data associated with the blueprint state.
     Blueprint,
 }
 
-impl std::fmt::Display for RecordingType {
+impl std::fmt::Display for StoreKind {
     #[inline]
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
-            Self::Data => "Data".fmt(f),
+            Self::Recording => "Recording".fmt(f),
             Self::Blueprint => "Blueprint".fmt(f),
         }
     }
 }
 
-/// A unique id per recording (a stream of [`LogMsg`]es).
+/// A unique id per store.
+///
+/// The kind of store is part of the id, and can be either a
+/// [`StoreKind::Recording`] or a [`StoreKind::Blueprint`].
 #[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
 #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
-pub struct RecordingId {
-    pub variant: RecordingType,
+pub struct StoreId {
+    pub kind: StoreKind,
     pub id: Arc<String>,
 }
 
-impl RecordingId {
+impl StoreId {
     #[inline]
-    pub fn random(variant: RecordingType) -> Self {
+    pub fn random(kind: StoreKind) -> Self {
         Self {
-            variant,
+            kind,
             id: Arc::new(uuid::Uuid::new_v4().to_string()),
         }
     }
 
     #[inline]
-    pub fn from_uuid(variant: RecordingType, uuid: uuid::Uuid) -> Self {
+    pub fn from_uuid(kind: StoreKind, uuid: uuid::Uuid) -> Self {
         Self {
-            variant,
+            kind,
             id: Arc::new(uuid.to_string()),
         }
     }
 
     #[inline]
-    pub fn from_string(variant: RecordingType, str: String) -> Self {
+    pub fn from_string(kind: StoreKind, str: String) -> Self {
         Self {
-            variant,
+            kind,
             id: Arc::new(str),
         }
     }
@@ -153,12 +156,13 @@ impl RecordingId {
     }
 }
 
-impl std::fmt::Display for RecordingId {
+impl std::fmt::Display for StoreId {
     #[inline]
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let Self { variant, id } = self;
-        f.write_fmt(format_args!("{variant}:{id}"))?;
-        Ok(())
+        // `StoreKind` is not part of how we display the id,
+        // because that can easily lead to confusion and bugs
+        // when roundtripping to a string (e.g. via Python SDK).
+        self.id.fmt(f)
     }
 }
 
@@ -214,46 +218,45 @@ pub enum LogMsg {
     /// A new recording has begun.
     ///
     /// Should usually be the first message sent.
-    SetRecordingInfo(SetRecordingInfo),
+    SetStoreInfo(SetStoreInfo),
 
     /// Server-backed operation on an [`EntityPath`].
-    EntityPathOpMsg(RecordingId, EntityPathOpMsg),
+    EntityPathOpMsg(StoreId, EntityPathOpMsg),
 
     /// Log an entity using an [`ArrowMsg`].
- ArrowMsg(RecordingId, ArrowMsg), + ArrowMsg(StoreId, ArrowMsg), } impl LogMsg { - pub fn recording_id(&self) -> &RecordingId { + pub fn store_id(&self) -> &StoreId { match self { - Self::SetRecordingInfo(msg) => &msg.info.recording_id, - Self::EntityPathOpMsg(recording_id, _) | Self::ArrowMsg(recording_id, _) => { - recording_id - } + Self::SetStoreInfo(msg) => &msg.info.store_id, + Self::EntityPathOpMsg(store_id, _) | Self::ArrowMsg(store_id, _) => store_id, } } } -impl_into_enum!(SetRecordingInfo, LogMsg, SetRecordingInfo); +impl_into_enum!(SetStoreInfo, LogMsg, SetStoreInfo); // ---------------------------------------------------------------------------- #[must_use] #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] -pub struct SetRecordingInfo { +pub struct SetStoreInfo { pub row_id: RowId, - pub info: RecordingInfo, + pub info: StoreInfo, } +/// Information about a recording or blueprint. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] -pub struct RecordingInfo { +pub struct StoreInfo { /// The user-chosen name of the application doing the logging. pub application_id: ApplicationId, /// Should be unique for each recording. - pub recording_id: RecordingId, + pub store_id: StoreId, /// True if the recording is one of the official Rerun examples. pub is_official_example: bool, @@ -263,16 +266,16 @@ pub struct RecordingInfo { /// Should be an absolute time, i.e. relative to Unix Epoch. pub started: Time, - pub recording_source: RecordingSource, + pub store_source: StoreSource, - pub recording_type: RecordingType, + pub store_kind: StoreKind, } -impl RecordingInfo { - /// Whether this `RecordingInfo` is the default used when a user is not explicitly +impl StoreInfo { + /// Whether this `StoreInfo` is the default used when a user is not explicitly /// creating their own blueprint. pub fn is_app_default_blueprint(&self) -> bool { - self.application_id.as_str() == self.recording_id.as_str() + self.application_id.as_str() == self.store_id.as_str() } } @@ -304,9 +307,10 @@ impl std::fmt::Display for PythonVersion { } } +/// The source of a recording or blueprint. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] -pub enum RecordingSource { +pub enum StoreSource { Unknown, /// The official Rerun Python Logging SDK @@ -322,7 +326,7 @@ pub enum RecordingSource { Other(String), } -impl std::fmt::Display for RecordingSource { +impl std::fmt::Display for StoreSource { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Unknown => "Unknown".fmt(f), diff --git a/crates/re_sdk/src/global.rs b/crates/re_sdk/src/global.rs index caa00e842653..b7d0d3422e9f 100644 --- a/crates/re_sdk/src/global.rs +++ b/crates/re_sdk/src/global.rs @@ -6,7 +6,7 @@ use std::cell::RefCell; use once_cell::sync::OnceCell; use parking_lot::RwLock; -use crate::{RecordingStream, RecordingType}; +use crate::{RecordingStream, StoreKind}; // --- @@ -42,20 +42,17 @@ impl RecordingStream { /// Returns `overrides` if it exists, otherwise returns the most appropriate active recording /// of the specified type (i.e. thread-local first, then global scope), if any. 
     #[inline]
-    pub fn get(
-        which: RecordingType,
-        overrides: Option<RecordingStream>,
-    ) -> Option<RecordingStream> {
+    pub fn get(kind: StoreKind, overrides: Option<RecordingStream>) -> Option<RecordingStream> {
         let rec = overrides.or_else(|| {
-            Self::get_any(RecordingScope::ThreadLocal, which)
-                .or_else(|| Self::get_any(RecordingScope::Global, which))
+            Self::get_any(RecordingScope::ThreadLocal, kind)
+                .or_else(|| Self::get_any(RecordingScope::Global, kind))
         });
 
         if rec.is_none() {
             // NOTE: This is the one and only place where a warning about missing active recording
             // should be printed, don't stutter!
             re_log::warn_once!(
-                "There is no currently active {which} recording available \
+                "There is no currently active {kind} stream available \
                 for the current thread ({:?}): have you called `set_global()` and/or \
                 `set_thread_local()` first?",
                 std::thread::current().id(),
@@ -70,19 +67,19 @@ impl RecordingStream {
     #[inline]
     #[doc(hidden)]
     pub fn get_quiet(
-        which: RecordingType,
+        kind: StoreKind,
         overrides: Option<RecordingStream>,
     ) -> Option<RecordingStream> {
         let rec = overrides.or_else(|| {
-            Self::get_any(RecordingScope::ThreadLocal, which)
-                .or_else(|| Self::get_any(RecordingScope::Global, which))
+            Self::get_any(RecordingScope::ThreadLocal, kind)
+                .or_else(|| Self::get_any(RecordingScope::Global, kind))
         });
 
         if rec.is_none() {
             // NOTE: This is the one and only place where a warning about missing active recording
             // should be printed, don't stutter!
             re_log::debug_once!(
-                "There is no currently active {which} recording available \
+                "There is no currently active {kind} stream available \
                 for the current thread ({:?}): have you called `set_global()` and/or \
                 `set_thread_local()` first?",
                 std::thread::current().id(),
@@ -96,8 +93,8 @@ impl RecordingStream {
 
     /// Returns the currently active recording of the specified type in the global scope, if any.
     #[inline]
-    pub fn global(which: RecordingType) -> Option<RecordingStream> {
-        Self::get_any(RecordingScope::Global, which)
+    pub fn global(kind: StoreKind) -> Option<RecordingStream> {
+        Self::get_any(RecordingScope::Global, kind)
     }
 
     /// Replaces the currently active recording of the specified type in the global scope with
@@ -105,11 +102,8 @@ impl RecordingStream {
     ///
     /// Returns the previous one, if any.
     #[inline]
-    pub fn set_global(
-        which: RecordingType,
-        rec: Option<RecordingStream>,
-    ) -> Option<RecordingStream> {
-        Self::set_any(RecordingScope::Global, which, rec)
+    pub fn set_global(kind: StoreKind, rec: Option<RecordingStream>) -> Option<RecordingStream> {
+        Self::set_any(RecordingScope::Global, kind, rec)
     }
 
     // --- Thread local ---
@@ -117,25 +111,25 @@ impl RecordingStream {
     /// Returns the currently active recording of the specified type in the thread-local scope,
     /// if any.
     #[inline]
-    pub fn thread_local(which: RecordingType) -> Option<RecordingStream> {
-        Self::get_any(RecordingScope::ThreadLocal, which)
+    pub fn thread_local(kind: StoreKind) -> Option<RecordingStream> {
+        Self::get_any(RecordingScope::ThreadLocal, kind)
     }
 
     /// Replaces the currently active recording of the specified type in the thread-local scope
     /// with the specified one.
     #[inline]
     pub fn set_thread_local(
-        which: RecordingType,
+        kind: StoreKind,
         rec: Option<RecordingStream>,
     ) -> Option<RecordingStream> {
-        Self::set_any(RecordingScope::ThreadLocal, which, rec)
+        Self::set_any(RecordingScope::ThreadLocal, kind, rec)
     }
 
     // --- Internal helpers ---
 
-    fn get_any(scope: RecordingScope, which: RecordingType) -> Option<RecordingStream> {
-        match which {
-            RecordingType::Data => match scope {
+    fn get_any(scope: RecordingScope, kind: StoreKind) -> Option<RecordingStream> {
+        match kind {
+            StoreKind::Recording => match scope {
                 RecordingScope::Global => GLOBAL_DATA_RECORDING
                     .get_or_init(Default::default)
                     .read()
@@ -144,7 +138,7 @@ impl RecordingStream {
                     LOCAL_DATA_RECORDING.with(|rec| rec.borrow().clone())
                 }
             },
-            RecordingType::Blueprint => match scope {
+            StoreKind::Blueprint => match scope {
                 RecordingScope::Global => GLOBAL_BLUEPRINT_RECORDING
                     .get_or_init(Default::default)
                     .read()
@@ -158,11 +152,11 @@ impl RecordingStream {
 
     fn set_any(
         scope: RecordingScope,
-        which: RecordingType,
+        kind: StoreKind,
         rec: Option<RecordingStream>,
     ) -> Option<RecordingStream> {
-        match which {
-            RecordingType::Data => match scope {
+        match kind {
+            StoreKind::Recording => match scope {
                 RecordingScope::Global => std::mem::replace(
                     &mut *GLOBAL_DATA_RECORDING.get_or_init(Default::default).write(),
                     rec,
@@ -172,7 +166,7 @@ impl RecordingStream {
                     std::mem::replace(&mut *cell, rec)
                 }),
             },
-            RecordingType::Blueprint => match scope {
+            StoreKind::Blueprint => match scope {
                 RecordingScope::Global => std::mem::replace(
                     &mut *GLOBAL_BLUEPRINT_RECORDING
                         .get_or_init(Default::default)
                         .write(),
                     rec,
@@ -198,62 +192,61 @@ mod tests {
 
     #[test]
     fn fallbacks() {
-        fn check_recording_id(expected: &RecordingStream, got: Option<RecordingStream>) {
+        fn check_store_id(expected: &RecordingStream, got: Option<RecordingStream>) {
             assert_eq!(
-                expected.recording_info().unwrap().recording_id,
-                got.unwrap().recording_info().unwrap().recording_id
+                expected.store_info().unwrap().store_id,
+                got.unwrap().store_info().unwrap().store_id
             );
         }
 
         // nothing is set
-        assert!(RecordingStream::get(RecordingType::Data, None).is_none());
-        assert!(RecordingStream::get(RecordingType::Blueprint, None).is_none());
+        assert!(RecordingStream::get(StoreKind::Recording, None).is_none());
+        assert!(RecordingStream::get(StoreKind::Blueprint, None).is_none());
 
         // nothing is set -- explicit wins
         let explicit = RecordingStreamBuilder::new("explicit").buffered().unwrap();
-        check_recording_id(
+        check_store_id(
             &explicit,
-            RecordingStream::get(RecordingType::Data, explicit.clone().into()),
+            RecordingStream::get(StoreKind::Recording, explicit.clone().into()),
         );
-        check_recording_id(
+        check_store_id(
             &explicit,
-            RecordingStream::get(RecordingType::Blueprint, explicit.clone().into()),
+            RecordingStream::get(StoreKind::Blueprint, explicit.clone().into()),
         );
 
         let global_data = RecordingStreamBuilder::new("global_data")
             .buffered()
             .unwrap();
         assert!(
-            RecordingStream::set_global(RecordingType::Data, Some(global_data.clone())).is_none()
+            RecordingStream::set_global(StoreKind::Recording, Some(global_data.clone())).is_none()
         );
 
         let global_blueprint = RecordingStreamBuilder::new("global_blueprint")
             .buffered()
             .unwrap();
-        assert!(RecordingStream::set_global(
-            RecordingType::Blueprint,
-            Some(global_blueprint.clone())
-        )
-        .is_none());
+        assert!(
+            RecordingStream::set_global(StoreKind::Blueprint, Some(global_blueprint.clone()))
+                .is_none()
+        );
 
         // globals are set, no explicit -- globals win
-        check_recording_id(
+        check_store_id(
             &global_data,
-            RecordingStream::get(RecordingType::Data, None),
+            RecordingStream::get(StoreKind::Recording, None),
        );
-        check_recording_id(
+        check_store_id(
             &global_blueprint,
-            RecordingStream::get(RecordingType::Blueprint, None),
+            RecordingStream::get(StoreKind::Blueprint, None),
         );
 
         // overwrite globals with themselves -- we expect to get the same value back
-        check_recording_id(
+        check_store_id(
             &global_data,
-            RecordingStream::set_global(RecordingType::Data, Some(global_data.clone())),
+            RecordingStream::set_global(StoreKind::Recording, Some(global_data.clone())),
         );
-        check_recording_id(
+        check_store_id(
             &global_blueprint,
-            RecordingStream::set_global(RecordingType::Blueprint, Some(global_blueprint.clone())),
+            RecordingStream::set_global(StoreKind::Blueprint, Some(global_blueprint.clone())),
         );
 
         std::thread::Builder::new()
@@ -262,20 +255,20 @@ mod tests {
                 let global_blueprint = global_blueprint.clone();
                 move || {
                     // globals are still set, no explicit -- globals still win
-                    check_recording_id(
+                    check_store_id(
                         &global_data,
-                        RecordingStream::get(RecordingType::Data, None),
+                        RecordingStream::get(StoreKind::Recording, None),
                     );
-                    check_recording_id(
+                    check_store_id(
                         &global_blueprint,
-                        RecordingStream::get(RecordingType::Blueprint, None),
+                        RecordingStream::get(StoreKind::Blueprint, None),
                     );
 
                     let local_data = RecordingStreamBuilder::new("local_data")
                         .buffered()
                         .unwrap();
                     assert!(RecordingStream::set_thread_local(
-                        RecordingType::Data,
+                        StoreKind::Recording,
                         Some(local_data.clone())
                     )
                     .is_none());
@@ -284,29 +277,29 @@ mod tests {
                         .buffered()
                         .unwrap();
                     assert!(RecordingStream::set_thread_local(
-                        RecordingType::Blueprint,
+                        StoreKind::Blueprint,
                         Some(local_blueprint.clone())
                     )
                     .is_none());
 
                     // locals are set for this thread -- locals win
-                    check_recording_id(
+                    check_store_id(
                         &local_data,
-                        RecordingStream::get(RecordingType::Data, None),
+                        RecordingStream::get(StoreKind::Recording, None),
                     );
-                    check_recording_id(
+                    check_store_id(
                         &local_blueprint,
-                        RecordingStream::get(RecordingType::Blueprint, None),
+                        RecordingStream::get(StoreKind::Blueprint, None),
                     );
 
                     // explicit still outsmarts everyone no matter what
-                    check_recording_id(
+                    check_store_id(
                         &explicit,
-                        RecordingStream::get(RecordingType::Data, explicit.clone().into()),
+                        RecordingStream::get(StoreKind::Recording, explicit.clone().into()),
                     );
-                    check_recording_id(
+                    check_store_id(
                         &explicit,
-                        RecordingStream::get(RecordingType::Blueprint, explicit.clone().into()),
+                        RecordingStream::get(StoreKind::Blueprint, explicit.clone().into()),
                     );
                 }
             })
@@ -315,20 +308,20 @@ mod tests {
             .unwrap();
 
         // locals should not exist in this thread -- global wins
-        check_recording_id(
+        check_store_id(
             &global_data,
-            RecordingStream::get(RecordingType::Data, None),
+            RecordingStream::get(StoreKind::Recording, None),
         );
-        check_recording_id(
+        check_store_id(
             &global_blueprint,
-            RecordingStream::get(RecordingType::Blueprint, None),
+            RecordingStream::get(StoreKind::Blueprint, None),
         );
 
         let local_data = RecordingStreamBuilder::new("local_data")
             .buffered()
             .unwrap();
         assert!(
-            RecordingStream::set_thread_local(RecordingType::Data, Some(local_data.clone()))
+            RecordingStream::set_thread_local(StoreKind::Recording, Some(local_data.clone()))
                 .is_none()
         );
 
@@ -336,25 +329,28 @@ mod tests {
             .buffered()
             .unwrap();
         assert!(RecordingStream::set_thread_local(
-            RecordingType::Blueprint,
+            StoreKind::Blueprint,
             Some(local_blueprint.clone())
         )
        .is_none());
 
-        check_recording_id(
+        check_store_id(
             &global_data,
-            RecordingStream::set_global(RecordingType::Data, None),
+            RecordingStream::set_global(StoreKind::Recording, None),
         );
-        check_recording_id(
+        check_store_id(
             &global_blueprint,
-            RecordingStream::set_global(RecordingType::Blueprint, None),
+            RecordingStream::set_global(StoreKind::Blueprint, None),
         );
 
         // locals still win
-        check_recording_id(&local_data, RecordingStream::get(RecordingType::Data, None));
-        check_recording_id(
+        check_store_id(
+            &local_data,
+            RecordingStream::get(StoreKind::Recording, None),
+        );
+        check_store_id(
             &local_blueprint,
-            RecordingStream::get(RecordingType::Blueprint, None),
+            RecordingStream::get(StoreKind::Blueprint, None),
         );
     }
 }
diff --git a/crates/re_sdk/src/lib.rs b/crates/re_sdk/src/lib.rs
index 31e68b2a3211..2ed73a7d85e3 100644
--- a/crates/re_sdk/src/lib.rs
+++ b/crates/re_sdk/src/lib.rs
@@ -23,8 +23,7 @@ pub use self::recording_stream::{RecordingStream, RecordingStreamBuilder};
 pub use re_sdk_comms::default_server_addr;
 
 pub use re_log_types::{
-    ApplicationId, Component, ComponentName, EntityPath, RecordingId, RecordingType,
-    SerializableComponent,
+    ApplicationId, Component, ComponentName, EntityPath, SerializableComponent, StoreId, StoreKind,
 };
 
 #[cfg(not(target_arch = "wasm32"))]
@@ -162,21 +161,21 @@ pub fn decide_logging_enabled(default_enabled: bool) -> bool {
 
 // ----------------------------------------------------------------------------
 
-/// Creates a new [`re_log_types::RecordingInfo`] which can be used with [`RecordingStream::new`].
+/// Creates a new [`re_log_types::StoreInfo`] which can be used with [`RecordingStream::new`].
 #[track_caller] // track_caller so that we can see if we are being called from an official example.
-pub fn new_recording_info(
+pub fn new_store_info(
     application_id: impl Into<ApplicationId>,
-) -> re_log_types::RecordingInfo {
-    re_log_types::RecordingInfo {
+) -> re_log_types::StoreInfo {
+    re_log_types::StoreInfo {
         application_id: application_id.into(),
-        recording_id: RecordingId::random(RecordingType::Data),
+        store_id: StoreId::random(StoreKind::Recording),
         is_official_example: called_from_official_rust_example(),
         started: re_log_types::Time::now(),
-        recording_source: re_log_types::RecordingSource::RustSdk {
+        store_source: re_log_types::StoreSource::RustSdk {
            rustc_version: env!("RE_BUILD_RUSTC_VERSION").into(),
            llvm_version: env!("RE_BUILD_LLVM_VERSION").into(),
         },
-        recording_type: re_log_types::RecordingType::Data,
+        store_kind: re_log_types::StoreKind::Recording,
     }
 }
diff --git a/crates/re_sdk/src/msg_sender.rs b/crates/re_sdk/src/msg_sender.rs
index 3253fbe60b9d..8565ea0a01c7 100644
--- a/crates/re_sdk/src/msg_sender.rs
+++ b/crates/re_sdk/src/msg_sender.rs
@@ -1,4 +1,4 @@
-use re_log_types::{DataRow, DataTableError, InstanceKey, RecordingId, RowId};
+use re_log_types::{DataRow, DataTableError, InstanceKey, RowId, StoreId};
 
 use crate::{
     log::DataCell,
@@ -310,16 +310,13 @@ impl MsgSender {
     }
 
     /// Turns the current message into a single [`re_log_types::LogMsg`]
-    pub fn into_log_msg(
-        self,
-        recording_id: RecordingId,
-    ) -> Result<re_log_types::LogMsg, DataTableError> {
+    pub fn into_log_msg(self, store_id: StoreId) -> Result<re_log_types::LogMsg, DataTableError> {
         let data_table = re_log_types::DataTable::from_rows(
             re_log_types::TableId::random(),
             self.into_rows().into_iter().flatten(),
         );
         let arrow_msg = data_table.to_arrow_msg()?;
-        Ok(re_log_types::LogMsg::ArrowMsg(recording_id, arrow_msg))
+        Ok(re_log_types::LogMsg::ArrowMsg(store_id, arrow_msg))
     }
 
     fn into_rows(self) -> [Option<DataRow>; 2] {
diff --git a/crates/re_sdk/src/recording_stream.rs b/crates/re_sdk/src/recording_stream.rs
index 44c5572d15f0..f01779715121 100644
--- a/crates/re_sdk/src/recording_stream.rs
+++ b/crates/re_sdk/src/recording_stream.rs
@@ -5,8 +5,8 @@ use crossbeam::channel::{Receiver, Sender};
 
 use re_log_types::{
     ApplicationId, DataRow, DataTable, DataTableBatcher, DataTableBatcherConfig,
-    DataTableBatcherError, LogMsg, RecordingId, RecordingInfo, RecordingSource, RecordingType,
-    Time, TimeInt, TimePoint, TimeType, Timeline, TimelineName,
+    DataTableBatcherError, LogMsg, StoreId, StoreInfo, StoreKind, StoreSource, Time, TimeInt,
+    TimePoint, TimeType, Timeline, TimelineName,
 };
 
 use crate::sink::{LogSink, MemorySinkStorage};
@@ -45,9 +45,9 @@ pub type RecordingStreamResult<T> = Result<T, RecordingStreamError>;
 /// ```
 pub struct RecordingStreamBuilder {
     application_id: ApplicationId,
-    recording_type: RecordingType,
-    recording_id: Option<RecordingId>,
-    recording_source: Option<RecordingSource>,
+    store_kind: StoreKind,
+    store_id: Option<StoreId>,
+    store_source: Option<StoreSource>,
 
     default_enabled: bool,
     enabled: Option<bool>,
@@ -76,9 +76,9 @@ impl RecordingStreamBuilder {
 
         Self {
             application_id,
-            recording_type: RecordingType::Data,
-            recording_id: None,
-            recording_source: None,
+            store_kind: StoreKind::Recording,
+            store_id: None,
+            store_source: None,
 
             default_enabled: true,
             enabled: None,
@@ -108,17 +108,17 @@ impl RecordingStreamBuilder {
         self
     }
 
-    /// Set the [`RecordingId`] for this context.
+    /// Set the [`StoreId`] for this context.
     ///
     /// If you're logging from multiple processes and want all the messages to end up as the same
-    /// recording, you must make sure they all set the same [`RecordingId`] using this function.
+    /// store, you must make sure they all set the same [`StoreId`] using this function.
     ///
-    /// Note that many recordings can share the same [`ApplicationId`], but they all have
-    /// unique [`RecordingId`]s.
+    /// Note that many stores can share the same [`ApplicationId`], but they all have
+    /// unique [`StoreId`]s.
     ///
-    /// The default is to use a random [`RecordingId`].
-    pub fn recording_id(mut self, recording_id: RecordingId) -> Self {
-        self.recording_id = Some(recording_id);
+    /// The default is to use a random [`StoreId`].
+    pub fn store_id(mut self, store_id: StoreId) -> Self {
+        self.store_id = Some(store_id);
         self
     }
 
@@ -131,8 +131,8 @@ impl RecordingStreamBuilder {
     }
 
     #[doc(hidden)]
-    pub fn recording_source(mut self, recording_source: RecordingSource) -> Self {
-        self.recording_source = Some(recording_source);
+    pub fn store_source(mut self, store_source: StoreSource) -> Self {
+        self.store_source = Some(store_source);
         self
     }
 
@@ -144,7 +144,7 @@ impl RecordingStreamBuilder {
 
     #[doc(hidden)]
     pub fn blueprint(mut self) -> Self {
-        self.recording_type = RecordingType::Blueprint;
+        self.store_kind = StoreKind::Blueprint;
         self
     }
 
@@ -157,10 +157,10 @@ impl RecordingStreamBuilder {
     /// # Ok::<(), Box<dyn std::error::Error>>(())
     /// ```
     pub fn buffered(self) -> RecordingStreamResult<RecordingStream> {
-        let (enabled, recording_info, batcher_config) = self.into_args();
+        let (enabled, store_info, batcher_config) = self.into_args();
         if enabled {
             RecordingStream::new(
-                recording_info,
+                store_info,
                 batcher_config,
                 Box::new(crate::log_sink::BufferedSink::new()),
             )
@@ -185,9 +185,9 @@ impl RecordingStreamBuilder {
         let sink = crate::log_sink::MemorySink::default();
         let storage = sink.buffer();
 
-        let (enabled, recording_info, batcher_config) = self.into_args();
+        let (enabled, store_info, batcher_config) = self.into_args();
         if enabled {
-            RecordingStream::new(recording_info, batcher_config, Box::new(sink))
+            RecordingStream::new(store_info, batcher_config, Box::new(sink))
                 .map(|rec_stream| (rec_stream, storage))
         } else {
             re_log::debug!("Rerun disabled - call to memory() ignored");
@@ -206,10 +206,10 @@ impl RecordingStreamBuilder {
     /// # Ok::<(), Box<dyn std::error::Error>>(())
     /// ```
     pub fn connect(self, addr: std::net::SocketAddr) -> RecordingStreamResult<RecordingStream> {
-        let (enabled, recording_info, batcher_config) = self.into_args();
+        let (enabled, store_info, batcher_config) = self.into_args();
         if enabled {
             RecordingStream::new(
-                recording_info,
+                store_info,
                 batcher_config,
                 Box::new(crate::log_sink::TcpSink::new(addr)),
             )
@@ -233,11 +233,11 @@ impl RecordingStreamBuilder {
         self,
         path: impl Into<std::path::PathBuf>,
     ) -> RecordingStreamResult<RecordingStream> {
-        let (enabled, recording_info, batcher_config) = self.into_args();
+        let (enabled, store_info, batcher_config) = self.into_args();
         if enabled {
             RecordingStream::new(
-                recording_info,
+                store_info,
                 batcher_config,
                 Box::new(crate::sink::FileSink::new(path)?),
             )
@@ -247,17 +247,17 @@ impl RecordingStreamBuilder {
         }
     }
 
-    /// Returns whether or not logging is enabled, a [`RecordingInfo`] and the associated batcher
+    /// Returns whether or not logging is enabled, a [`StoreInfo`] and the associated batcher
     /// configuration.
     ///
     /// This can be used to then construct a [`RecordingStream`] manually using
     /// [`RecordingStream::new`].
-    pub fn into_args(self) -> (bool, RecordingInfo, DataTableBatcherConfig) {
+    pub fn into_args(self) -> (bool, StoreInfo, DataTableBatcherConfig) {
         let Self {
             application_id,
-            recording_type,
-            recording_id,
-            recording_source,
+            store_kind,
+            store_id,
+            store_source,
             default_enabled,
             enabled,
             batcher_config,
@@ -265,25 +265,25 @@ impl RecordingStreamBuilder {
         } = self;
 
         let enabled = enabled.unwrap_or_else(|| crate::decide_logging_enabled(default_enabled));
-        let recording_id = recording_id.unwrap_or(RecordingId::random(recording_type));
-        let recording_source = recording_source.unwrap_or_else(|| RecordingSource::RustSdk {
+        let store_id = store_id.unwrap_or(StoreId::random(store_kind));
+        let store_source = store_source.unwrap_or_else(|| StoreSource::RustSdk {
             rustc_version: env!("RE_BUILD_RUSTC_VERSION").into(),
             llvm_version: env!("RE_BUILD_LLVM_VERSION").into(),
         });
 
-        let recording_info = RecordingInfo {
+        let store_info = StoreInfo {
             application_id,
-            recording_id,
+            store_id,
             is_official_example,
             started: Time::now(),
-            recording_source,
-            recording_type,
+            store_source,
+            store_kind,
         };
 
         let batcher_config = batcher_config
             .unwrap_or_else(|| DataTableBatcherConfig::from_env().unwrap_or_default());
 
-        (enabled, recording_info, batcher_config)
+        (enabled, store_info, batcher_config)
     }
 }
@@ -329,7 +329,7 @@ pub struct RecordingStream {
 }
 
 struct RecordingStreamInner {
-    info: RecordingInfo,
+    info: StoreInfo,
     tick: AtomicI64,
 
     /// The one and only entrypoint into the pipeline: this is _never_ cloned nor publicly exposed,
@@ -356,7 +356,7 @@ impl Drop for RecordingStreamInner {
 
 impl RecordingStreamInner {
     fn new(
-        info: RecordingInfo,
+        info: StoreInfo,
         batcher_config: DataTableBatcherConfig,
         sink: Box<dyn LogSink>,
     ) -> RecordingStreamResult<Self> {
@@ -365,11 +365,11 @@ impl RecordingStreamInner {
         {
             re_log::debug!(
                 app_id = %info.application_id,
-                rec_id = %info.recording_id,
+                rec_id = %info.store_id,
                 "setting recording info",
             );
             sink.send(
-                re_log_types::SetRecordingInfo {
+                re_log_types::SetStoreInfo {
                     row_id: re_log_types::RowId::random(),
                     info: info.clone(),
                 }
@@ -420,19 +420,19 @@ impl Command {
 }
 
 impl RecordingStream {
-    /// Creates a new [`RecordingStream`] with a given [`RecordingInfo`] and [`LogSink`].
+    /// Creates a new [`RecordingStream`] with a given [`StoreInfo`] and [`LogSink`].
     ///
-    /// You can create a [`RecordingInfo`] with [`crate::new_recording_info`];
+    /// You can create a [`StoreInfo`] with [`crate::new_store_info`];
     ///
-    /// The [`RecordingInfo`] is immediately sent to the sink in the form of a
-    /// [`re_log_types::SetRecordingInfo`].
+    /// The [`StoreInfo`] is immediately sent to the sink in the form of a
+    /// [`re_log_types::SetStoreInfo`].
     ///
     /// You can find sinks in [`crate::sink`].
     ///
     /// See also: [`RecordingStreamBuilder`].
     #[must_use = "Recording will get closed automatically once all instances of this object have been dropped"]
     pub fn new(
-        info: RecordingInfo,
+        info: StoreInfo,
         batcher_config: DataTableBatcherConfig,
         sink: Box<dyn LogSink>,
     ) -> RecordingStreamResult<Self> {
@@ -454,14 +454,14 @@ impl RecordingStream {
 
 #[allow(clippy::needless_pass_by_value)]
 fn forwarding_thread(
-    info: RecordingInfo,
+    info: StoreInfo,
     mut sink: Box<dyn LogSink>,
     cmds_rx: Receiver<Command>,
     tables: Receiver<DataTable>,
 ) {
     /// Returns `true` to indicate that processing can continue; i.e. `false` means immediate
     /// shutdown.
-    fn handle_cmd(info: &RecordingInfo, cmd: Command, sink: &mut Box<dyn LogSink>) -> bool {
+    fn handle_cmd(info: &StoreInfo, cmd: Command, sink: &mut Box<dyn LogSink>) -> bool {
         match cmd {
             Command::RecordMsg(msg) => {
                 sink.send(msg);
@@ -482,11 +482,11 @@ fn forwarding_thread(
                 {
                     re_log::debug!(
                         app_id = %info.application_id,
-                        rec_id = %info.recording_id,
+                        rec_id = %info.store_id,
                         "setting recording info",
                     );
                     new_sink.send(
-                        re_log_types::SetRecordingInfo {
+                        re_log_types::SetStoreInfo {
                             row_id: re_log_types::RowId::random(),
                             info: info.clone(),
                         }
@@ -526,7 +526,7 @@ fn forwarding_thread(
                     continue;
                 }
             };
-            sink.send(LogMsg::ArrowMsg(info.recording_id.clone(), table));
+            sink.send(LogMsg::ArrowMsg(info.store_id.clone(), table));
         }
 
         select! {
@@ -544,7 +544,7 @@ fn forwarding_thread(
                         continue;
                     }
                 };
-                sink.send(LogMsg::ArrowMsg(info.recording_id.clone(), table));
+                sink.send(LogMsg::ArrowMsg(info.store_id.clone(), table));
             }
             recv(cmds_rx) -> res => {
                 let Ok(cmd) = res else {
@@ -572,9 +572,9 @@ impl RecordingStream {
         self.inner.is_some()
     }
 
-    /// The [`RecordingInfo`] associated with this `RecordingStream`.
+    /// The [`StoreInfo`] associated with this `RecordingStream`.
     #[inline]
-    pub fn recording_info(&self) -> Option<&RecordingInfo> {
+    pub fn store_info(&self) -> Option<&StoreInfo> {
         (*self.inner).as_ref().map(|inner| &inner.info)
     }
 }
@@ -610,7 +610,7 @@ impl RecordingStream {
         };
 
         self.record_msg(LogMsg::EntityPathOpMsg(
-            this.info.recording_id.clone(),
+            this.info.store_id.clone(),
             re_log_types::EntityPathOpMsg {
                 row_id: re_log_types::RowId::random(),
                 time_point: timepoint,
@@ -788,19 +788,19 @@ impl RecordingStream {
 #[derive(Default)]
 struct ThreadInfo {
     /// The current time per-thread per-recording, which can be set by users.
-    timepoints: HashMap<RecordingId, TimePoint>,
+    timepoints: HashMap<StoreId, TimePoint>,
 }
 
 impl ThreadInfo {
-    fn thread_now(rid: &RecordingId) -> TimePoint {
+    fn thread_now(rid: &StoreId) -> TimePoint {
         Self::with(|ti| ti.now(rid))
     }
 
-    fn set_thread_time(rid: &RecordingId, timeline: Timeline, time_int: Option<TimeInt>) {
+    fn set_thread_time(rid: &StoreId, timeline: Timeline, time_int: Option<TimeInt>) {
         Self::with(|ti| ti.set_time(rid, timeline, time_int));
     }
 
-    fn reset_thread_time(rid: &RecordingId) {
+    fn reset_thread_time(rid: &StoreId) {
         Self::with(|ti| ti.reset_time(rid));
     }
 
@@ -818,13 +818,13 @@ impl ThreadInfo {
         })
     }
 
-    fn now(&self, rid: &RecordingId) -> TimePoint {
+    fn now(&self, rid: &StoreId) -> TimePoint {
         let mut timepoint = self.timepoints.get(rid).cloned().unwrap_or_default();
         timepoint.insert(Timeline::log_time(), Time::now().into());
         timepoint
     }
 
-    fn set_time(&mut self, rid: &RecordingId, timeline: Timeline, time_int: Option<TimeInt>) {
+    fn set_time(&mut self, rid: &StoreId, timeline: Timeline, time_int: Option<TimeInt>) {
         if let Some(time_int) = time_int {
             self.timepoints
                 .entry(rid.clone())
@@ -835,7 +835,7 @@ impl ThreadInfo {
         }
     }
 
-    fn reset_time(&mut self, rid: &RecordingId) {
+    fn reset_time(&mut self, rid: &StoreId) {
         if let Some(timepoint) = self.timepoints.get_mut(rid) {
             *timepoint = TimePoint::default();
         }
@@ -850,7 +850,7 @@ impl RecordingStream {
             return TimePoint::default();
         };
 
-        ThreadInfo::thread_now(&this.info.recording_id)
+        ThreadInfo::thread_now(&this.info.store_id)
     }
 
     /// Set the current time of the recording, for the current calling thread.
@@ -867,7 +867,7 @@ impl RecordingStream { }; ThreadInfo::set_thread_time( - &this.info.recording_id, + &this.info.store_id, Timeline::new(timeline, TimeType::Sequence), sequence.map(TimeInt::from), ); @@ -887,7 +887,7 @@ impl RecordingStream { }; ThreadInfo::set_thread_time( - &this.info.recording_id, + &this.info.store_id, Timeline::new(timeline, TimeType::Time), seconds.map(|secs| Time::from_seconds_since_epoch(secs).into()), ); @@ -907,7 +907,7 @@ impl RecordingStream { }; ThreadInfo::set_thread_time( - &this.info.recording_id, + &this.info.store_id, Timeline::new(timeline, TimeType::Time), ns.map(|ns| Time::from_ns_since_epoch(ns).into()), ); @@ -924,7 +924,7 @@ impl RecordingStream { return; }; - ThreadInfo::reset_thread_time(&this.info.recording_id); + ThreadInfo::reset_thread_time(&this.info.store_id); } } @@ -951,7 +951,7 @@ mod tests { .buffered() .unwrap(); - let rec_info = rec_stream.recording_info().cloned().unwrap(); + let store_info = rec_stream.store_info().cloned().unwrap(); let mut table = data_table_example(false); table.compute_all_size_bytes(); @@ -966,32 +966,32 @@ mod tests { msgs }; - // First message should be a set_recording_info resulting from the original sink swap to + // First message should be a set_store_info resulting from the original sink swap to // buffered mode. match msgs.pop().unwrap() { - LogMsg::SetRecordingInfo(msg) => { + LogMsg::SetStoreInfo(msg) => { assert!(msg.row_id != RowId::ZERO); - similar_asserts::assert_eq!(rec_info, msg.info); + similar_asserts::assert_eq!(store_info, msg.info); } - _ => panic!("expected SetRecordingInfo"), + _ => panic!("expected SetStoreInfo"), } - // Second message should be a set_recording_info resulting from the later sink swap from + // Second message should be a set_store_info resulting from the later sink swap from // buffered mode into in-memory mode. // This arrives _before_ the data itself since we're using manual flushing. match msgs.pop().unwrap() { - LogMsg::SetRecordingInfo(msg) => { + LogMsg::SetStoreInfo(msg) => { assert!(msg.row_id != RowId::ZERO); - similar_asserts::assert_eq!(rec_info, msg.info); + similar_asserts::assert_eq!(store_info, msg.info); } - _ => panic!("expected SetRecordingInfo"), + _ => panic!("expected SetStoreInfo"), } // Third message is the batched table itself, which was sent as a result of the implicit // flush when swapping the underlying sink from buffered to in-memory. match msgs.pop().unwrap() { LogMsg::ArrowMsg(rid, msg) => { - assert_eq!(rec_info.recording_id, rid); + assert_eq!(store_info.store_id, rid); let mut got = DataTable::from_arrow_msg(&msg).unwrap(); // TODO(1760): we shouldn't have to (re)do this! @@ -1016,7 +1016,7 @@ mod tests { .buffered() .unwrap(); - let rec_info = rec_stream.recording_info().cloned().unwrap(); + let store_info = rec_stream.store_info().cloned().unwrap(); let mut table = data_table_example(false); table.compute_all_size_bytes(); @@ -1031,25 +1031,25 @@ mod tests { msgs }; - // First message should be a set_recording_info resulting from the original sink swap to + // First message should be a set_store_info resulting from the original sink swap to // buffered mode. 
match msgs.pop().unwrap() { - LogMsg::SetRecordingInfo(msg) => { + LogMsg::SetStoreInfo(msg) => { assert!(msg.row_id != RowId::ZERO); - similar_asserts::assert_eq!(rec_info, msg.info); + similar_asserts::assert_eq!(store_info, msg.info); } - _ => panic!("expected SetRecordingInfo"), + _ => panic!("expected SetStoreInfo"), } - // Second message should be a set_recording_info resulting from the later sink swap from + // Second message should be a set_store_info resulting from the later sink swap from // buffered mode into in-memory mode. // This arrives _before_ the data itself since we're using manual flushing. match msgs.pop().unwrap() { - LogMsg::SetRecordingInfo(msg) => { + LogMsg::SetStoreInfo(msg) => { assert!(msg.row_id != RowId::ZERO); - similar_asserts::assert_eq!(rec_info, msg.info); + similar_asserts::assert_eq!(store_info, msg.info); } - _ => panic!("expected SetRecordingInfo"), + _ => panic!("expected SetStoreInfo"), } let mut rows = { @@ -1061,7 +1061,7 @@ mod tests { let mut assert_next_row = || { match msgs.pop().unwrap() { LogMsg::ArrowMsg(rid, msg) => { - assert_eq!(rec_info.recording_id, rid); + assert_eq!(store_info.store_id, rid); let mut got = DataTable::from_arrow_msg(&msg).unwrap(); // TODO(1760): we shouldn't have to (re)do this! @@ -1096,7 +1096,7 @@ mod tests { .memory() .unwrap(); - let rec_info = rec_stream.recording_info().cloned().unwrap(); + let store_info = rec_stream.store_info().cloned().unwrap(); let mut table = data_table_example(false); table.compute_all_size_bytes(); @@ -1111,14 +1111,14 @@ mod tests { msgs }; - // First message should be a set_recording_info resulting from the original sink swap + // First message should be a set_store_info resulting from the original sink swap // to in-memory mode. match msgs.pop().unwrap() { - LogMsg::SetRecordingInfo(msg) => { + LogMsg::SetStoreInfo(msg) => { assert!(msg.row_id != RowId::ZERO); - similar_asserts::assert_eq!(rec_info, msg.info); + similar_asserts::assert_eq!(store_info, msg.info); } - _ => panic!("expected SetRecordingInfo"), + _ => panic!("expected SetStoreInfo"), } // The underlying batcher is never flushing: there's nothing else. @@ -1140,7 +1140,7 @@ mod tests { // The batched table itself, which was sent as a result of the explicit flush above. match msgs.pop().unwrap() { LogMsg::ArrowMsg(rid, msg) => { - assert_eq!(rec_info.recording_id, rid); + assert_eq!(store_info.store_id, rid); let mut got = DataTable::from_arrow_msg(&msg).unwrap(); // TODO(1760): we shouldn't have to (re)do this! diff --git a/crates/re_sdk_comms/src/server.rs b/crates/re_sdk_comms/src/server.rs index 895b3eabff4c..fa49ff60747b 100644 --- a/crates/re_sdk_comms/src/server.rs +++ b/crates/re_sdk_comms/src/server.rs @@ -248,7 +248,7 @@ impl CongestionManager { #[allow(clippy::match_same_arms)] match msg { // we don't want to drop any of these - LogMsg::SetRecordingInfo(_) | LogMsg::EntityPathOpMsg(_, _) => true, + LogMsg::SetStoreInfo(_) | LogMsg::EntityPathOpMsg(_, _) => true, LogMsg::ArrowMsg(_, arrow_msg) => self.should_send_time_point(&arrow_msg.timepoint_max), } diff --git a/crates/re_space_view_spatial/src/scene/mod.rs b/crates/re_space_view_spatial/src/scene/mod.rs index 83c571d223dc..73f60439b96b 100644 --- a/crates/re_space_view_spatial/src/scene/mod.rs +++ b/crates/re_space_view_spatial/src/scene/mod.rs @@ -143,7 +143,7 @@ impl SceneSpatial { DefaultPoints, } - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; // Use a BTreeSet for entity hashes to get a stable order. 
        let mut entities_per_draw_order = BTreeMap::<DrawOrder, BTreeSet<DrawOrderTarget>>::new();
diff --git a/crates/re_space_view_spatial/src/scene/scene_part/arrows3d.rs b/crates/re_space_view_spatial/src/scene/scene_part/arrows3d.rs
index 151e3a2193db..028656d2fc2c 100644
--- a/crates/re_space_view_spatial/src/scene/scene_part/arrows3d.rs
+++ b/crates/re_space_view_spatial/src/scene/scene_part/arrows3d.rs
@@ -102,7 +102,7 @@ impl ScenePart for Arrows3DPart {
         };
 
         match query_primary_with_history::(
-            &ctx.log_db.entity_db.data_store,
+            &ctx.store_db.entity_db.data_store,
             &query.timeline,
             &query.latest_at,
             &props.visible_history,
diff --git a/crates/re_space_view_spatial/src/scene/scene_part/boxes2d.rs b/crates/re_space_view_spatial/src/scene/scene_part/boxes2d.rs
index cdb349d553f1..56a78c355144 100644
--- a/crates/re_space_view_spatial/src/scene/scene_part/boxes2d.rs
+++ b/crates/re_space_view_spatial/src/scene/scene_part/boxes2d.rs
@@ -117,7 +117,7 @@ impl ScenePart for Boxes2DPart {
         };
 
         match query_primary_with_history::(
-            &ctx.log_db.entity_db.data_store,
+            &ctx.store_db.entity_db.data_store,
             &query.timeline,
             &query.latest_at,
             &props.visible_history,
diff --git a/crates/re_space_view_spatial/src/scene/scene_part/boxes3d.rs b/crates/re_space_view_spatial/src/scene/scene_part/boxes3d.rs
index 118f6887e073..c0491ec48154 100644
--- a/crates/re_space_view_spatial/src/scene/scene_part/boxes3d.rs
+++ b/crates/re_space_view_spatial/src/scene/scene_part/boxes3d.rs
@@ -109,7 +109,7 @@ impl ScenePart for Boxes3DPart {
         let entity_highlight = highlights.entity_outline_mask(ent_path.hash());
 
         match query_primary_with_history::(
-            &ctx.log_db.entity_db.data_store,
+            &ctx.store_db.entity_db.data_store,
             &query.timeline,
             &query.latest_at,
             &props.visible_history,
diff --git a/crates/re_space_view_spatial/src/scene/scene_part/cameras.rs b/crates/re_space_view_spatial/src/scene/scene_part/cameras.rs
index b781321582a7..3e80d6d83bd9 100644
--- a/crates/re_space_view_spatial/src/scene/scene_part/cameras.rs
+++ b/crates/re_space_view_spatial/src/scene/scene_part/cameras.rs
@@ -190,13 +190,13 @@ impl ScenePart for CamerasPart {
     ) {
         re_tracing::profile_scope!("CamerasPart");
 
-        let store = &ctx.log_db.entity_db.data_store;
+        let store = &ctx.store_db.entity_db.data_store;
         for (ent_path, props) in query.iter_entities() {
             let query = re_arrow_store::LatestAtQuery::new(query.timeline, query.latest_at);
 
             if let Some(pinhole) = store.query_latest_component::<Pinhole>(ent_path, &query) {
                 let view_coordinates = determine_view_coordinates(
-                    &ctx.log_db.entity_db.data_store,
+                    &ctx.store_db.entity_db.data_store,
                     &ctx.rec_cfg.time_ctrl,
                     ent_path.clone(),
                 );
diff --git a/crates/re_space_view_spatial/src/scene/scene_part/images.rs b/crates/re_space_view_spatial/src/scene/scene_part/images.rs
index 3fcd7604b9b6..09f2a4b898d2 100644
--- a/crates/re_space_view_spatial/src/scene/scene_part/images.rs
+++ b/crates/re_space_view_spatial/src/scene/scene_part/images.rs
@@ -193,7 +193,7 @@ impl ImagesPart {
             if *properties.backproject_depth.get() && tensor.meaning == TensorDataMeaning::Depth {
                 let query = ctx.current_query();
                 let closet_pinhole = ctx
-                    .log_db
+                    .store_db
                     .entity_db
                     .data_store
                     .query_latest_component_at_closest_ancestor::<Pinhole>(ent_path, &query);
@@ -261,7 +261,7 @@ impl ImagesPart {
     ) -> Result<(), String> {
         re_tracing::profile_function!();
 
-        let store = &ctx.log_db.entity_db.data_store;
+        let store = &ctx.store_db.entity_db.data_store;
 
         let Some(intrinsics) = store.query_latest_component::<Pinhole>(
             pinhole_ent_path,
             &ctx.current_query(),
@@ -392,7 +392,7 @@ impl ScenePart for 
ImagesPart { }; match query_primary_with_history::( - &ctx.log_db.entity_db.data_store, + &ctx.store_db.entity_db.data_store, &query.timeline, &query.latest_at, &props.visible_history, diff --git a/crates/re_space_view_spatial/src/scene/scene_part/lines2d.rs b/crates/re_space_view_spatial/src/scene/scene_part/lines2d.rs index 0f53356f0514..4f94f9c684ae 100644 --- a/crates/re_space_view_spatial/src/scene/scene_part/lines2d.rs +++ b/crates/re_space_view_spatial/src/scene/scene_part/lines2d.rs @@ -87,7 +87,7 @@ impl ScenePart for Lines2DPart { let entity_highlight = highlights.entity_outline_mask(ent_path.hash()); match query_primary_with_history::( - &ctx.log_db.entity_db.data_store, + &ctx.store_db.entity_db.data_store, &query.timeline, &query.latest_at, &props.visible_history, diff --git a/crates/re_space_view_spatial/src/scene/scene_part/lines3d.rs b/crates/re_space_view_spatial/src/scene/scene_part/lines3d.rs index 19e2da765b80..86492c944a65 100644 --- a/crates/re_space_view_spatial/src/scene/scene_part/lines3d.rs +++ b/crates/re_space_view_spatial/src/scene/scene_part/lines3d.rs @@ -86,7 +86,7 @@ impl ScenePart for Lines3DPart { let entity_highlight = highlights.entity_outline_mask(ent_path.hash()); match query_primary_with_history::( - &ctx.log_db.entity_db.data_store, + &ctx.store_db.entity_db.data_store, &query.timeline, &query.latest_at, &props.visible_history, diff --git a/crates/re_space_view_spatial/src/scene/scene_part/meshes.rs b/crates/re_space_view_spatial/src/scene/scene_part/meshes.rs index bbb34c062f19..f133ba4084ff 100644 --- a/crates/re_space_view_spatial/src/scene/scene_part/meshes.rs +++ b/crates/re_space_view_spatial/src/scene/scene_part/meshes.rs @@ -78,7 +78,7 @@ impl ScenePart for MeshPart { }; match query_primary_with_history::( - &ctx.log_db.entity_db.data_store, + &ctx.store_db.entity_db.data_store, &query.timeline, &query.latest_at, &props.visible_history, diff --git a/crates/re_space_view_spatial/src/scene/scene_part/points2d.rs b/crates/re_space_view_spatial/src/scene/scene_part/points2d.rs index a38c5c65b0fd..944183f546fb 100644 --- a/crates/re_space_view_spatial/src/scene/scene_part/points2d.rs +++ b/crates/re_space_view_spatial/src/scene/scene_part/points2d.rs @@ -184,7 +184,7 @@ impl ScenePart for Points2DPart { let entity_highlight = highlights.entity_outline_mask(ent_path.hash()); match query_primary_with_history::( - &ctx.log_db.entity_db.data_store, + &ctx.store_db.entity_db.data_store, &query.timeline, &query.latest_at, &props.visible_history, diff --git a/crates/re_space_view_spatial/src/scene/scene_part/points3d.rs b/crates/re_space_view_spatial/src/scene/scene_part/points3d.rs index a1128592e73c..0b0f6d8e727d 100644 --- a/crates/re_space_view_spatial/src/scene/scene_part/points3d.rs +++ b/crates/re_space_view_spatial/src/scene/scene_part/points3d.rs @@ -187,7 +187,7 @@ impl ScenePart for Points3DPart { let entity_highlight = highlights.entity_outline_mask(ent_path.hash()); match query_primary_with_history::( - &ctx.log_db.entity_db.data_store, + &ctx.store_db.entity_db.data_store, &query.timeline, &query.latest_at, &props.visible_history, diff --git a/crates/re_space_view_spatial/src/transform_cache.rs b/crates/re_space_view_spatial/src/transform_cache.rs index e99b29f93cf9..1fa1960ef7ca 100644 --- a/crates/re_space_view_spatial/src/transform_cache.rs +++ b/crates/re_space_view_spatial/src/transform_cache.rs @@ -1,7 +1,7 @@ use nohash_hasher::IntMap; use re_arrow_store::LatestAtQuery; use re_components::{DisconnectedSpace, Pinhole, Transform3D}; 
-use re_data_store::{log_db::EntityDb, EntityPath, EntityPropertyMap, EntityTree}; +use re_data_store::{store_db::EntityDb, EntityPath, EntityPropertyMap, EntityTree}; use re_log_types::EntityPathHash; use re_viewer_context::TimeControl; diff --git a/crates/re_space_view_spatial/src/ui.rs b/crates/re_space_view_spatial/src/ui.rs index cb56f6ca5a27..08723c83fc5b 100644 --- a/crates/re_space_view_spatial/src/ui.rs +++ b/crates/re_space_view_spatial/src/ui.rs @@ -162,7 +162,7 @@ impl ViewSpatialState { query: &re_arrow_store::LatestAtQuery, entity_path: &EntityPath, ) { - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; if store .query_latest_component::(entity_path, query) .is_some() @@ -191,7 +191,7 @@ impl ViewSpatialState { query: &re_arrow_store::LatestAtQuery, entity_path: &EntityPath, ) -> Option<()> { - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; let tensor = store.query_latest_component::(entity_path, query)?; let mut properties = data_blueprint.data_blueprints_individual().get(entity_path); @@ -389,7 +389,7 @@ impl ViewSpatialState { } self.scene_num_primitives = scene.primitives.num_primitives(); - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; match *self.nav_mode.get() { SpatialNavigationMode::ThreeD => { let coordinates = store.query_latest_component(space, &ctx.current_query()); @@ -728,7 +728,7 @@ pub fn picking( // TODO(#1818): Depth at pointer only works for depth images so far. let mut depth_at_pointer = None; for hit in &picking_result.hits { - let Some(mut instance_path) = hit.instance_path_hash.resolve(&ctx.log_db.entity_db) + let Some(mut instance_path) = hit.instance_path_hash.resolve(&ctx.store_db.entity_db) else { continue; }; let ent_properties = entity_properties.get(&instance_path.entity_path); @@ -740,7 +740,7 @@ pub fn picking( let picked_image_with_coords = if hit.hit_type == PickingHitType::TexturedRect || *ent_properties.backproject_depth.get() { - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; store .query_latest_component::(&instance_path.entity_path, &ctx.current_query()) .and_then(|tensor| { diff --git a/crates/re_space_view_spatial/src/ui_2d.rs b/crates/re_space_view_spatial/src/ui_2d.rs index 803576899d79..dd0e43b1ad2a 100644 --- a/crates/re_space_view_spatial/src/ui_2d.rs +++ b/crates/re_space_view_spatial/src/ui_2d.rs @@ -238,7 +238,7 @@ pub fn view_2d( // Save off the available_size since this is used for some of the layout updates later let available_size = ui.available_size(); - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; // Determine the canvas which determines the extent of the explorable scene coordinates, // and thus the size of the scroll area. 
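Every re_space_view_spatial hunk above makes the same mechanical substitution: scene code now reaches the data store through `ctx.store_db` instead of `ctx.log_db`. A minimal sketch of the access pattern these hunks share, assuming the `query_latest_component` and `current_query` APIs visible in this diff (the helper function itself is illustrative, not part of the change):

use re_components::Pinhole;
use re_log_types::EntityPath;
use re_viewer_context::ViewerContext;

// Sketch only: fetch the latest value of a component the way the scene parts
// above do, after the ctx.log_db -> ctx.store_db rename. The store and the
// query are unchanged; only the field on ViewerContext was renamed.
fn latest_pinhole(ctx: &ViewerContext<'_>, ent_path: &EntityPath) -> Option<Pinhole> {
    let store = &ctx.store_db.entity_db.data_store;
    store.query_latest_component::<Pinhole>(ent_path, &ctx.current_query())
}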
diff --git a/crates/re_space_view_text/src/scene_element.rs b/crates/re_space_view_text/src/scene_element.rs index daface3565d9..138534829baa 100644 --- a/crates/re_space_view_text/src/scene_element.rs +++ b/crates/re_space_view_text/src/scene_element.rs @@ -42,7 +42,7 @@ impl SceneElementImpl for SceneText { query: &SceneQuery<'_>, state: &TextSpaceViewState, ) { - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; for entity_path in query.entity_paths { let ent_path = entity_path; diff --git a/crates/re_space_view_text/src/space_view_class.rs b/crates/re_space_view_text/src/space_view_class.rs index c0a10d84694b..51c69377b65a 100644 --- a/crates/re_space_view_text/src/space_view_class.rs +++ b/crates/re_space_view_text/src/space_view_class.rs @@ -201,7 +201,7 @@ impl ViewTextFilters { row_log_levels, } = self; - for timeline in ctx.log_db.timelines() { + for timeline in ctx.store_db.timelines() { col_timelines.entry(*timeline).or_insert(true); } @@ -219,7 +219,7 @@ impl ViewTextFilters { fn get_time_point(ctx: &ViewerContext<'_>, entry: &TextEntry) -> Option { if let Some(time_point) = ctx - .log_db + .store_db .entity_db .data_store .get_msg_metadata(&entry.row_id) diff --git a/crates/re_space_view_text_box/src/scene_element.rs b/crates/re_space_view_text_box/src/scene_element.rs index c55e6d0da06c..0010adf47ecd 100644 --- a/crates/re_space_view_text_box/src/scene_element.rs +++ b/crates/re_space_view_text_box/src/scene_element.rs @@ -29,7 +29,7 @@ impl SceneElement for SceneTextBox { query: &SceneQuery<'_>, _state: &dyn SpaceViewState, ) { - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; for (ent_path, props) in query.iter_entities() { if !props.visible { diff --git a/crates/re_time_panel/src/lib.rs b/crates/re_time_panel/src/lib.rs index 45019bcfc8b7..f3a9852c5a54 100644 --- a/crates/re_time_panel/src/lib.rs +++ b/crates/re_time_panel/src/lib.rs @@ -155,7 +155,7 @@ impl TimePanel { ui.horizontal(|ui| { let re_ui = &ctx.re_ui; let time_ctrl = &mut ctx.rec_cfg.time_ctrl; - let times_per_timeline = ctx.log_db.times_per_timeline(); + let times_per_timeline = ctx.store_db.times_per_timeline(); self.time_control_ui .play_pause_ui(time_ctrl, re_ui, times_per_timeline, ui); self.time_control_ui.playback_speed_ui(time_ctrl, ui); @@ -165,7 +165,7 @@ impl TimePanel { let time_ctrl = &mut ctx.rec_cfg.time_ctrl; self.time_control_ui.timeline_selector_ui( time_ctrl, - ctx.log_db.times_per_timeline(), + ctx.store_db.times_per_timeline(), ui, ); collapsed_time_marker_and_time(ui, ctx); @@ -175,7 +175,7 @@ impl TimePanel { // One row: let re_ui = &ctx.re_ui; let time_ctrl = &mut ctx.rec_cfg.time_ctrl; - let times_per_timeline = ctx.log_db.times_per_timeline(); + let times_per_timeline = ctx.store_db.times_per_timeline(); self.time_control_ui .play_pause_ui(time_ctrl, re_ui, times_per_timeline, ui); self.time_control_ui @@ -275,7 +275,7 @@ impl TimePanel { full_y_range.clone(), ); time_selection_ui::loop_selection_ui( - ctx.log_db, + ctx.store_db, &mut ctx.rec_cfg.time_ctrl, &self.time_ranges_ui, ui, @@ -361,7 +361,7 @@ impl TimePanel { ctx, time_area_response, time_area_painter, - &ctx.log_db.entity_db.tree, + &ctx.store_db.entity_db.tree, ui, ); }); @@ -560,7 +560,7 @@ impl TimePanel { ui.horizontal(|ui| { let re_ui = &ctx.re_ui; let time_ctrl = &mut ctx.rec_cfg.time_ctrl; - let times_per_timeline = ctx.log_db.times_per_timeline(); + let times_per_timeline = ctx.store_db.times_per_timeline(); 
self.time_control_ui .play_pause_ui(time_ctrl, re_ui, times_per_timeline, ui); self.time_control_ui.playback_speed_ui(time_ctrl, ui); @@ -570,7 +570,7 @@ impl TimePanel { let time_ctrl = &mut ctx.rec_cfg.time_ctrl; self.time_control_ui.timeline_selector_ui( time_ctrl, - ctx.log_db.times_per_timeline(), + ctx.store_db.times_per_timeline(), ui, ); @@ -585,7 +585,7 @@ impl TimePanel { // One row: let re_ui = &ctx.re_ui; let time_ctrl = &mut ctx.rec_cfg.time_ctrl; - let times_per_timeline = ctx.log_db.times_per_timeline(); + let times_per_timeline = ctx.store_db.times_per_timeline(); self.time_control_ui .play_pause_ui(time_ctrl, re_ui, times_per_timeline, ui); @@ -685,15 +685,15 @@ fn help_button(ui: &mut egui::Ui) { /// /// This functions returns `true` iff the given time is safe to show. fn is_time_safe_to_show( - log_db: &re_data_store::LogDb, + store_db: &re_data_store::StoreDb, timeline: &re_arrow_store::Timeline, time: TimeReal, ) -> bool { - if log_db.num_timeless_messages() == 0 { + if store_db.num_timeless_messages() == 0 { return true; // no timeless messages, no problem } - if let Some(times) = log_db.entity_db.tree.prefix_times.get(timeline) { + if let Some(times) = store_db.entity_db.tree.prefix_times.get(timeline) { if let Some(first_time) = times.min_key() { let margin = match timeline.typ() { re_arrow_store::TimeType::Time => TimeInt::from_seconds(10_000), @@ -710,7 +710,7 @@ fn is_time_safe_to_show( fn current_time_ui(ctx: &ViewerContext<'_>, ui: &mut egui::Ui) { if let Some(time_int) = ctx.rec_cfg.time_ctrl.time_int() { let timeline = ctx.rec_cfg.time_ctrl.timeline(); - if is_time_safe_to_show(ctx.log_db, timeline, time_int.into()) { + if is_time_safe_to_show(ctx.store_db, timeline, time_int.into()) { let time_type = ctx.rec_cfg.time_ctrl.time_type(); ui.monospace(time_type.format(time_int)); } @@ -727,7 +727,7 @@ fn initialize_time_ranges_ui( re_tracing::profile_function!(); // If there's any timeless data, add the "beginning range" that contains timeless data. 
- let mut time_range = if ctx.log_db.num_timeless_messages() > 0 { + let mut time_range = if ctx.store_db.num_timeless_messages() > 0 { vec![TimeRange { min: TimeInt::BEGINNING, max: TimeInt::BEGINNING, @@ -737,7 +737,7 @@ fn initialize_time_ranges_ui( }; if let Some(times) = ctx - .log_db + .store_db .entity_db .tree .prefix_times diff --git a/crates/re_time_panel/src/time_selection_ui.rs b/crates/re_time_panel/src/time_selection_ui.rs index c594d99e05f3..174f84d64249 100644 --- a/crates/re_time_panel/src/time_selection_ui.rs +++ b/crates/re_time_panel/src/time_selection_ui.rs @@ -1,13 +1,13 @@ use egui::{CursorIcon, Id, NumExt as _, Rect}; -use re_data_store::LogDb; +use re_data_store::StoreDb; use re_log_types::{Duration, TimeInt, TimeRangeF, TimeReal, TimeType}; use re_viewer_context::{Looping, TimeControl}; use super::{is_time_safe_to_show, time_ranges_ui::TimeRangesUi}; pub fn loop_selection_ui( - log_db: &LogDb, + store_db: &StoreDb, time_ctrl: &mut TimeControl, time_ranges_ui: &TimeRangesUi, ui: &mut egui::Ui, @@ -76,8 +76,8 @@ pub fn loop_selection_ui( if is_active && !selected_range.is_empty() - && is_time_safe_to_show(log_db, &timeline, selected_range.min) - && is_time_safe_to_show(log_db, &timeline, selected_range.max) + && is_time_safe_to_show(store_db, &timeline, selected_range.min) + && is_time_safe_to_show(store_db, &timeline, selected_range.max) { paint_range_text(time_ctrl, selected_range, ui, time_area_painter, rect); } diff --git a/crates/re_viewer/src/app.rs b/crates/re_viewer/src/app.rs index 2f133b0e3d95..ec06f6cb1f53 100644 --- a/crates/re_viewer/src/app.rs +++ b/crates/re_viewer/src/app.rs @@ -1,16 +1,15 @@ use std::{any::Any, hash::Hash}; use ahash::HashMap; -use anyhow::Context; use egui::NumExt as _; use itertools::Itertools as _; use poll_promise::Promise; use web_time::Instant; use re_arrow_store::{DataStoreConfig, DataStoreStats}; -use re_data_store::log_db::LogDb; +use re_data_store::store_db::StoreDb; use re_format::format_number; -use re_log_types::{ApplicationId, LogMsg, RecordingId, RecordingType}; +use re_log_types::{ApplicationId, LogMsg, StoreId, StoreKind}; use re_renderer::WgpuResourcePoolStatistics; use re_smart_channel::Receiver; use re_ui::{toasts, Command}; @@ -20,7 +19,7 @@ use re_viewer_context::{ }; use re_viewport::ViewportState; -use crate::{ui::Blueprint, viewer_analytics::ViewerAnalytics}; +use crate::{ui::Blueprint, viewer_analytics::ViewerAnalytics, StoreHub}; #[cfg(not(target_arch = "wasm32"))] use re_log_types::TimeRangeF; @@ -94,8 +93,8 @@ pub struct App { rx: Receiver, - /// Where the logs are stored. - log_dbs: HashMap, + /// Where the recordings and blueprints are stored. 
+ store_hub: crate::StoreHub, /// What is serialized state: AppState, @@ -190,7 +189,7 @@ impl App { text_log_rx, component_ui_registry: re_data_ui::create_component_ui_registry(), rx, - log_dbs: Default::default(), + store_hub: Default::default(), state, pending_promises: Default::default(), toasts: toasts::Toasts::new(), @@ -438,8 +437,8 @@ impl App { let Some(rec_cfg) = self.state.recording_configs.get_mut(rec_id) else { return; }; let time_ctrl = &mut rec_cfg.time_ctrl; - let Some(log_db) = self.log_dbs.get(rec_id) else { return; }; - let times_per_timeline = log_db.times_per_timeline(); + let Some(store_db) = self.store_hub.recording(rec_id) else { return; }; + let times_per_timeline = store_db.times_per_timeline(); match command { TimeControlCommand::TogglePlayPause => { @@ -461,11 +460,11 @@ impl App { } fn selected_app_id(&self) -> ApplicationId { - self.log_db() - .and_then(|log_db| { - log_db - .recording_info() - .map(|rec_info| rec_info.application_id.clone()) + self.store_db() + .and_then(|store_db| { + store_db + .store_info() + .map(|store_info| store_info.application_id.clone()) }) .unwrap_or(ApplicationId::unknown()) } @@ -504,32 +503,10 @@ impl App { // Materialize the blueprint from the DB if the selected blueprint id isn't the default one. fn load_or_create_blueprint( &mut self, - this_frame_blueprint_id: &RecordingId, + this_frame_blueprint_id: &StoreId, egui_ctx: &egui::Context, ) -> Blueprint { - // TODO(jleibs): If the blueprint doesn't exist this probably means we are - // initializing a new default-blueprint for the application in question. - // Make sure it's marked as a blueprint. - let blueprint_db = self - .log_dbs - .entry(this_frame_blueprint_id.clone()) - .or_insert_with(|| { - let mut blueprint_db = LogDb::new(this_frame_blueprint_id.clone()); - - blueprint_db.add_begin_recording_msg(&re_log_types::SetRecordingInfo { - row_id: re_log_types::RowId::random(), - info: re_log_types::RecordingInfo { - application_id: this_frame_blueprint_id.as_str().into(), - recording_id: this_frame_blueprint_id.clone(), - is_official_example: false, - started: re_log_types::Time::now(), - recording_source: re_log_types::RecordingSource::Other("viewer".to_owned()), - recording_type: RecordingType::Blueprint, - }, - }); - - blueprint_db - }); + let blueprint_db = self.store_hub.blueprint_entry(this_frame_blueprint_id); Blueprint::from_db(egui_ctx, blueprint_db) } } @@ -608,28 +585,28 @@ impl eframe::App for App { .get(&self.selected_app_id()) .cloned() .unwrap_or_else(|| { - RecordingId::from_string(RecordingType::Blueprint, self.selected_app_id().0) + StoreId::from_string(StoreKind::Blueprint, self.selected_app_id().0) }); let store_config = self - .log_db() - .map(|log_db| log_db.entity_db.data_store.config().clone()) + .store_db() + .map(|store_db| store_db.entity_db.data_store.config().clone()) .unwrap_or_default(); let store_stats = self - .log_db() - .map(|log_db| DataStoreStats::from_store(&log_db.entity_db.data_store)) + .store_db() + .map(|store_db| DataStoreStats::from_store(&store_db.entity_db.data_store)) .unwrap_or_default(); let blueprint_config = self - .log_dbs - .get_mut(&active_blueprint_id) + .store_hub + .blueprint_mut(&active_blueprint_id) .map(|bp_db| bp_db.entity_db.data_store.config().clone()) .unwrap_or_default(); let blueprint_stats = self - .log_dbs - .get_mut(&active_blueprint_id) + .store_hub + .blueprint_mut(&active_blueprint_id) .map(|bp_db| DataStoreStats::from_store(&bp_db.entity_db.data_store)) .unwrap_or_default(); @@ -679,18 +656,18 @@ impl 
eframe::App for App {
                 &blueprint_stats,
             );

-            // NOTE: cannot call `.log_db()` due to borrowck shenanigans
-            if let Some(log_db) = self
+            // NOTE: cannot call `.store_db()` due to borrowck shenanigans
+            if let Some(store_db) = self
                 .state
                 .selected_rec_id
                 .as_ref()
-                .and_then(|rec_id| self.log_dbs.get(rec_id))
+                .and_then(|rec_id| self.store_hub.recording(rec_id))
             {
                 recording_config_entry(
                     &mut self.state.recording_configs,
-                    log_db.recording_id().clone(),
+                    store_db.store_id().clone(),
                     self.rx.source(),
-                    log_db,
+                    store_db,
                 )
                 .selection_state
                 .on_frame_start(|item| blueprint.is_item_valid(item));
@@ -706,14 +683,14 @@ impl eframe::App for App {
             {
                 render_ctx.begin_frame();

-                if log_db.is_empty() {
+                if store_db.is_empty() {
                     wait_screen_ui(ui, &self.rx);
                 } else {
                     self.state.show(
                         &mut blueprint,
                         ui,
                         render_ctx,
-                        log_db,
+                        store_db,
                         &self.re_ui,
                         &self.component_ui_registry,
                         &self.space_view_class_registry,
@@ -751,7 +728,7 @@ impl eframe::App for App {
         );

         // If there was a real active blueprint that came from the store, save the changes back.
-        if let Some(blueprint_db) = self.log_dbs.get_mut(&active_blueprint_id) {
+        if let Some(blueprint_db) = self.store_hub.blueprint_mut(&active_blueprint_id) {
             blueprint.sync_changes_to_store(&blueprint_snapshot, blueprint_db);
         } else {
             // This shouldn't happen because we should have used `active_blueprint_id` to
@@ -898,21 +875,20 @@ impl App {
                 }
             };

-            let recording_id = msg.recording_id();
+            let store_id = msg.store_id();

-            let is_new_recording = if let LogMsg::SetRecordingInfo(msg) = &msg {
-                match msg.info.recording_id.variant {
-                    RecordingType::Data => {
+            let is_new_store = if let LogMsg::SetStoreInfo(msg) = &msg {
+                match msg.info.store_id.kind {
+                    StoreKind::Recording => {
                         re_log::debug!("Opening a new recording: {:?}", msg.info);
-                        self.state.selected_rec_id = Some(recording_id.clone());
+                        self.state.selected_rec_id = Some(store_id.clone());
                     }
-                    RecordingType::Blueprint => {
+                    StoreKind::Blueprint => {
                         re_log::debug!("Opening a new blueprint: {:?}", msg.info);
-                        self.state.selected_blueprint_by_app.insert(
-                            msg.info.application_id.clone(),
-                            msg.info.recording_id.clone(),
-                        );
+                        self.state
+                            .selected_blueprint_by_app
+                            .insert(msg.info.application_id.clone(), msg.info.store_id.clone());
                     }
                 }
                 true
@@ -920,24 +896,21 @@ impl App {
                 false
             };

-            let log_db = self
-                .log_dbs
-                .entry(recording_id.clone())
-                .or_insert_with(|| LogDb::new(recording_id.clone()));
+            let store_db = self.store_hub.store_db_entry(store_id);

-            if log_db.data_source.is_none() {
-                log_db.data_source = Some(self.rx.source().clone());
+            if store_db.data_source.is_none() {
+                store_db.data_source = Some(self.rx.source().clone());
             }

-            if let Err(err) = log_db.add(&msg) {
+            if let Err(err) = store_db.add(&msg) {
                 re_log::error!("Failed to add incoming msg: {err}");
             };

-            if is_new_recording {
+            if is_new_store && store_db.store_kind() == StoreKind::Recording {
                 // Do analytics after ingesting the new message,
-                // because thats when the `log_db.recording_info` is set,
+                // because that's when the `store_db.store_info` is set,
                 // which we use in the analytics call.
- self.analytics.on_open_recording(log_db); + self.analytics.on_open_recording(store_db); } if start.elapsed() > web_time::Duration::from_millis(10) { @@ -950,24 +923,25 @@ impl App { fn cleanup(&mut self) { re_tracing::profile_function!(); - self.log_dbs.retain(|_, log_db| !log_db.is_empty()); + self.store_hub.purge_empty(); if !self .state .selected_rec_id .as_ref() - .map_or(false, |rec_id| self.log_dbs.contains_key(rec_id)) + .map_or(false, |rec_id| self.store_hub.contains_recording(rec_id)) { + // Pick any: self.state.selected_rec_id = self - .log_dbs - .values() - .find(|log| log.recording_type() == RecordingType::Data) - .map(|log| log.recording_id().clone()); + .store_hub + .recordings() + .next() + .map(|log| log.store_id().clone()); } self.state .recording_configs - .retain(|recording_id, _| self.log_dbs.contains_key(recording_id)); + .retain(|store_id, _| self.store_hub.contains_recording(store_id)); } fn purge_memory_if_needed(&mut self) { @@ -1007,9 +981,7 @@ impl App { format_bytes(counted as f64 * fraction_to_purge as f64) ); } - for log_db in self.log_dbs.values_mut() { - log_db.purge_fraction_of_ram(fraction_to_purge); - } + self.store_hub.purge_fraction_of_ram(fraction_to_purge); self.state.cache.purge_memory(); } @@ -1044,23 +1016,35 @@ impl App { egui_ctx.set_style((*style).clone()); } - /// Do we have an open `LogDb` that is non-empty? - fn log_db_is_nonempty(&self) -> bool { - self.log_db().map_or(false, |log_db| !log_db.is_empty()) + /// Do we have an open `StoreDb` that is non-empty? + fn store_db_is_nonempty(&self) -> bool { + self.store_db() + .map_or(false, |store_db| !store_db.is_empty()) } - /// Get access to the currently shown [`LogDb`], if any. - pub fn log_db(&self) -> Option<&LogDb> { + /// Get access to the currently shown [`StoreDb`], if any. 
+ pub fn store_db(&self) -> Option<&StoreDb> { self.state .selected_rec_id .as_ref() - .and_then(|rec_id| self.log_dbs.get(rec_id)) + .and_then(|rec_id| self.store_hub.recording(rec_id)) } - fn show_log_db(&mut self, log_db: LogDb) { - self.analytics.on_open_recording(&log_db); - self.state.selected_rec_id = Some(log_db.recording_id().clone()); - self.log_dbs.insert(log_db.recording_id().clone(), log_db); + fn on_rrd_loaded(&mut self, store_hub: StoreHub) { + if let Some(store_db) = store_hub.recordings().next() { + self.state.selected_rec_id = Some(store_db.store_id().clone()); + self.analytics.on_open_recording(store_db); + } + + for blueprint_db in store_hub.blueprints() { + if let Some(app_id) = blueprint_db.app_id() { + self.state + .selected_blueprint_by_app + .insert(app_id.clone(), blueprint_db.store_id().clone()); + } + } + + self.store_hub.append(store_hub); } fn handle_dropping_files(&mut self, egui_ctx: &egui::Context) { @@ -1076,8 +1060,8 @@ impl App { if let Some(file) = egui_ctx.input(|i| i.raw.dropped_files.first().cloned()) { if let Some(bytes) = &file.bytes { let mut bytes: &[u8] = &(*bytes)[..]; - if let Some(log_db) = load_file_contents(&file.name, &mut bytes) { - self.show_log_db(log_db); + if let Some(rrd) = load_file_contents(&file.name, &mut bytes) { + self.on_rrd_loaded(rrd); #[allow(clippy::needless_return)] // false positive on wasm32 return; @@ -1086,8 +1070,8 @@ impl App { #[cfg(not(target_arch = "wasm32"))] if let Some(path) = &file.path { - if let Some(log_db) = load_file_path(path) { - self.show_log_db(log_db); + if let Some(rrd) = load_file_path(path) { + self.on_rrd_loaded(rrd); } } } @@ -1146,12 +1130,12 @@ struct AppState { cache: Caches, #[serde(skip)] - selected_rec_id: Option, + selected_rec_id: Option, #[serde(skip)] - selected_blueprint_by_app: HashMap, + selected_blueprint_by_app: HashMap, - /// Configuration for the current recording (found in [`LogDb`]). - recording_configs: HashMap, + /// Configuration for the current recording (found in [`StoreDb`]). + recording_configs: HashMap, /// Which view panel is currently being shown panel_selection: PanelSelection, @@ -1176,7 +1160,7 @@ impl AppState { blueprint: &mut Blueprint, ui: &mut egui::Ui, render_ctx: &mut re_renderer::RenderContext, - log_db: &LogDb, + store_db: &StoreDb, re_ui: &re_ui::ReUi, component_ui_registry: &ComponentUiRegistry, space_view_class_registry: &SpaceViewClassRegistry, @@ -1200,9 +1184,9 @@ impl AppState { let rec_cfg = recording_config_entry( recording_configs, - log_db.recording_id().clone(), + store_db.store_id().clone(), rx.source(), - log_db, + store_db, ); let mut ctx = ViewerContext { @@ -1210,7 +1194,7 @@ impl AppState { cache, space_view_class_registry, component_ui_registry, - log_db, + store_db, rec_cfg, re_ui, render_ctx, @@ -1238,10 +1222,11 @@ impl AppState { // so we have one frame to see the first data before we move the time. 
let dt = ui.ctx().input(|i| i.stable_dt); let more_data_is_coming = rx.is_connected(); - let needs_repaint = - ctx.rec_cfg - .time_ctrl - .update(log_db.times_per_timeline(), dt, more_data_is_coming); + let needs_repaint = ctx.rec_cfg.time_ctrl.update( + store_db.times_per_timeline(), + dt, + more_data_is_coming, + ); if needs_repaint == re_viewer_context::NeedsRepaint::Yes { ui.ctx().request_repaint(); } @@ -1744,7 +1729,7 @@ fn save_buttons_ui(ui: &mut egui::Ui, app: &mut App) { }); }); } else { - ui.add_enabled_ui(app.log_db_is_nonempty(), |ui| { + ui.add_enabled_ui(app.store_db_is_nonempty(), |ui| { if ui .add(save_button) .on_hover_text("Save all data to a Rerun data file (.rrd)") @@ -1780,15 +1765,15 @@ fn open(app: &mut App) { .add_filter("rerun data file", &["rrd"]) .pick_file() { - if let Some(log_db) = load_file_path(&path) { - app.show_log_db(log_db); + if let Some(store_db) = load_file_path(&path) { + app.on_rrd_loaded(store_db); } } } #[cfg(not(target_arch = "wasm32"))] fn save(app: &mut App, loop_selection: Option<(re_data_store::Timeline, TimeRangeF)>) { - let Some(log_db) = app.log_db() else { + let Some(store_db) = app.store_db() else { // NOTE: Can only happen if saving through the command palette. re_log::error!("No data to save!"); return; @@ -1805,7 +1790,7 @@ fn save(app: &mut App, loop_selection: Option<(re_data_store::Timeline, TimeRang .set_title(title) .save_file() { - let f = match save_database_to_file(log_db, path, loop_selection) { + let f = match save_database_to_file(store_db, path, loop_selection) { Ok(f) => f, Err(err) => { re_log::error!("File saving failed: {err}"); @@ -1820,7 +1805,7 @@ fn save(app: &mut App, loop_selection: Option<(re_data_store::Timeline, TimeRang } fn main_view_selector_ui(ui: &mut egui::Ui, app: &mut App) { - if app.log_db_is_nonempty() { + if app.store_db_is_nonempty() { ui.horizontal(|ui| { ui.label("Main view:"); if ui @@ -1838,37 +1823,36 @@ fn main_view_selector_ui(ui: &mut egui::Ui, app: &mut App) { } fn recordings_menu(ui: &mut egui::Ui, app: &mut App) { - let log_dbs = app - .log_dbs - .values() - .filter(|log| log.recording_type() == RecordingType::Data) - .sorted_by_key(|log_db| log_db.recording_info().map(|ri| ri.started)) + let store_dbs = app + .store_hub + .recordings() + .sorted_by_key(|store_db| store_db.store_info().map(|ri| ri.started)) .collect_vec(); - if log_dbs.is_empty() { + if store_dbs.is_empty() { ui.weak("(empty)"); return; } ui.style_mut().wrap = Some(false); - for log_db in &log_dbs { - let info = if let Some(rec_info) = log_db.recording_info() { + for store_db in &store_dbs { + let info = if let Some(store_info) = store_db.store_info() { format!( "{} - {}", - rec_info.application_id, - rec_info.started.format() + store_info.application_id, + store_info.started.format() ) } else { "".to_owned() }; if ui .radio( - app.state.selected_rec_id.as_ref() == Some(log_db.recording_id()), + app.state.selected_rec_id.as_ref() == Some(store_db.store_id()), info, ) .clicked() { - app.state.selected_rec_id = Some(log_db.recording_id().clone()); + app.state.selected_rec_id = Some(store_db.store_id().clone()); } } } @@ -1876,14 +1860,12 @@ fn recordings_menu(ui: &mut egui::Ui, app: &mut App) { fn blueprints_menu(ui: &mut egui::Ui, app: &mut App) { let app_id = app.selected_app_id(); let blueprint_dbs = app - .log_dbs - .values() - .sorted_by_key(|log_db| log_db.recording_info().map(|ri| ri.started)) + .store_hub + .blueprints() + .sorted_by_key(|store_db| store_db.store_info().map(|ri| ri.started)) .filter(|log| { - 
log.recording_type() == RecordingType::Blueprint - && log - .recording_info() - .map_or(false, |ri| ri.application_id == app_id) + log.store_info() + .map_or(false, |ri| ri.application_id == app_id) }) .collect_vec(); @@ -1893,18 +1875,18 @@ fn blueprints_menu(ui: &mut egui::Ui, app: &mut App) { } ui.style_mut().wrap = Some(false); - for log_db in blueprint_dbs + for store_db in blueprint_dbs .iter() - .filter(|log| log.recording_type() == RecordingType::Blueprint) + .filter(|log| log.store_kind() == StoreKind::Blueprint) { - let info = if let Some(rec_info) = log_db.recording_info() { - if rec_info.is_app_default_blueprint() { - format!("{} - Default Blueprint", rec_info.application_id,) + let info = if let Some(store_info) = store_db.store_info() { + if store_info.is_app_default_blueprint() { + format!("{} - Default Blueprint", store_info.application_id,) } else { format!( "{} - {}", - rec_info.application_id, - rec_info.started.format() + store_info.application_id, + store_info.started.format() ) } } else { @@ -1912,14 +1894,14 @@ fn blueprints_menu(ui: &mut egui::Ui, app: &mut App) { }; if ui .radio( - app.state.selected_blueprint_by_app.get(&app_id) == Some(log_db.recording_id()), + app.state.selected_blueprint_by_app.get(&app_id) == Some(store_db.store_id()), info, ) .clicked() { app.state .selected_blueprint_by_app - .insert(app_id.clone(), log_db.recording_id().clone()); + .insert(app_id.clone(), store_db.store_id().clone()); } } } @@ -2089,7 +2071,7 @@ fn debug_menu_options_ui(ui: &mut egui::Ui, options: &mut AppOptions, _frame: &m /// specific time range will be accounted for. #[cfg(not(target_arch = "wasm32"))] fn save_database_to_file( - log_db: &LogDb, + store_db: &StoreDb, path: std::path::PathBuf, time_selection: Option<(re_data_store::Timeline, TimeRangeF)>, ) -> anyhow::Result anyhow::Result> { @@ -2097,13 +2079,13 @@ fn save_database_to_file( re_tracing::profile_scope!("dump_messages"); - let begin_rec_msg = log_db + let begin_rec_msg = store_db .recording_msg() - .map(|msg| LogMsg::SetRecordingInfo(msg.clone())); + .map(|msg| LogMsg::SetStoreInfo(msg.clone())); - let ent_op_msgs = log_db + let ent_op_msgs = store_db .iter_entity_op_msgs() - .map(|msg| LogMsg::EntityPathOpMsg(log_db.recording_id().clone(), msg.clone())) + .map(|msg| LogMsg::EntityPathOpMsg(store_db.store_id().clone(), msg.clone())) .collect_vec(); let time_filter = time_selection.map(|(timeline, range)| { @@ -2112,14 +2094,14 @@ fn save_database_to_file( TimeRange::new(range.min.floor(), range.max.ceil()), ) }); - let data_msgs: Result, _> = log_db + let data_msgs: Result, _> = store_db .entity_db .data_store .to_data_tables(time_filter) .map(|table| { table .to_arrow_msg() - .map(|msg| LogMsg::ArrowMsg(log_db.recording_id().clone(), msg)) + .map(|msg| LogMsg::ArrowMsg(store_db.store_id().clone(), msg)) }) .collect(); @@ -2145,42 +2127,27 @@ fn save_database_to_file( }) } -#[allow(unused_mut)] -fn load_rrd_to_log_db(mut read: impl std::io::Read) -> anyhow::Result { - re_tracing::profile_function!(); - - let mut decoder = re_log_encoding::decoder::Decoder::new(read)?; - - let first = decoder.next().context("no messages in recording")??; - - let mut log_db = LogDb::new(first.recording_id().clone()); - log_db.add(&first)?; - - for msg in decoder { - log_db.add(&msg?)?; - } - Ok(log_db) -} - #[cfg(not(target_arch = "wasm32"))] #[must_use] -fn load_file_path(path: &std::path::Path) -> Option { - fn load_file_path_impl(path: &std::path::Path) -> anyhow::Result { +fn load_file_path(path: &std::path::Path) -> 
Option { + fn load_file_path_impl(path: &std::path::Path) -> anyhow::Result { re_tracing::profile_function!(); use anyhow::Context as _; let file = std::fs::File::open(path).context("Failed to open file")?; - load_rrd_to_log_db(file) + StoreHub::from_rrd(file) } re_log::info!("Loading {path:?}…"); match load_file_path_impl(path) { - Ok(mut new_log_db) => { + Ok(mut rrd) => { re_log::info!("Loaded {path:?}"); - new_log_db.data_source = Some(re_smart_channel::SmartChannelSource::Files { - paths: vec![path.into()], - }); - Some(new_log_db) + for store_db in rrd.store_dbs_mut() { + store_db.data_source = Some(re_smart_channel::SmartChannelSource::Files { + paths: vec![path.into()], + }); + } + Some(rrd) } Err(err) => { let msg = format!("Failed loading {path:?}: {}", re_error::format(&err)); @@ -2195,14 +2162,16 @@ fn load_file_path(path: &std::path::Path) -> Option { } #[must_use] -fn load_file_contents(name: &str, read: impl std::io::Read) -> Option { - match load_rrd_to_log_db(read) { - Ok(mut log_db) => { +fn load_file_contents(name: &str, read: impl std::io::Read) -> Option { + match StoreHub::from_rrd(read) { + Ok(mut rrd) => { re_log::info!("Loaded {name:?}"); - log_db.data_source = Some(re_smart_channel::SmartChannelSource::Files { - paths: vec![name.into()], - }); - Some(log_db) + for store_db in rrd.store_dbs_mut() { + store_db.data_source = Some(re_smart_channel::SmartChannelSource::Files { + paths: vec![name.into()], + }); + } + Some(rrd) } Err(err) => { let msg = format!("Failed loading {name:?}: {}", re_error::format(&err)); @@ -2217,19 +2186,19 @@ fn load_file_contents(name: &str, read: impl std::io::Read) -> Option { } fn recording_config_entry<'cfgs>( - configs: &'cfgs mut HashMap, - id: RecordingId, + configs: &'cfgs mut HashMap, + id: StoreId, data_source: &'_ re_smart_channel::SmartChannelSource, - log_db: &'_ LogDb, + store_db: &'_ StoreDb, ) -> &'cfgs mut RecordingConfig { configs .entry(id) - .or_insert_with(|| new_recording_confg(data_source, log_db)) + .or_insert_with(|| new_recording_confg(data_source, store_db)) } fn new_recording_confg( data_source: &'_ re_smart_channel::SmartChannelSource, - log_db: &'_ LogDb, + store_db: &'_ StoreDb, ) -> RecordingConfig { let play_state = match data_source { // Play files from the start by default - it feels nice and alive./ @@ -2248,7 +2217,7 @@ fn new_recording_confg( rec_cfg .time_ctrl - .set_play_state(log_db.times_per_timeline(), play_state); + .set_play_state(store_db.times_per_timeline(), play_state); rec_cfg } diff --git a/crates/re_viewer/src/lib.rs b/crates/re_viewer/src/lib.rs index 23bc4937ccdc..a110fd4fd988 100644 --- a/crates/re_viewer/src/lib.rs +++ b/crates/re_viewer/src/lib.rs @@ -10,6 +10,7 @@ pub mod env_vars; mod profiler; mod remote_viewer_app; mod screenshotter; +mod store_hub; mod ui; mod viewer_analytics; @@ -18,6 +19,7 @@ pub(crate) use ui::{memory_panel, selection_panel}; pub use app::{App, StartupOptions}; pub use remote_viewer_app::RemoteViewerApp; +pub use store_hub::StoreHub; pub mod external { pub use {eframe, egui}; @@ -81,18 +83,18 @@ pub enum AppEnvironment { } impl AppEnvironment { - pub fn from_recording_source(source: &re_log_types::RecordingSource) -> Self { - use re_log_types::RecordingSource; + pub fn from_store_source(source: &re_log_types::StoreSource) -> Self { + use re_log_types::StoreSource; match source { - RecordingSource::PythonSdk(python_version) => Self::PythonSdk(python_version.clone()), - RecordingSource::RustSdk { + StoreSource::PythonSdk(python_version) => 
Self::PythonSdk(python_version.clone()),
+            StoreSource::RustSdk {
                 rustc_version: rust_version,
                 llvm_version,
             } => Self::RustSdk {
                 rustc_version: rust_version.clone(),
                 llvm_version: llvm_version.clone(),
             },
-            RecordingSource::Unknown | RecordingSource::Other(_) => Self::RustSdk {
+            StoreSource::Unknown | StoreSource::Other(_) => Self::RustSdk {
                 rustc_version: "unknown".into(),
                 llvm_version: "unknown".into(),
             },
diff --git a/crates/re_viewer/src/store_hub.rs b/crates/re_viewer/src/store_hub.rs
new file mode 100644
index 000000000000..5e5c21142223
--- /dev/null
+++ b/crates/re_viewer/src/store_hub.rs
@@ -0,0 +1,151 @@
+use re_data_store::StoreDb;
+use re_log_types::{StoreId, StoreKind};
+
+/// Stores many [`StoreDb`]s of recordings and blueprints.
+#[derive(Default)]
+pub struct StoreHub {
+    // TODO(emilk): two separate maps per [`StoreKind`].
+    store_dbs: ahash::HashMap<StoreId, StoreDb>,
+}
+
+impl StoreHub {
+    /// Decode an rrd stream.
+    /// It can theoretically contain multiple recordings, and blueprints.
+    pub fn from_rrd(read: impl std::io::Read) -> anyhow::Result<Self> {
+        re_tracing::profile_function!();
+
+        let decoder = re_log_encoding::decoder::Decoder::new(read)?;
+
+        let mut slf = Self::default();
+
+        for msg in decoder {
+            let msg = msg?;
+            slf.store_db_entry(msg.store_id()).add(&msg)?;
+        }
+        Ok(slf)
+    }
+
+    /// Returns either a recording or blueprint [`StoreDb`].
+    /// One is created if it doesn't already exist.
+    pub fn store_db_entry(&mut self, id: &StoreId) -> &mut StoreDb {
+        self.store_dbs
+            .entry(id.clone())
+            .or_insert_with(|| StoreDb::new(id.clone()))
+    }
+
+    /// All loaded [`StoreDb`], both recordings and blueprints, in arbitrary order.
+    pub fn store_dbs(&self) -> impl Iterator<Item = &StoreDb> {
+        self.store_dbs.values()
+    }
+
+    /// All loaded [`StoreDb`], both recordings and blueprints, in arbitrary order.
+    pub fn store_dbs_mut(&mut self) -> impl Iterator<Item = &mut StoreDb> {
+        self.store_dbs.values_mut()
+    }
+
+    pub fn append(&mut self, mut other: Self) {
+        for (id, store_db) in other.store_dbs.drain() {
+            self.store_dbs.insert(id, store_db);
+        }
+    }
+
+    // --
+
+    pub fn contains_recording(&self, id: &StoreId) -> bool {
+        debug_assert_eq!(id.kind, StoreKind::Recording);
+        self.store_dbs.contains_key(id)
+    }
+
+    pub fn recording(&self, id: &StoreId) -> Option<&StoreDb> {
+        debug_assert_eq!(id.kind, StoreKind::Recording);
+        self.store_dbs.get(id)
+    }
+
+    pub fn recording_mut(&mut self, id: &StoreId) -> Option<&mut StoreDb> {
+        debug_assert_eq!(id.kind, StoreKind::Recording);
+        self.store_dbs.get_mut(id)
+    }
+
+    /// Creates one if it doesn't exist.
+    pub fn recording_entry(&mut self, id: &StoreId) -> &mut StoreDb {
+        debug_assert_eq!(id.kind, StoreKind::Recording);
+        self.store_dbs
+            .entry(id.clone())
+            .or_insert_with(|| StoreDb::new(id.clone()))
+    }
+
+    pub fn insert_recording(&mut self, store_db: StoreDb) {
+        debug_assert_eq!(store_db.store_kind(), StoreKind::Recording);
+        self.store_dbs.insert(store_db.store_id().clone(), store_db);
+    }
+
+    pub fn recordings(&self) -> impl Iterator<Item = &StoreDb> {
+        self.store_dbs
+            .values()
+            .filter(|log| log.store_kind() == StoreKind::Recording)
+    }
+
+    pub fn blueprints(&self) -> impl Iterator<Item = &StoreDb> {
+        self.store_dbs
+            .values()
+            .filter(|log| log.store_kind() == StoreKind::Blueprint)
+    }
+
+    // --
+
+    pub fn contains_blueprint(&self, id: &StoreId) -> bool {
+        debug_assert_eq!(id.kind, StoreKind::Blueprint);
+        self.store_dbs.contains_key(id)
+    }
+
+    pub fn blueprint(&self, id: &StoreId) -> Option<&StoreDb> {
+        debug_assert_eq!(id.kind, StoreKind::Blueprint);
+        self.store_dbs.get(id)
+    }
+
+    pub fn blueprint_mut(&mut self, id: &StoreId) -> Option<&mut StoreDb> {
+        debug_assert_eq!(id.kind, StoreKind::Blueprint);
+        self.store_dbs.get_mut(id)
+    }
+
+    /// Creates one if it doesn't exist.
+    pub fn blueprint_entry(&mut self, id: &StoreId) -> &mut StoreDb {
+        debug_assert_eq!(id.kind, StoreKind::Blueprint);
+
+        self.store_dbs.entry(id.clone()).or_insert_with(|| {
+            // TODO(jleibs): If the blueprint doesn't exist this probably means we are
+            // initializing a new default-blueprint for the application in question.
+            // Make sure it's marked as a blueprint.
+
+            let mut blueprint_db = StoreDb::new(id.clone());
+
+            blueprint_db.add_begin_recording_msg(&re_log_types::SetStoreInfo {
+                row_id: re_log_types::RowId::random(),
+                info: re_log_types::StoreInfo {
+                    application_id: id.as_str().into(),
+                    store_id: id.clone(),
+                    is_official_example: false,
+                    started: re_log_types::Time::now(),
+                    store_source: re_log_types::StoreSource::Other("viewer".to_owned()),
+                    store_kind: StoreKind::Blueprint,
+                },
+            });
+
+            blueprint_db
+        })
+    }
+
+    // --
+
+    pub fn purge_empty(&mut self) {
+        self.store_dbs.retain(|_, store_db| !store_db.is_empty());
+    }
+
+    pub fn purge_fraction_of_ram(&mut self, fraction_to_purge: f32) {
+        re_tracing::profile_function!();
+
+        for store_db in self.store_dbs.values_mut() {
+            store_db.purge_fraction_of_ram(fraction_to_purge);
+        }
+    }
+}
diff --git a/crates/re_viewer/src/ui/blueprint.rs b/crates/re_viewer/src/ui/blueprint.rs
index 28e242386d58..fea78efb7392 100644
--- a/crates/re_viewer/src/ui/blueprint.rs
+++ b/crates/re_viewer/src/ui/blueprint.rs
@@ -32,7 +32,7 @@ impl Blueprint {
     ) {
         re_tracing::profile_function!();

-        let spaces_info = SpaceInfoCollection::new(&ctx.log_db.entity_db);
+        let spaces_info = SpaceInfoCollection::new(&ctx.store_db.entity_db);

         self.viewport.on_frame_start(ctx, &spaces_info);
diff --git a/crates/re_viewer/src/ui/blueprint_load.rs b/crates/re_viewer/src/ui/blueprint_load.rs
index 1e4632590ac0..08d19b111a9d 100644
--- a/crates/re_viewer/src/ui/blueprint_load.rs
+++ b/crates/re_viewer/src/ui/blueprint_load.rs
@@ -14,7 +14,7 @@ use super::Blueprint;
 use crate::blueprint_components::panel::PanelState;

 impl Blueprint {
-    pub fn from_db(egui_ctx: &egui::Context, blueprint_db: &re_data_store::LogDb) -> Self {
+    pub fn from_db(egui_ctx: &egui::Context, blueprint_db: &re_data_store::StoreDb) -> Self {
         let mut ret = Self::new(egui_ctx);

         let space_views: HashMap<SpaceViewId, SpaceViewBlueprint> = if let Some(space_views) =
@@ -57,21 +57,21 @@ impl Blueprint {
     }
 }

-fn load_panel_state(path: &EntityPath, blueprint_db:
&re_data_store::LogDb) -> Option { +fn load_panel_state(path: &EntityPath, blueprint_db: &re_data_store::StoreDb) -> Option { query_timeless_single::(&blueprint_db.entity_db.data_store, path) .map(|p| p.expanded) } fn load_space_view( path: &EntityPath, - blueprint_db: &re_data_store::LogDb, + blueprint_db: &re_data_store::StoreDb, ) -> Option { query_timeless_single::(&blueprint_db.entity_db.data_store, path) .map(|c| c.space_view) } fn load_viewport( - blueprint_db: &re_data_store::LogDb, + blueprint_db: &re_data_store::StoreDb, space_views: HashMap, ) -> Viewport { let auto_space_views = query_timeless_single::( @@ -82,7 +82,7 @@ fn load_viewport( // Only enable auto-space-views if this is the app-default blueprint AutoSpaceViews( blueprint_db - .recording_info() + .store_info() .map_or(false, |ri| ri.is_app_default_blueprint()), ) }); diff --git a/crates/re_viewer/src/ui/blueprint_sync.rs b/crates/re_viewer/src/ui/blueprint_sync.rs index 41ea173f8497..d2a9edb8d116 100644 --- a/crates/re_viewer/src/ui/blueprint_sync.rs +++ b/crates/re_viewer/src/ui/blueprint_sync.rs @@ -15,7 +15,11 @@ use crate::blueprint_components::panel::PanelState; // Resolving and applying updates impl Blueprint { - pub fn sync_changes_to_store(&self, snapshot: &Self, blueprint_db: &mut re_data_store::LogDb) { + pub fn sync_changes_to_store( + &self, + snapshot: &Self, + blueprint_db: &mut re_data_store::StoreDb, + ) { // Update the panel states sync_panel_expanded( blueprint_db, @@ -55,7 +59,7 @@ impl Blueprint { } pub fn sync_panel_expanded( - blueprint_db: &mut re_data_store::LogDb, + blueprint_db: &mut re_data_store::StoreDb, panel_name: &str, expanded: bool, snapshot: bool, @@ -72,7 +76,7 @@ pub fn sync_panel_expanded( } pub fn sync_space_view( - blueprint_db: &mut re_data_store::LogDb, + blueprint_db: &mut re_data_store::StoreDb, space_view: &SpaceViewBlueprint, snapshot: Option<&SpaceViewBlueprint>, ) { @@ -94,7 +98,7 @@ pub fn sync_space_view( } } -pub fn clear_space_view(blueprint_db: &mut re_data_store::LogDb, space_view_id: &SpaceViewId) { +pub fn clear_space_view(blueprint_db: &mut re_data_store::StoreDb, space_view_id: &SpaceViewId) { let entity_path = EntityPath::from(format!( "{}/{}", SpaceViewComponent::SPACEVIEW_PREFIX, @@ -119,7 +123,7 @@ pub fn clear_space_view(blueprint_db: &mut re_data_store::LogDb, space_view_id: } pub fn sync_viewport( - blueprint_db: &mut re_data_store::LogDb, + blueprint_db: &mut re_data_store::StoreDb, viewport: &Viewport, snapshot: &Viewport, ) { diff --git a/crates/re_viewer/src/ui/memory_panel.rs b/crates/re_viewer/src/ui/memory_panel.rs index 9d8a42f12fbc..2ed7307aeef0 100644 --- a/crates/re_viewer/src/ui/memory_panel.rs +++ b/crates/re_viewer/src/ui/memory_panel.rs @@ -433,7 +433,7 @@ fn summarize_callstack(callstack: &str) -> String { ("App::receive_messages", "App::receive_messages"), ("w_store::store::ComponentBucket>::archive", "archive"), ("DataStore>::insert", "DataStore"), - ("LogDb", "LogDb"), + ("StoreDb", "StoreDb"), ("EntityDb", "EntityDb"), ("EntityTree", "EntityTree"), ("::LogMsg>::deserialize", "LogMsg"), diff --git a/crates/re_viewer/src/ui/selection_panel.rs b/crates/re_viewer/src/ui/selection_panel.rs index a80a1c0b8fca..4cfb0876ed20 100644 --- a/crates/re_viewer/src/ui/selection_panel.rs +++ b/crates/re_viewer/src/ui/selection_panel.rs @@ -450,7 +450,7 @@ fn pinhole_props_ui( entity_props: &mut EntityProperties, ) { let query = ctx.current_query(); - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; if 
store .query_latest_component::(entity_path, &query) .is_some() @@ -482,13 +482,13 @@ fn depth_props_ui( re_tracing::profile_function!(); let query = ctx.current_query(); - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; let tensor = store.query_latest_component::(entity_path, &query)?; if tensor.meaning != TensorDataMeaning::Depth { return Some(()); } let pinhole_ent_path = ctx - .log_db + .store_db .entity_db .data_store .query_latest_component_at_closest_ancestor::(entity_path, &query)? diff --git a/crates/re_viewer/src/viewer_analytics.rs b/crates/re_viewer/src/viewer_analytics.rs index 28ae10897c2f..c6ded4048da3 100644 --- a/crates/re_viewer/src/viewer_analytics.rs +++ b/crates/re_viewer/src/viewer_analytics.rs @@ -13,7 +13,7 @@ use re_analytics::{Analytics, Event, Property}; #[cfg(all(not(target_arch = "wasm32"), feature = "analytics"))] -use re_log_types::RecordingSource; +use re_log_types::StoreSource; pub struct ViewerAnalytics { // NOTE: Optional because it is possible to have the `analytics` feature flag enabled @@ -128,33 +128,37 @@ impl ViewerAnalytics { } /// When we have loaded the start of a new recording. - pub fn on_open_recording(&mut self, log_db: &re_data_store::LogDb) { - if let Some(rec_info) = log_db.recording_info() { + pub fn on_open_recording(&mut self, store_db: &re_data_store::StoreDb) { + if store_db.store_kind() != re_log_types::StoreKind::Recording { + return; + } + + if let Some(store_info) = store_db.store_info() { // We hash the application_id and recording_id unless this is an official example. // That's because we want to be able to track which are the popular examples, // but we don't want to collect actual application ids. self.register("application_id", { - let prop = Property::from(rec_info.application_id.0.clone()); - if rec_info.is_official_example { + let prop = Property::from(store_info.application_id.0.clone()); + if store_info.is_official_example { prop } else { prop.hashed() } }); self.register("recording_id", { - let prop = Property::from(rec_info.recording_id.to_string()); - if rec_info.is_official_example { + let prop = Property::from(store_info.store_id.to_string()); + if store_info.is_official_example { prop } else { prop.hashed() } }); - let recording_source = match &rec_info.recording_source { - RecordingSource::Unknown => "unknown".to_owned(), - RecordingSource::PythonSdk(_version) => "python_sdk".to_owned(), - RecordingSource::RustSdk { .. } => "rust_sdk".to_owned(), - RecordingSource::Other(other) => other.clone(), + let store_source = match &store_info.store_source { + StoreSource::Unknown => "unknown".to_owned(), + StoreSource::PythonSdk(_version) => "python_sdk".to_owned(), + StoreSource::RustSdk { .. } => "rust_sdk".to_owned(), + StoreSource::Other(other) => other.clone(), }; // If we happen to know the Python or Rust version used on the _recording machine_, @@ -162,26 +166,26 @@ impl ViewerAnalytics { // // The Python/Rust versions appearing in events always apply to the recording // environment, _not_ the environment in which the viewer is running! - if let RecordingSource::RustSdk { + if let StoreSource::RustSdk { rustc_version: rust_version, llvm_version, - } = &rec_info.recording_source + } = &store_info.store_source { self.register("rust_version", rust_version.to_string()); self.register("llvm_version", llvm_version.to_string()); self.deregister("python_version"); // can't be both! 
} - if let RecordingSource::PythonSdk(version) = &rec_info.recording_source { + if let StoreSource::PythonSdk(version) = &store_info.store_source { self.register("python_version", version.to_string()); self.deregister("rust_version"); // can't be both! self.deregister("llvm_version"); // can't be both! } - self.register("recording_source", recording_source); - self.register("is_official_example", rec_info.is_official_example); + self.register("store_source", store_source); + self.register("is_official_example", store_info.is_official_example); } - if let Some(data_source) = &log_db.data_source { + if let Some(data_source) = &store_db.data_source { let data_source = match data_source { re_smart_channel::SmartChannelSource::Files { .. } => "file", // .rrd, .png, .glb, … re_smart_channel::SmartChannelSource::RrdHttpStream { .. } => "http", @@ -210,5 +214,5 @@ impl ViewerAnalytics { ) { } #[allow(clippy::unused_self)] - pub fn on_open_recording(&mut self, _log_db: &re_data_store::LogDb) {} + pub fn on_open_recording(&mut self, _store_db: &re_data_store::StoreDb) {} } diff --git a/crates/re_viewer_context/src/annotations.rs b/crates/re_viewer_context/src/annotations.rs index fb730df476a9..945d4d9ff46d 100644 --- a/crates/re_viewer_context/src/annotations.rs +++ b/crates/re_viewer_context/src/annotations.rs @@ -130,7 +130,7 @@ impl AnnotationMap { let mut visited = IntSet::::default(); - let data_store = &ctx.log_db.entity_db.data_store; + let data_store = &ctx.store_db.entity_db.data_store; let latest_at_query = LatestAtQuery::new(scene_query.timeline, scene_query.latest_at); // This logic is borrowed from `iter_ancestor_meta_field`, but using the arrow-store instead diff --git a/crates/re_viewer_context/src/viewer_context.rs b/crates/re_viewer_context/src/viewer_context.rs index 62b27d96a486..50e73d1a93d0 100644 --- a/crates/re_viewer_context/src/viewer_context.rs +++ b/crates/re_viewer_context/src/viewer_context.rs @@ -1,4 +1,4 @@ -use re_data_store::log_db::LogDb; +use re_data_store::store_db::StoreDb; use crate::{ AppOptions, Caches, ComponentUiRegistry, Item, ItemCollection, SelectionState, @@ -22,9 +22,9 @@ pub struct ViewerContext<'a> { pub space_view_class_registry: &'a SpaceViewClassRegistry, /// The current recording. - pub log_db: &'a LogDb, + pub store_db: &'a StoreDb, - /// UI config for the current recording (found in [`LogDb`]). + /// UI config for the current recording (found in [`StoreDb`]). pub rec_cfg: &'a mut RecordingConfig, /// The look and feel of the UI. @@ -73,7 +73,7 @@ impl<'a> ViewerContext<'a> { // ---------------------------------------------------------------------------- -/// UI config for the current recording (found in [`LogDb`]). +/// UI config for the current recording (found in [`StoreDb`]). #[derive(Default, serde::Deserialize, serde::Serialize)] #[serde(default)] pub struct RecordingConfig { diff --git a/crates/re_viewport/src/space_info.rs b/crates/re_viewport/src/space_info.rs index 68503a791a5c..fab17160ffb9 100644 --- a/crates/re_viewport/src/space_info.rs +++ b/crates/re_viewport/src/space_info.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use re_arrow_store::{LatestAtQuery, TimeInt, Timeline}; use re_components::{DisconnectedSpace, Pinhole, Transform3D}; -use re_data_store::{log_db::EntityDb, EntityPath, EntityTree}; +use re_data_store::{store_db::EntityDb, EntityPath, EntityTree}; use re_space_view_spatial::UnreachableTransform; /// Transform connecting two space paths. 
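The viewer_analytics.rs hunks above preserve one rule through the rename: application and store ids are only reported in the clear for official examples, and hashed otherwise. A sketch of that rule using the `Property` API visible in this diff (the standalone helper function is hypothetical):

use re_analytics::Property;
use re_log_types::StoreInfo;

// Hypothetical helper mirroring on_open_recording above: official examples are
// tracked by name so popular examples can be counted; user application ids are
// hashed before they are sent anywhere.
fn application_id_property(store_info: &StoreInfo) -> Property {
    let prop = Property::from(store_info.application_id.0.clone());
    if store_info.is_official_example {
        prop
    } else {
        prop.hashed()
    }
}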
diff --git a/crates/re_viewport/src/space_view.rs b/crates/re_viewport/src/space_view.rs index 9b76c04e6db0..595bbff8f2c1 100644 --- a/crates/re_viewport/src/space_view.rs +++ b/crates/re_viewport/src/space_view.rs @@ -239,7 +239,7 @@ impl SpaceViewBlueprint { ViewCategory::Spatial => { let transforms = TransformCache::determine_transforms( - &ctx.log_db.entity_db, + &ctx.store_db.entity_db, &ctx.rec_cfg.time_ctrl, &self.space_path, self.data_blueprint.data_blueprints_projected(), @@ -288,14 +288,14 @@ impl SpaceViewBlueprint { &mut self, tree: &EntityTree, spaces_info: &SpaceInfoCollection, - log_db: &re_data_store::LogDb, + store_db: &re_data_store::StoreDb, ) { re_tracing::profile_function!(); let mut entities = Vec::new(); tree.visit_children_recursively(&mut |entity_path: &EntityPath| { let entity_categories = - categorize_entity_path(Timeline::log_time(), log_db, entity_path); + categorize_entity_path(Timeline::log_time(), store_db, entity_path); if entity_categories.contains(self.category) && !self.data_blueprint.contains_entity(entity_path) diff --git a/crates/re_viewport/src/space_view_entity_picker.rs b/crates/re_viewport/src/space_view_entity_picker.rs index c9831fce4d68..e9e143a95f48 100644 --- a/crates/re_viewport/src/space_view_entity_picker.rs +++ b/crates/re_viewport/src/space_view_entity_picker.rs @@ -84,8 +84,8 @@ fn add_entities_ui( ui: &mut egui::Ui, space_view: &mut SpaceViewBlueprint, ) { - let spaces_info = SpaceInfoCollection::new(&ctx.log_db.entity_db); - let tree = &ctx.log_db.entity_db.tree; + let spaces_info = SpaceInfoCollection::new(&ctx.store_db.entity_db); + let tree = &ctx.store_db.entity_db.tree; let entities_add_info = create_entity_add_info(ctx, tree, space_view, &spaces_info); add_entities_tree_ui( @@ -215,7 +215,7 @@ fn add_entities_line_ui( |ui| { let response = ctx.re_ui.small_icon_button(ui, &re_ui::icons::ADD); if response.clicked() { - space_view.add_entity_subtree(entity_tree, spaces_info, ctx.log_db); + space_view.add_entity_subtree(entity_tree, spaces_info, ctx.store_db); } if add_info @@ -307,7 +307,7 @@ fn create_entity_add_info( let mut meta_data: IntMap = IntMap::default(); tree.visit_children_recursively(&mut |entity_path| { - let categories = categorize_entity_path(Timeline::log_time(), ctx.log_db, entity_path); + let categories = categorize_entity_path(Timeline::log_time(), ctx.store_db, entity_path); let can_add: CanAddToSpaceView = if categories.contains(space_view.category) { match spaces_info.is_reachable_by_transform(entity_path, &space_view.space_path) { Ok(()) => CanAddToSpaceView::Compatible { diff --git a/crates/re_viewport/src/space_view_heuristics.rs b/crates/re_viewport/src/space_view_heuristics.rs index 72250d26c97e..0e129bfd2cf2 100644 --- a/crates/re_viewport/src/space_view_heuristics.rs +++ b/crates/re_viewport/src/space_view_heuristics.rs @@ -24,7 +24,7 @@ pub fn all_possible_space_views( // Everything with a SpaceInfo is a candidate (that is root + whenever there is a transform), // as well as all direct descendants of the root. 
- let root_children = &ctx.log_db.entity_db.tree.children; + let root_children = &ctx.store_db.entity_db.tree.children; let candidate_space_paths = spaces_info .iter() .map(|info| &info.path) @@ -125,7 +125,7 @@ pub fn default_created_space_views( spaces_info: &SpaceInfoCollection, ) -> Vec { let candidates = all_possible_space_views(ctx, spaces_info); - default_created_space_views_from_candidates(&ctx.log_db.entity_db.data_store, candidates) + default_created_space_views_from_candidates(&ctx.store_db.entity_db.data_store, candidates) } fn default_created_space_views_from_candidates( @@ -301,8 +301,8 @@ pub fn default_queried_entities( re_tracing::profile_function!(); let timeline = Timeline::log_time(); - let log_db = &ctx.log_db; - let data_store = &log_db.entity_db.data_store; + let store_db = &ctx.store_db; + let data_store = &store_db.entity_db.data_store; let mut entities = Vec::new(); let space_info = spaces_info.get_first_parent_with_info(space_path); @@ -314,7 +314,8 @@ pub fn default_queried_entities( .iter() .filter(|entity_path| { is_default_added_to_space_view(entity_path, space_path, data_store, timeline) - && categorize_entity_path(timeline, log_db, entity_path).contains(category) + && categorize_entity_path(timeline, store_db, entity_path) + .contains(category) }) .cloned(), ); @@ -332,8 +333,8 @@ fn default_queried_entities_by_category( re_tracing::profile_function!(); let timeline = Timeline::log_time(); - let log_db = &ctx.log_db; - let data_store = &log_db.entity_db.data_store; + let store_db = &ctx.store_db; + let data_store = &store_db.entity_db.data_store; let mut groups: BTreeMap> = BTreeMap::default(); let space_info = space_info_collection.get_first_parent_with_info(space_path); @@ -343,7 +344,7 @@ fn default_queried_entities_by_category( &mut |space_info| { for entity_path in &space_info.descendants_without_transform { if is_default_added_to_space_view(entity_path, space_path, data_store, timeline) { - for category in categorize_entity_path(timeline, log_db, entity_path) { + for category in categorize_entity_path(timeline, store_db, entity_path) { groups .entry(category) .or_default() diff --git a/crates/re_viewport/src/view_bar_chart/scene.rs b/crates/re_viewport/src/view_bar_chart/scene.rs index 7cf485fc9838..558445dc5ac7 100644 --- a/crates/re_viewport/src/view_bar_chart/scene.rs +++ b/crates/re_viewport/src/view_bar_chart/scene.rs @@ -21,7 +21,7 @@ impl SceneBarChart { fn load_tensors(&mut self, ctx: &mut ViewerContext<'_>, query: &SceneQuery<'_>) { re_tracing::profile_function!(); - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; for (ent_path, props) in query.iter_entities() { if !props.visible { diff --git a/crates/re_viewport/src/view_category.rs b/crates/re_viewport/src/view_category.rs index 9256da24558c..037518efc437 100644 --- a/crates/re_viewport/src/view_category.rs +++ b/crates/re_viewport/src/view_category.rs @@ -3,7 +3,7 @@ use re_components::{ Arrow3D, Box3D, Component as _, LineStrip2D, LineStrip3D, Mesh3D, Pinhole, Point2D, Point3D, Rect2D, Scalar, Tensor, TextBox, TextEntry, Transform3D, }; -use re_data_store::{EntityPath, LogDb, Timeline}; +use re_data_store::{EntityPath, StoreDb, Timeline}; #[derive( Debug, Default, PartialOrd, Ord, enumset::EnumSetType, serde::Deserialize, serde::Serialize, @@ -64,14 +64,14 @@ pub type ViewCategorySet = enumset::EnumSet; pub fn categorize_entity_path( timeline: Timeline, - log_db: &LogDb, + store_db: &StoreDb, entity_path: &EntityPath, ) -> ViewCategorySet { 
re_tracing::profile_function!(); let mut set = ViewCategorySet::default(); - for component in log_db + for component in store_db .entity_db .data_store .all_components(&timeline, entity_path) @@ -98,7 +98,7 @@ pub fn categorize_entity_path( } else if component == Tensor::name() { let timeline_query = LatestAtQuery::new(timeline, TimeInt::MAX); - let store = &log_db.entity_db.data_store; + let store = &store_db.entity_db.data_store; if let Some(tensor) = store.query_latest_component::(entity_path, &timeline_query) { diff --git a/crates/re_viewport/src/view_tensor/scene.rs b/crates/re_viewport/src/view_tensor/scene.rs index 7b2cd303800b..0a1f7b61ce97 100644 --- a/crates/re_viewport/src/view_tensor/scene.rs +++ b/crates/re_viewport/src/view_tensor/scene.rs @@ -14,7 +14,7 @@ impl SceneTensor { pub(crate) fn load(&mut self, ctx: &mut ViewerContext<'_>, query: &SceneQuery<'_>) { re_tracing::profile_function!(); - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; for (ent_path, props) in query.iter_entities() { let timeline_query = LatestAtQuery::new(query.timeline, query.latest_at); diff --git a/crates/re_viewport/src/view_time_series/scene.rs b/crates/re_viewport/src/view_time_series/scene.rs index d26f51841b81..6fe5e4e48f3f 100644 --- a/crates/re_viewport/src/view_time_series/scene.rs +++ b/crates/re_viewport/src/view_time_series/scene.rs @@ -73,7 +73,7 @@ impl SceneTimeSeries { fn load_scalars(&mut self, ctx: &mut ViewerContext<'_>, query: &SceneQuery<'_>) { re_tracing::profile_function!(); - let store = &ctx.log_db.entity_db.data_store; + let store = &ctx.store_db.entity_db.data_store; for entity_path in query.entity_paths { let ent_path = entity_path; diff --git a/crates/rerun/src/clap.rs b/crates/rerun/src/clap.rs index 19b4ccdfbb07..e2e101577802 100644 --- a/crates/rerun/src/clap.rs +++ b/crates/rerun/src/clap.rs @@ -95,7 +95,7 @@ impl RerunArgs { }; let _tokio_runtime_guard = tokio_runtime_handle.enter(); - let (rerun_enabled, recording_info, batcher_config) = + let (rerun_enabled, store_info, batcher_config) = crate::RecordingStreamBuilder::new(application_id) .default_enabled(default_enabled) .into_args(); @@ -123,12 +123,12 @@ impl RerunArgs { #[cfg(feature = "native_viewer")] RerunBehavior::Spawn => { - crate::native_viewer::spawn(recording_info, batcher_config, run)?; + crate::native_viewer::spawn(store_info, batcher_config, run)?; return Ok(()); } }; - let rec_stream = RecordingStream::new(recording_info, batcher_config, sink)?; + let rec_stream = RecordingStream::new(store_info, batcher_config, sink)?; run(rec_stream.clone()); // The user callback is done executing, it's a good opportunity to flush the pipeline diff --git a/crates/rerun/src/native_viewer.rs b/crates/rerun/src/native_viewer.rs index fc214f37a2bb..e9922ced7bbf 100644 --- a/crates/rerun/src/native_viewer.rs +++ b/crates/rerun/src/native_viewer.rs @@ -1,5 +1,5 @@ use re_log_types::LogMsg; -use re_log_types::RecordingInfo; +use re_log_types::StoreInfo; use re_sdk::RecordingStream; /// Starts a Rerun viewer on the current thread and migrates the given callback, along with @@ -15,7 +15,7 @@ use re_sdk::RecordingStream; /// their UI runs on the main thread! 
⚠️ #[cfg(not(target_arch = "wasm32"))] pub fn spawn( - recording_info: RecordingInfo, + store_info: StoreInfo, batcher_config: re_log_types::DataTableBatcherConfig, run: F, ) -> re_viewer::external::eframe::Result<()> @@ -27,11 +27,10 @@ where re_smart_channel::SmartChannelSource::Sdk, ); let sink = Box::new(NativeViewerSink(tx)); - let app_env = - re_viewer::AppEnvironment::from_recording_source(&recording_info.recording_source); + let app_env = re_viewer::AppEnvironment::from_store_source(&store_info.store_source); let rec_stream = - RecordingStream::new(recording_info, batcher_config, sink).expect("Failed to spawn thread"); + RecordingStream::new(store_info, batcher_config, sink).expect("Failed to spawn thread"); // NOTE: Forget the handle on purpose, leave that thread be. std::thread::Builder::new() @@ -67,7 +66,7 @@ pub fn show(msgs: Vec) -> re_viewer::external::eframe::Result<()> { return Ok(()); } - let recording_source = re_log_types::RecordingSource::RustSdk { + let store_source = re_log_types::StoreSource::RustSdk { rustc_version: env!("RE_BUILD_RUSTC_VERSION").into(), llvm_version: env!("RE_BUILD_LLVM_VERSION").into(), }; @@ -75,7 +74,7 @@ pub fn show(msgs: Vec) -> re_viewer::external::eframe::Result<()> { let startup_options = re_viewer::StartupOptions::default(); re_viewer::run_native_viewer_with_messages( re_build_info::build_info!(), - re_viewer::AppEnvironment::from_recording_source(&recording_source), + re_viewer::AppEnvironment::from_store_source(&store_source), startup_options, msgs, ) diff --git a/crates/rerun/src/run.rs b/crates/rerun/src/run.rs index 1f410cc1a9c7..50d3c86dd58a 100644 --- a/crates/rerun/src/run.rs +++ b/crates/rerun/src/run.rs @@ -350,9 +350,9 @@ async fn run_impl( paths: vec![path.clone()], }, ); - let recording_id = - re_log_types::RecordingId::random(re_log_types::RecordingType::Data); - load_file_to_channel_at(recording_id, &path, tx) + let store_id = + re_log_types::StoreId::random(re_log_types::StoreKind::Recording); + load_file_to_channel_at(store_id, &path, tx) .with_context(|| format!("{path:?}"))?; rx } @@ -416,15 +416,15 @@ async fn run_impl( }, ); - let recording_id = re_log_types::RecordingId::random(re_log_types::RecordingType::Data); + let store_id = re_log_types::StoreId::random(re_log_types::StoreKind::Recording); // Load the files in parallel, and log errors. // Failing to log one out of many files is not a big deal. for path in paths { let tx = tx.clone_as(re_smart_channel::SmartMessageSource::File(path.clone())); - let recording_id = recording_id.clone(); + let store_id = store_id.clone(); rayon::spawn(move || { - if let Err(err) = load_file_to_channel_at(recording_id, &path, tx) { + if let Err(err) = load_file_to_channel_at(store_id, &path, tx) { re_log::error!("Failed to load {path:?}: {err}"); } }); @@ -450,7 +450,7 @@ async fn run_impl( // Now what do we do with the data? if args.test_receive { - assert_receive_into_log_db(&rx).map(|_db| ()) + assert_receive_into_store_db(&rx).map(|_db| ()) } else if let Some(rrd_path) = args.save { Ok(stream_to_rrd(&rx, &rrd_path.into())?) } else if args.web_viewer { @@ -534,12 +534,12 @@ fn parse_size(size: &str) -> anyhow::Result<[f32; 2]> { } // NOTE: This is only used as part of end-to-end tests. 
-fn assert_receive_into_log_db(rx: &Receiver<LogMsg>) -> anyhow::Result<LogDb> { +fn assert_receive_into_store_db(rx: &Receiver<LogMsg>) -> anyhow::Result<StoreDb> { use re_smart_channel::RecvTimeoutError; - re_log::info!("Receiving messages into a LogDb…"); + re_log::info!("Receiving messages into a StoreDb…"); - let mut db: Option<LogDb> = None; + let mut db: Option<StoreDb> = None; let mut num_messages = 0; @@ -553,7 +553,7 @@ fn assert_receive_into_log_db(rx: &Receiver) -> anyhow::Result { let mut_db = db.get_or_insert_with(|| { - re_data_store::LogDb::new(msg.recording_id().clone()) + re_data_store::StoreDb::new(msg.store_id().clone()) }); mut_db.add(&msg)?; @@ -568,7 +568,7 @@ fn assert_receive_into_log_db(rx: &Receiver) -> anyhow::Result, ) -> Result<(), anyhow::Error> { @@ -791,7 +791,7 @@ fn load_file_to_channel_at( } else { #[cfg(feature = "sdk")] { - let log_msg = re_sdk::MsgSender::from_file_path(path)?.into_log_msg(recording_id)?; + let log_msg = re_sdk::MsgSender::from_file_path(path)?.into_log_msg(store_id)?; tx.send(log_msg).ok(); // .ok(): we may be running in a background thread, so who knows if the receiver is still open tx.quit(None).ok(); Ok(()) } #[cfg(not(feature = "sdk"))] { - _ = recording_id; + _ = store_id; anyhow::bail!("Unsupported file extension: '{extension}' for path {path:?}. Try enabling the 'sdk' feature of 'rerun'."); } } diff --git a/design/batching.md b/design/batching.md index ee3731c6d19f..be45ae74c7b7 100644 --- a/design/batching.md +++ b/design/batching.md @@ -91,7 +91,7 @@ The data goes through several distinct stages during its lifetime: At present, the client is limited to creating a single event at a time, corresponding to a single row of data. Each row contains N components, each of which can hold M instances for a given entity across P timelines. -To begin the process, the SDK creates a `ComponentBundle`, which can be thought of as a data cell within a dataframe. This `ComponentBundle` is essentially a list of values for a specific component type. Keep in mind we only ever work with lists, rather than individual values. +To begin the process, the SDK creates a `ComponentBundle`, which can be thought of as a data cell within a dataframe. This `ComponentBundle` is essentially a list of values for a specific component type. Keep in mind we only ever work with lists, rather than individual values. From this point forward, the individual values in these lists are referred to as "component instances" (:warning: "component instances" != "instance keys"). ```rust @@ -114,13 +114,13 @@ pub struct MsgBundle { pub components: Vec<ComponentBundle>, } ``` -which corresponds to _1 event_, i.e. 1 row's worth of data for 1 entity in N timelines. +which corresponds to _1 event_, i.e. 1 row's worth of data for 1 entity in N timelines. This event is uniquely identified with a `MsgId`, which is a `TUID` under the hood (wall-clock UID). -The number of component instances for all columns, or components, in a given row is determined by examining the number of instances for the first entry in the `components` list. However, this approach has a significant flaw: all components must have the same number of instances. +The number of component instances for all columns, or components, in a given row is determined by examining the number of instances for the first entry in the `components` list. However, this approach has a significant flaw: all components must have the same number of instances.
This requirement creates a situation where splats, with a single instance, and clears, with no instances, must be sent in separate `MsgBundle`s. -As part of packing the `MsgBundle`, we convert the `MsgId` itself into a `ComponentBundle` by cloning it as many times as necessary to match the number of instances. We do this because we need the `MsgId` information later for garbage collection purposes. +As part of packing the `MsgBundle`, we convert the `MsgId` itself into a `ComponentBundle` by cloning it as many times as necessary to match the number of instances. We do this because we need the `MsgId` information later for garbage collection purposes. However, this approach presents a challenge for clears, which have zero instances. As a result, messages that contain zero instances cannot be garbage collected as of today. ### Transport @@ -220,8 +220,8 @@ There are several important points to note: ### Storage The data is actually stored in two places: -- in `LogDb`, where every raw `LogMsg` is kept around so that it can later be saved to disk, - - Due to the lack of batching, the size of the data sitting in `LogDb` is actually completely dwarfed by the size of the schema metadata. +- in `StoreDb`, where every raw `LogMsg` is kept around so that it can later be saved to disk, + - Due to the lack of batching, the size of the data sitting in `StoreDb` is actually completely dwarfed by the size of the schema metadata. - in the `DataStore`, where the data is stripped down into parts and indexed as needed for our latest-at semantics. - That's the origin of the `MsgId` mismatch problem. @@ -273,7 +273,7 @@ IndexTable { ``` Note that, although the tables are bucketed, garbage collection of indices is actually entirely disabled today because the whole GC story is broken (see below). -Components are stored on a per-component basis: i.e. all timelines and all entities share the same component storage. +Components are stored on a per-component basis: i.e. all timelines and all entities share the same component storage. Like indices, they are split further into buckets (using both space and time thresholds), once again to facilitate garbage collection: ``` ComponentTable { @@ -312,11 +312,11 @@ ComponentTable { ``` (The space thresholds don't actually work today due to the hacks we do in the GC implementation to work around `MsgId` mismatches) -Storing data in both `LogDb` and the component tables can lead to a significant increase in memory usage if not managed carefully, effectively doubling the storage requirements. Therefore, bucket compaction is currently disabled, leaving some performance on the table. +Storing data in both `StoreDb` and the component tables can lead to a significant increase in memory usage if not managed carefully, effectively doubling the storage requirements. Therefore, bucket compaction is currently disabled, leaving some performance on the table. Overall, this storage architecture maps well to our latest-at query semantics, but quite poorly to our range/timeseries semantics (see read path section below). -The index buckets in the `DataStore` hold references to specific rows in the component tables, where the actual data is stored. +The index buckets in the `DataStore` hold references to specific rows in the component tables, where the actual data is stored. At first, this may seem reasonable, but it's not the most efficient approach: Arrow data is already reference counted, so we're essentially referencing a set of references. 
This leads to a significant and expensive issue on the read path, particularly for range queries, as discussed below. ### Write path @@ -325,7 +325,7 @@ The write path is fairly straightforward, with some complications arising from h First, each component (i.e., column) is inserted into the currently active component table, which generates a set of globally unique and stable row numbers. -Next, we retrieve or create the appropriate index based on the `EntityPath` and `Timeline` parameters. Using binary search, we locate the correct bucket and insert the row numbers. +Next, we retrieve or create the appropriate index based on the `EntityPath` and `Timeline` parameters. Using binary search, we locate the correct bucket and insert the row numbers. That's also when bucket splitting happens, which is its own can of worms, but is completely orthogonal to batching concerns. We also maintain an additional index that maps `MsgId`s to timepoints, which is crucial for multi-timeline views like the text view. @@ -346,14 +346,14 @@ This second subtlety has important implications. To actually retrieve the data, While range queries have some surprisingly tricky semantics (especially around the intersection of timeless and temporal data), operationally they behave pretty much like latest-at queries: grabbing the right index, binsearching for the right bucket, and starting iteration from there. -However, the fact that we return row numbers instead of the actual data itself can have significant performance implications when it comes to range queries. +However, the fact that we return row numbers instead of the actual data itself can have significant performance implications when it comes to range queries. For example, if you need to iterate through 100k values, you would need to run 100k `get` requests, which would require 100k binsearches in the component tables. This can be extremely costly and is a major reason why our ranged query scenes quickly become unusable as the dataset grows. ### Garbage Collection The current garbage collection mechanism was put together as a quick fix for the `MsgId`-mismatch issue, and it is largely unreliable. -The algorithm works as follows: it finds the oldest component bucket based on the insertion order from the datastore, which doesn't make much semantic sense, and drops it. Then, it drops all component buckets that roughly cover the same time range. Finally, it returns all the `MsgId`s to the viewer so that it can in turn clear its own data structures. +The algorithm works as follows: it finds the oldest component bucket based on the insertion order from the datastore, which doesn't make much semantic sense, and drops it. Then, it drops all component buckets that roughly cover the same time range. Finally, it returns all the `MsgId`s to the viewer so that it can in turn clear its own data structures. This process is repeated in a loop until a sufficient amount of data has been dropped. Beyond these hacks, the logic in and of itself is fundamentally broken right now. Consider the following log calls: @@ -381,7 +381,7 @@ This happens because the GC blindly drops data rather than doing the correct thi ### Save-to-disk -The current store cannot be dumped to disk, we rely on `LogDb` to store all incoming `LogMsg`s and dump them to disk as-is if the user decides to save the recording. +The current store cannot be dumped to disk; we rely on `StoreDb` to store all incoming `LogMsg`s and dump them to disk as-is if the user decides to save the recording.
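Before moving on to the proposed design, here is a self-contained toy model of the read-path issue described above; all names are illustrative and this is not the actual store code, just the two-level lookup pattern it currently relies on:

```rust
// Toy model of today's component tables: buckets covering contiguous ranges
// of globally unique, stable row numbers.
struct Bucket {
    first_row: u64,
    values: Vec<f32>, // stand-in for the actual arrow column data
}

/// Every `get` must first re-locate the right bucket with a binary search.
fn get(buckets: &[Bucket], row: u64) -> Option<&f32> {
    let idx = buckets.partition_point(|b| b.first_row <= row).checked_sub(1)?;
    let bucket = &buckets[idx];
    bucket.values.get((row - bucket.first_row) as usize)
}

fn main() {
    let buckets = vec![
        Bucket { first_row: 0, values: vec![1.0, 2.0, 3.0] },
        Bucket { first_row: 3, values: vec![4.0, 5.0] },
    ];
    // A range query over N rows degenerates into N independent `get`s,
    // i.e. N binary searches: the exact cost problem described above.
    let total: f32 = (0..5).filter_map(|row| get(&buckets, row)).sum();
    assert_eq!(total, 15.0);
}
```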
--- @@ -391,16 +391,16 @@ The proposed design involves significant changes at every stage of the data life ### Creation -The main difference is of course that the client can now accumulate events (i.e., rows) in a local table before sending them to the server. +The main difference is of course that the client can now accumulate events (i.e., rows) in a local table before sending them to the server. In practice this process of accumulation is handled behind the scenes by the SDK, and driven by both time and space thresholds ("accumulate at most 10MiB of raw data for no more than 50ms"). -To reflect the fact that we're passing tables of data around, I suggest we update the terminology. -The current terms `ComponentBundle` and `MsgBundle` are vague, so let's use more descriptive terms instead: +To reflect the fact that we're passing tables of data around, I suggest we update the terminology. +The current terms `ComponentBundle` and `MsgBundle` are vague, so let's use more descriptive terms instead: * `DataCell`: a uniform list of values for a given component type from a single log call. * `DataRow`: an event, a list of cells associated with an event ID, entity path, timepoint, and number of instances. Corresponds to a single SDK log call. * `DataTable`: a batch; a list of rows associated with a batch ID. -Juggling between native and arrow data interchangeably can be a cumbersome task in our current implementation. While we have some helper functions to facilitate this, the process is not as smooth as it could be. +Juggling between native and arrow data interchangeably can be a cumbersome task in our current implementation. While we have some helper functions to facilitate this, the process is not as smooth as it could be. This is partly due to limitations in `arrow2-convert`, but also because some of our APIs are simply not optimized for this use case (yet). So, considering all the reasons above, here are all the new types involved. @@ -554,7 +554,7 @@ The SDK accumulates cells into rows into tables until either the space or time t Note that `DataCell`, `DataRow`, `DataTable` are all temporary constructs to help with the creation of data batches; they are not what gets sent over the wire (although `DataCell` pre-serializes its data as it is much more convenient to erase component data before passing it around). -Only when a `DataTable` gets transformed into a `ArrowMsg` does serialization actually happen. +Only when a `DataTable` gets transformed into an `ArrowMsg` does serialization actually happen. `ArrowMsg` is what gets sent over the wire. ### Transport @@ -667,7 +667,7 @@ Schema { }, ], metadata: { - "rerun.batch_id": "", + "rerun.batch_id": "", }, } ``` @@ -682,12 +682,12 @@ At this point we might want to sort the batch by `(event_id, entity_path)`, whic That's also an opportunity to pre-compact the data: if two rows share the same timepoints with different components, we could potentially merge them together... that's a bit more controversial though as it means either dropping some `EventId`s, or supporting multiple `EventId`s for a single event. -One last thing that needs to be taken care of before actually sending the data is compression / dictionary-encoding of some kind. +One last thing that needs to be taken care of before actually sending the data is compression / dictionary-encoding of some kind. We already have `zstd` in place for that.
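As a concrete sketch of the `(event_id, entity_path)` sort mentioned above (the `Row` struct and `u128` event IDs below are stand-ins, not the real `DataRow`/`EventId` definitions), the sort itself is just a lexicographic comparison over the rows of the batch:

```rust
// Illustrative stand-ins: `EventId` is TUID-based and therefore time-ordered,
// which is modeled here as a plain u128.
struct Row {
    event_id: u128,
    entity_path: String,
}

/// Sort the accumulated batch before encoding & compressing it.
fn sort_batch(rows: &mut [Row]) {
    // Cheap on the client, and it improves data locality in the store, since
    // the shared batches then follow the layout of the final buckets more closely.
    rows.sort_by(|a, b| (a.event_id, &a.entity_path).cmp(&(b.event_id, &b.entity_path)));
}

fn main() {
    let mut rows = vec![
        Row { event_id: 2, entity_path: "world/points".into() },
        Row { event_id: 1, entity_path: "world/points".into() },
        Row { event_id: 1, entity_path: "world/camera".into() },
    ];
    sort_batch(&mut rows);
    assert_eq!(rows[0].entity_path, "world/camera");
    assert_eq!(rows[2].event_id, 2);
}
```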
### Storage -One of the major change storage-wise is the complete removal of component tables: index tables now reference the arrow data directly. +One of the major changes storage-wise is the complete removal of component tables: index tables now reference the arrow data directly. With the new design, the arrow buffers now store multiple rows of data. To reference a specific row, each index row must point to _a unit-length slice_ in a shared batch of arrow data. That is the reason why sorting the batch on the client's end improves performance: it improves data locality in the store by making the shared batches follow the layout of the final buckets more closely. @@ -746,7 +746,7 @@ Worth noticing: In addition to storing the indices themselves, we also require a bunch of auxiliary datastructures. First, we need to keep track of all `EventId`s currently present in the store, in `event_id` order (remember, these are time-based (clients' wall-clocks)!). -This will replace the existing `chronological_msg_ids` in `LogDb` (which is currently in insertion-order-as-seen-from-the-viewer, which isn't too great). +This will replace the existing `chronological_msg_ids` in `StoreDb` (which is currently in insertion-order-as-seen-from-the-viewer, which isn't too great). We need this because some operations like GC and save-to-disk require picking an arbitrary ordering to get going, and `event_id` is our best bet for now. Second, we need to map `EventId`s to index rows for GC purposes: `HashMap<EventId, Vec<RowIndex>>`. @@ -821,57 +821,57 @@ This will make the resulting .rrd file both faster to load (there's some fixed o A _lot_ of things are gonna change, so we really want A) to avoid crazy large PRs that are a pain to review and B) to be able to detect regressions (both correctness and performance) early on so they don't turn into long & painful investigations. -Nothing set in stone obviously, but the following steps seem like a good start (roughly 1 step == 1 PR). +Nothing set in stone obviously, but the following steps seem like a good start (roughly 1 step == 1 PR). This entire section can pretty much be used verbatim as a tracking issue. -1. Implement all the needed tests & benchmarks -We need to be able to check for regressions at every step, so make sure we have all the tests and benchmarks we need for that. +1. Implement all the needed tests & benchmarks +We need to be able to check for regressions at every step, so make sure we have all the tests and benchmarks we need for that. We should already be 95% of the way there at this point. -1. Move `DataStore` sanity checks and formatting tools to separate files +1. Move `DataStore` sanity checks and formatting tools to separate files `store.rs` is supposed to be the place where one can get an overview of all the datastructures involved in the store, except it has slowly become a mess over time and is now pretty much unreadable. -1. Replace `MsgBundle` & `ComponentBundle` with the new types (`DataCell`, `DataRow`, `DataTable`, `EventId`, `BatchId`...) +1. Replace `MsgBundle` & `ComponentBundle` with the new types (`DataCell`, `DataRow`, `DataTable`, `EventId`, `BatchId`...) No actual batching features nor any kind of behavior changes of any sort: just define the new types and use them everywhere. -1. Pass entity path as a column rather than as metadata +1. Pass entity path as a column rather than as metadata Replace the current entity_path that is passed in the metadata map with an actual column instead.
This will also require us to make `EntityPath` a proper arrow datatype (..datatype, not component!!). -1. Implement explicit number of instances +1. Implement explicit number of instances Introduce a new column for `num_instances`, integrate it in the store index and expose it in the store APIs. -1. Fix splats all around (rs sdk, py sdk, re_query...) +1. Fix splats all around (rs sdk, py sdk, re_query...) Update the SDKs and `re_query` to properly make use of the new explicit `num_instances`. -1. Get rid of component buckets altogether +1. Get rid of component buckets altogether Update the store implementation to remove component tables, remove the `get` APIs, introduce slicing on the write path, etc. Still no batching in sight! -1. Implement the coalescing/accumulation logic in the SDK +1. Implement the coalescing/accumulation logic in the SDK Add the required logic/thread/timers/whatever-else in the SDKs to accumulate data and just send it all as many `LogMsg`s (i.e. no batching yet). -1. Implement full-on batching +1. Implement full-on batching End-to-end: transport, storage, the whole shebang. -1. Sort the batch before sending (`(event_id, entity_path)`) +1. Sort the batch before sending (`(event_id, entity_path)`) Keep that in its own PR to keep track of the benchmarks. -1. Implement new GC +1. Implement new GC The complete implementation; should close all existing GC issues. -1. Dump directly from the store into an rrd file +1. Dump directly from the store into an rrd file No rebatching yet, just dump every event in its own `LogMsg`. -1. Remove `LogMsg`s from `LogDb` -We shouldn't need to keep track of events outside the store past this point: clean it all up. +1. Remove `LogMsg`s from `StoreDb` +We shouldn't need to keep track of events outside the store past this point: clean it all up. Reminder: the timeline widget keeps track of timepoints directly, not events. -1. Rebatch aggressively while dumping to disk +1. Rebatch aggressively while dumping to disk -1. Use arrow extension types to carry around component names +1. Use arrow extension types to carry around component names -1. Drop `log_time` -We currently store the logging time twice: once in the `MsgId` (soon `EventId`) and once injected by the SDK (and they don't even match!). -We could just not inject the `log_time`, and instead derive a `log_time` column on the server using the timestamp in the `EventId`; especially since we probably want an auto-derived `ingestion_time` anyway. +1. Drop `log_time` +We currently store the logging time twice: once in the `MsgId` (soon `EventId`) and once injected by the SDK (and they don't even match!). +We could just not inject the `log_time`, and instead derive a `log_time` column on the server using the timestamp in the `EventId`; especially since we probably want an auto-derived `ingestion_time` anyway. The timestamp in `EventId` is not going away: it is what defines our global ordering! - Turn all of the above into a tracking issue @@ -892,14 +892,14 @@ Basically aggressive rebatching on the loaded data. ### Dedicated storage for timeseries -While our store design nicely matches latest-at semantics, it's pretty horrible when it comes to range/timeseries-like semantics. +While our store design nicely matches latest-at semantics, it's pretty horrible when it comes to range/timeseries-like semantics. It gets even worse for timeseries of simple scalars. At some point we're gonna want to have a fully dedicated storage & query path for scalar timeseries.
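To give a rough idea of what such a dedicated path could look like, here is a minimal, purely hypothetical sketch (nothing like this exists in the codebase today): one densely packed, time-sorted pair of columns per series, so that a range query is a single binsearch plus a contiguous slice rather than per-row `get`s:

```rust
use std::collections::HashMap;

// Hypothetical dedicated scalar-timeseries storage: plain, time-sorted columns.
#[derive(Default)]
struct ScalarSeries {
    times: Vec<i64>,  // sorted timestamps
    values: Vec<f64>, // one scalar per timestamp
}

impl ScalarSeries {
    fn push(&mut self, time: i64, value: f64) {
        debug_assert!(self.times.last().map_or(true, |&t| t <= time));
        self.times.push(time);
        self.values.push(value);
    }

    /// All samples with `range.start <= t < range.end`: one binsearch, one slice.
    fn range(&self, range: std::ops::Range<i64>) -> (&[i64], &[f64]) {
        let lo = self.times.partition_point(|&t| t < range.start);
        let hi = self.times.partition_point(|&t| t < range.end);
        (&self.times[lo..hi], &self.values[lo..hi])
    }
}

fn main() {
    let mut store: HashMap<String, ScalarSeries> = HashMap::new();
    let series = store.entry("plots/temperature".to_owned()).or_default();
    for t in 0..100 {
        series.push(t, t as f64 * 0.5);
    }
    let (times, values) = store["plots/temperature"].range(10..20);
    assert_eq!(times.len(), 10);
    assert_eq!(values[0], 5.0);
}
```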
### Recursive clears native to the store -Recursive clears are currently handled in `LogDb`, which is an issue for (at least) two reasons: +Recursive clears are currently handled in `StoreDb`, which is an issue for (at least) two reasons: - Once we start saving the store in a native format, rather than a collection of `LogMsg`, we'll lose the recursive clears when dumping then reloading the recording. - The recursive clears aren't even arrow-ified yet. @@ -946,7 +946,7 @@ When garbage collecting, we _have to_ keep track of the compacted latest-at stat ### Optimize linear backwards walk -Although this has been fixed in the trivial case (the component is not present at all), this can still be an issue in others. +Although this has been fixed in the trivial case (the component is not present at all), this can still be an issue in others. The classic solution is some kind of bitmap index. ### Drop-after semantics (undo/redo) @@ -981,7 +981,7 @@ Don't waste compute & memory on creating large `arrow::Chunk`s from cells, inste > It seems like we do treat instance keys like any other component, which means each individual instance key is actually a component instance, no? -The terminology is very subtle. +The terminology is very subtle. - `InstanceKey` is indeed a component, and so it is always passed as a list, which we colloquially refer to as "the instance keys". - a "component instance", or just "instance", is the name we give to any single value in a component cell: @@ -1016,7 +1016,7 @@ None of these are components however, they are merely arrow datatypes. Everything else is just a component, and as such is passed as a `DataCell`. -Components are completely opaque blobs from the store's PoV; they cannot dictate its behavior since they aren't even deserialized. +Components are completely opaque blobs from the store's PoV; they cannot dictate its behavior since they aren't even deserialized. This includes `InstanceKey`s, which are just returned to `re_query` as-is. The one special thing about instance keys is that they are auto-generated server-side if they are missing; but even then, once they are generated they are still treated as opaque blobs from the store's PoV. diff --git a/examples/python/multiprocessing/main.py b/examples/python/multiprocessing/main.py index c7ed437a1a0d..e43cbd6b008a 100755 --- a/examples/python/multiprocessing/main.py +++ b/examples/python/multiprocessing/main.py @@ -10,18 +10,22 @@ import rerun as rr # pip install rerun-sdk -def task(title: str) -> None: +def task(child_index: int) -> None: # All processes spawned with `multiprocessing` will automatically # be assigned the same default recording_id. 
# We just need to connect each process to the rerun viewer: rr.init("multiprocessing") rr.connect() + title = f"task {child_index}" rr.log_text_entry( "log", text=f"Logging from pid={os.getpid()}, thread={threading.get_ident()} using the rerun recording id {rr.get_recording_id()}", # noqa: E501 line too long ) - rr.log_rect(title, [10, 20, 30, 40], label=title) + if child_index == 0: + rr.log_rect(title, [5, 5, 80, 80], label=title) + else: + rr.log_rect(title, [10 + child_index * 10, 20 + child_index * 5, 30, 40], label=title) def main() -> None: @@ -30,18 +34,19 @@ def main() -> None: [__import__("logging").warning(f"unknown arg: {arg}") for arg in unknown] rr.init("multiprocessing") - rr.spawn(connect=False) # this is the viewer that each process will connect to + rr.spawn(connect=False) # this is the viewer that each child process will connect to - task("main_task") + task(0) # Using multiprocessing with "fork" results in a hang on shutdown so # always use "spawn" # TODO(https://github.com/rerun-io/rerun/issues/1921) multiprocessing.set_start_method("spawn") - p = multiprocessing.Process(target=task, args=("child_task",)) - p.start() - p.join() + for i in [1, 2, 3]: + p = multiprocessing.Process(target=task, args=(i,)) + p.start() + p.join() if __name__ == "__main__": diff --git a/examples/rust/dna/src/main.rs b/examples/rust/dna/src/main.rs index 4f382345aa0d..befc2a4b8ea1 100644 --- a/examples/rust/dna/src/main.rs +++ b/examples/rust/dna/src/main.rs @@ -15,8 +15,8 @@ use rerun::{ const NUM_POINTS: usize = 100; fn main() -> Result<(), Box<dyn std::error::Error>> { - let recording_info = rerun::new_recording_info("DNA Abacus"); - rerun::native_viewer::spawn(recording_info, Default::default(), |rec_stream| { + let store_info = rerun::new_store_info("DNA Abacus"); + rerun::native_viewer::spawn(store_info, Default::default(), |rec_stream| { run(&rec_stream).unwrap(); })?; Ok(()) diff --git a/examples/rust/extend_viewer_ui/src/main.rs b/examples/rust/extend_viewer_ui/src/main.rs index 356ad5d09e77..b6a32180d655 100644 --- a/examples/rust/extend_viewer_ui/src/main.rs +++ b/examples/rust/extend_viewer_ui/src/main.rs @@ -102,8 +102,8 @@ impl MyApp { }); ui.separator(); - if let Some(log_db) = self.rerun_app.log_db() { - log_db_ui(ui, log_db); + if let Some(store_db) = self.rerun_app.store_db() { + store_db_ui(ui, store_db); } else { ui.label("No log database loaded yet."); } @@ -111,9 +111,9 @@ impl MyApp { } /// Show the content of the log database.
-fn log_db_ui(ui: &mut egui::Ui, log_db: &re_data_store::LogDb) { - if let Some(recording_info) = log_db.recording_info() { - ui.label(format!("Application ID: {}", recording_info.application_id)); +fn store_db_ui(ui: &mut egui::Ui, store_db: &re_data_store::StoreDb) { + if let Some(store_info) = store_db.store_info() { + ui.label(format!("Application ID: {}", store_info.application_id)); } // There can be many timelines, but the `log_time` timeline is always there: @@ -126,9 +126,9 @@ fn log_db_ui(ui: &mut egui::Ui, log_db: &re_data_store::LogDb) { egui::ScrollArea::vertical() .auto_shrink([false, true]) .show(ui, |ui| { - for entity_path in log_db.entity_db.entity_paths() { + for entity_path in store_db.entity_db.entity_paths() { ui.collapsing(entity_path.to_string(), |ui| { - entity_ui(ui, log_db, timeline, entity_path); + entity_ui(ui, store_db, timeline, entity_path); }); } }); @@ -136,12 +136,12 @@ fn log_db_ui(ui: &mut egui::Ui, log_db: &re_data_store::LogDb) { fn entity_ui( ui: &mut egui::Ui, - log_db: &re_data_store::LogDb, + store_db: &re_data_store::StoreDb, timeline: re_log_types::Timeline, entity_path: &re_log_types::EntityPath, ) { // Each entity can have many components (e.g. position, color, radius, …): - if let Some(mut components) = log_db + if let Some(mut components) = store_db .entity_db .data_store .all_components(&timeline, entity_path) @@ -149,7 +149,7 @@ fn entity_ui( components.sort(); // Make the order predictable for component in components { ui.collapsing(component.to_string(), |ui| { - component_ui(ui, log_db, timeline, entity_path, component); + component_ui(ui, store_db, timeline, entity_path, component); }); } } @@ -157,7 +157,7 @@ fn entity_ui( fn component_ui( ui: &mut egui::Ui, - log_db: &re_data_store::LogDb, + store_db: &re_data_store::StoreDb, timeline: re_log_types::Timeline, entity_path: &re_log_types::EntityPath, component_name: re_log_types::ComponentName, @@ -167,7 +167,7 @@ fn component_ui( let query = re_arrow_store::LatestAtQuery::latest(timeline); if let Some((_, component)) = re_query::get_component_with_instances( - &log_db.entity_db.data_store, + &store_db.entity_db.data_store, &query, entity_path, component_name, diff --git a/examples/rust/objectron/src/main.rs b/examples/rust/objectron/src/main.rs index 89e5edcac9c0..495cb5a7705a 100644 --- a/examples/rust/objectron/src/main.rs +++ b/examples/rust/objectron/src/main.rs @@ -363,7 +363,7 @@ fn parse_duration(arg: &str) -> Result anyhow::Result<()> { // Parse protobuf dataset - let rec_info = args.recording.info().with_context(|| { + let store_info = args.recording.info().with_context(|| { use clap::ValueEnum as _; format!( "Could not read the recording, have you downloaded the dataset?
\ @@ -372,7 +372,7 @@ fn run(rec_stream: &RecordingStream, args: &Args) -> anyhow::Result<()> { args.recording.to_possible_value().unwrap().get_name(), ) })?; - let annotations = read_annotations(&rec_info.path_annotations)?; + let annotations = read_annotations(&store_info.path_annotations)?; // See https://github.com/google-research-datasets/Objectron/issues/39 log_coordinate_space(rec_stream, "world", "RUB")?; @@ -388,7 +388,7 @@ fn run(rec_stream: &RecordingStream, args: &Args) -> anyhow::Result<()> { let mut time_offset = 0.0; // Iterate through the parsed dataset and log Rerun primitives - let ar_frames = read_ar_frames(&rec_info.path_ar_frames); + let ar_frames = read_ar_frames(&store_info.path_ar_frames); for (idx, ar_frame) in ar_frames.enumerate() { if idx + global_frame_offset >= args.frames.unwrap_or(usize::MAX) { break 'outer; @@ -396,7 +396,7 @@ fn run(rec_stream: &RecordingStream, args: &Args) -> anyhow::Result<()> { let ar_frame = ar_frame?; let ar_frame = ArFrame::from_raw( - rec_info.path_ar_frames.parent().unwrap().into(), + store_info.path_ar_frames.parent().unwrap().into(), idx, timepoint( idx + global_frame_offset, @@ -443,13 +443,13 @@ fn main() -> anyhow::Result<()> { // --- Protobuf parsing --- #[derive(Debug, Clone)] -struct RecordingInfo { +struct StoreInfo { path_ar_frames: PathBuf, path_annotations: PathBuf, } impl Recording { - fn info(&self) -> anyhow::Result<RecordingInfo> { + fn info(&self) -> anyhow::Result<StoreInfo> { const DATASET_DIR: &str = concat!( env!("CARGO_MANIFEST_DIR"), "/../../python/objectron/dataset" @@ -473,7 +473,7 @@ impl Recording { .context("empty directory")?? .path(); - Ok(RecordingInfo { + Ok(StoreInfo { // objectron/dataset/book/batch-20/35/geometry.pbdata path_ar_frames: path.join("geometry.pbdata"), // objectron/dataset/book/batch-20/35/annotation.pbdata diff --git a/rerun_py/rerun_sdk/rerun/__init__.py b/rerun_py/rerun_sdk/rerun/__init__.py index 116e5ab25b5c..ce81f1e4fb2c 100644 --- a/rerun_py/rerun_sdk/rerun/__init__.py +++ b/rerun_py/rerun_sdk/rerun/__init__.py @@ -224,7 +224,7 @@ def init( if init_logging: new_recording( application_id, - recording_id, + recording_id=recording_id, make_default=True, make_thread_default=False, spawn=False, diff --git a/rerun_py/rerun_sdk/rerun/log/experimental/blueprint.py b/rerun_py/rerun_sdk/rerun/log/experimental/blueprint.py index 4db0f2b26406..539f19f8dea2 100644 --- a/rerun_py/rerun_sdk/rerun/log/experimental/blueprint.py +++ b/rerun_py/rerun_sdk/rerun/log/experimental/blueprint.py @@ -35,12 +35,12 @@ def new_blueprint( The default blueprint_id is based on `multiprocessing.current_process().authkey` which means that all processes spawned with `multiprocessing` - will have the same default recording_id. + will have the same default blueprint_id. If you are not using `multiprocessing` and still want several different Python processes to log to the same Rerun instance (and be part of the same blueprint), you will need to manually assign them all the same blueprint_id. - Any random UUIDv4 will work, or copy the recording id for the parent process. + Any random UUIDv4 will work, or copy the blueprint_id for the parent process. make_default : bool If true (_not_ the default), the newly initialized blueprint will replace the current active one (if any) in the global scope.
diff --git a/rerun_py/src/python_bridge.rs b/rerun_py/src/python_bridge.rs index f2878ab94760..63b375118e03 100644 --- a/rerun_py/src/python_bridge.rs +++ b/rerun_py/src/python_bridge.rs @@ -18,12 +18,12 @@ use re_viewport::{ SpaceViewBlueprint, ViewCategory, }; -use re_log_types::{DataRow, RecordingType}; +use re_log_types::{DataRow, StoreKind}; use rerun::{ log::{PathOp, RowId}, sink::MemorySinkStorage, time::TimePoint, - EntityPath, RecordingId, RecordingStream, RecordingStreamBuilder, + EntityPath, RecordingStream, RecordingStreamBuilder, StoreId, }; pub use rerun::{ @@ -55,8 +55,8 @@ use parking_lot::Mutex; // Python GC is doing, which obviously leads to very bad things :tm:. // // TODO(#2116): drop unused recordings -fn all_recordings() -> parking_lot::MutexGuard<'static, HashMap<RecordingId, RecordingStream>> { - static ALL_RECORDINGS: OnceCell<Mutex<HashMap<RecordingId, RecordingStream>>> = OnceCell::new(); +fn all_recordings() -> parking_lot::MutexGuard<'static, HashMap<StoreId, RecordingStream>> { + static ALL_RECORDINGS: OnceCell<Mutex<HashMap<StoreId, RecordingStream>>> = OnceCell::new(); ALL_RECORDINGS.get_or_init(Default::default).lock() } @@ -211,15 +211,15 @@ fn new_recording( }); let recording_id = if let Some(recording_id) = recording_id { - RecordingId::from_string(RecordingType::Data, recording_id) + StoreId::from_string(StoreKind::Recording, recording_id) } else { - default_recording_id(py, RecordingType::Data, &application_id) + default_store_id(py, StoreKind::Recording, &application_id) }; let recording = RecordingStreamBuilder::new(application_id) .is_official_example(is_official_example) - .recording_id(recording_id.clone()) - .recording_source(re_log_types::RecordingSource::PythonSdk(python_version(py))) + .store_id(recording_id.clone()) + .store_source(re_log_types::StoreSource::PythonSdk(python_version(py))) .default_enabled(default_enabled) .buffered() .map_err(|err| PyRuntimeError::new_err(err.to_string()))?; @@ -261,14 +261,14 @@ fn new_blueprint( default_enabled: bool, ) -> PyResult<PyRecordingStream> { let blueprint_id = if let Some(blueprint_id) = blueprint_id { - RecordingId::from_string(RecordingType::Blueprint, blueprint_id) + StoreId::from_string(StoreKind::Blueprint, blueprint_id) } else { - default_recording_id(py, RecordingType::Blueprint, &application_id) + default_store_id(py, StoreKind::Blueprint, &application_id) }; let blueprint = RecordingStreamBuilder::new(application_id) - .recording_id(blueprint_id.clone()) - .recording_source(re_log_types::RecordingSource::PythonSdk(python_version(py))) + .store_id(blueprint_id.clone()) + .store_source(re_log_types::StoreSource::PythonSdk(python_version(py))) .default_enabled(default_enabled) .buffered() .map_err(|err| PyRuntimeError::new_err(err.to_string()))?; @@ -320,15 +320,15 @@ impl std::ops::Deref for PyRecordingStream { #[pyfunction] fn get_application_id(recording: Option<&PyRecordingStream>) -> Option<String> { get_data_recording(recording)? - .recording_info() + .store_info() .map(|info| info.application_id.to_string()) } #[pyfunction] fn get_recording_id(recording: Option<&PyRecordingStream>) -> Option<String> { get_data_recording(recording)?
- .recording_info() - .map(|info| info.recording_id.to_string()) + .store_info() + .map(|info| info.store_id.to_string()) } /// Returns the currently active data recording in the global scope, if any; fallbacks to the @@ -336,7 +336,7 @@ fn get_recording_id(recording: Option<&PyRecordingStream>) -> Option { #[pyfunction] fn get_data_recording(recording: Option<&PyRecordingStream>) -> Option { RecordingStream::get_quiet( - rerun::RecordingType::Data, + rerun::StoreKind::Recording, recording.map(|rec| rec.0.clone()), ) .map(PyRecordingStream) @@ -345,7 +345,7 @@ fn get_data_recording(recording: Option<&PyRecordingStream>) -> Option Option { - RecordingStream::global(rerun::RecordingType::Data).map(PyRecordingStream) + RecordingStream::global(rerun::StoreKind::Recording).map(PyRecordingStream) } /// Replaces the currently active recording in the global scope with the specified one. @@ -365,7 +365,7 @@ fn set_global_data_recording( // sorry. py.allow_threads(|| { RecordingStream::set_global( - rerun::RecordingType::Data, + rerun::StoreKind::Recording, recording.map(|rec| rec.0.clone()), ) .map(PyRecordingStream) @@ -375,7 +375,7 @@ fn set_global_data_recording( /// Returns the currently active data recording in the thread-local scope, if any. #[pyfunction] fn get_thread_local_data_recording() -> Option { - RecordingStream::thread_local(rerun::RecordingType::Data).map(PyRecordingStream) + RecordingStream::thread_local(rerun::StoreKind::Recording).map(PyRecordingStream) } /// Replaces the currently active recording in the thread-local scope with the specified one. @@ -395,7 +395,7 @@ fn set_thread_local_data_recording( // sorry. py.allow_threads(|| { RecordingStream::set_thread_local( - rerun::RecordingType::Data, + rerun::StoreKind::Recording, recording.map(|rec| rec.0.clone()), ) .map(PyRecordingStream) @@ -407,7 +407,7 @@ fn set_thread_local_data_recording( #[pyfunction] fn get_blueprint_recording(overrides: Option<&PyRecordingStream>) -> Option { RecordingStream::get_quiet( - rerun::RecordingType::Blueprint, + rerun::StoreKind::Blueprint, overrides.map(|rec| rec.0.clone()), ) .map(PyRecordingStream) @@ -416,7 +416,7 @@ fn get_blueprint_recording(overrides: Option<&PyRecordingStream>) -> Option Option { - RecordingStream::global(rerun::RecordingType::Blueprint).map(PyRecordingStream) + RecordingStream::global(rerun::StoreKind::Blueprint).map(PyRecordingStream) } /// Replaces the currently active recording in the global scope with the specified one. @@ -436,7 +436,7 @@ fn set_global_blueprint_recording( // sorry. py.allow_threads(|| { RecordingStream::set_global( - rerun::RecordingType::Blueprint, + rerun::StoreKind::Blueprint, recording.map(|rec| rec.0.clone()), ) .map(PyRecordingStream) @@ -446,7 +446,7 @@ fn set_global_blueprint_recording( /// Returns the currently active blueprint recording in the thread-local scope, if any. #[pyfunction] fn get_thread_local_blueprint_recording() -> Option { - RecordingStream::thread_local(rerun::RecordingType::Blueprint).map(PyRecordingStream) + RecordingStream::thread_local(rerun::StoreKind::Blueprint).map(PyRecordingStream) } /// Replaces the currently active recording in the thread-local scope with the specified one. @@ -466,7 +466,7 @@ fn set_thread_local_blueprint_recording( // sorry. 
py.allow_threads(|| { RecordingStream::set_thread_local( - rerun::RecordingType::Blueprint, + rerun::StoreKind::Blueprint, recording.map(|rec| rec.0.clone()), ) .map(PyRecordingStream) @@ -1326,11 +1326,7 @@ fn python_version(py: Python<'_>) -> re_log_types::PythonVersion { } } -fn default_recording_id( - py: Python<'_>, - variant: RecordingType, - application_id: &str, -) -> RecordingId { +fn default_store_id(py: Python<'_>, variant: StoreKind, application_id: &str) -> StoreId { use rand::{Rng as _, SeedableRng as _}; use std::hash::{Hash as _, Hasher as _}; @@ -1358,7 +1354,7 @@ fn default_recording_id( application_id.hash(&mut hasher); let mut rng = rand::rngs::StdRng::seed_from_u64(hasher.finish()); let uuid = uuid::Builder::from_random_bytes(rng.gen()).into_uuid(); - RecordingId::from_uuid(variant, uuid) + StoreId::from_uuid(variant, uuid) } fn authkey(py: Python<'_>) -> Vec { diff --git a/tests/rust/test_image_memory/src/main.rs b/tests/rust/test_image_memory/src/main.rs index ea6868c0597a..d79aec3a3131 100644 --- a/tests/rust/test_image_memory/src/main.rs +++ b/tests/rust/test_image_memory/src/main.rs @@ -13,8 +13,8 @@ fn main() -> Result<(), Box> { re_viewer::env_vars::RERUN_TRACK_ALLOCATIONS, ); - let recording_info = rerun::new_recording_info("test_image_memory_rs"); - rerun::native_viewer::spawn(recording_info, Default::default(), |rec_stream| { + let store_info = rerun::new_store_info("test_image_memory_rs"); + rerun::native_viewer::spawn(store_info, Default::default(), |rec_stream| { log_images(&rec_stream).unwrap(); })?; Ok(())