From ce43f58709fe5b2d5cbf5bb528e75db54d7fab5c Mon Sep 17 00:00:00 2001 From: "ChenYing Kuo (CY)" Date: Wed, 5 Jun 2024 21:09:06 +0800 Subject: [PATCH 01/29] Add NOTE for LowLatency transport. (#1088) Signed-off-by: ChenYing Kuo --- DEFAULT_CONFIG.json5 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 0dd9f1283b..ab1201a6ad 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -221,6 +221,8 @@ /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to /// enable 'lowlatency' you need to explicitly disable 'qos'. + /// NOTE: LowLatency transport does not support the fragmentation, so the message size should be + /// smaller than the tx batch_size. lowlatency: false, /// Enables QoS on unicast communications. qos: { From c279982c131c556ebd474f4ffa9fd6c096d13830 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Wed, 5 Jun 2024 15:10:56 +0200 Subject: [PATCH 02/29] fix: Improve debug messages in `zenoh-transport` (#1090) * fix: Improve debug messages for failing RX/TX tasks * fix: Improve debug message for `accept_link` timeout * chore: Fix `clippy::redundant_pattern_matching` error --- io/zenoh-transport/src/multicast/link.rs | 4 ++-- io/zenoh-transport/src/unicast/manager.rs | 8 ++++++-- io/zenoh-transport/src/unicast/universal/link.rs | 4 ++-- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index aede7ae1fb..193df5ca67 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -342,7 +342,7 @@ impl TransportLinkMulticastUniversal { ) .await; if let Err(e) = res { - tracing::debug!("{}", e); + tracing::debug!("TX task failed: {}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle 
zenoh_runtime::ZRuntime::Net.spawn(async move { c_transport.delete().await }); @@ -378,7 +378,7 @@ impl TransportLinkMulticastUniversal { .await; c_signal.trigger(); if let Err(e) = res { - tracing::debug!("{}", e); + tracing::debug!("RX task failed: {}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle zenoh_runtime::ZRuntime::Net.spawn(async move { c_transport.delete().await }); diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 2cce7299b0..899887bea0 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -746,13 +746,17 @@ impl TransportManager { let c_manager = self.clone(); self.task_controller .spawn_with_rt(zenoh_runtime::ZRuntime::Acceptor, async move { - if let Err(e) = tokio::time::timeout( + if tokio::time::timeout( c_manager.config.unicast.accept_timeout, super::establishment::accept::accept_link(link, &c_manager), ) .await + .is_err() { - tracing::debug!("{}", e); + tracing::debug!( + "Failed to accept link before deadline ({}ms)", + c_manager.config.unicast.accept_timeout.as_millis() + ); } incoming_counter.fetch_sub(1, SeqCst); }); diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 9a85ee9a46..44a12be4ac 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -97,7 +97,7 @@ impl TransportLinkUnicastUniversal { .await; if let Err(e) = res { - tracing::debug!("{}", e); + tracing::debug!("TX task failed: {}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle // TODO(yuyuan): do more study to check which ZRuntime should be used or refine the @@ -125,7 +125,7 @@ impl TransportLinkUnicastUniversal { // TODO(yuyuan): improve this callback if let Err(e) = res { - tracing::debug!("{}", e); + 
tracing::debug!("RX task failed: {}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle From 0942a69f719e001ffe5aaf127399c54b3e89cc90 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 10 Jun 2024 09:31:18 +0200 Subject: [PATCH 03/29] Improve pipeline backoff (#1097) * Yield task for backoff * Improve comments and error handling in backoff * Simplify pipeline pull * Consider backoff configuration --- io/zenoh-transport/src/common/pipeline.rs | 83 +++++++++++++---------- 1 file changed, 46 insertions(+), 37 deletions(-) diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 832cabd207..e3a4068b2d 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -49,7 +49,6 @@ use zenoh_protocol::{ type NanoSeconds = u32; const RBLEN: usize = QueueSizeConf::MAX; -const TSLOT: NanoSeconds = 100; // Inner structure to reuse serialization batches struct StageInRefill { @@ -347,6 +346,7 @@ enum Pull { // Inner structure to keep track and signal backoff operations #[derive(Clone)] struct Backoff { + tslot: NanoSeconds, retry_time: NanoSeconds, last_bytes: BatchSize, bytes: Arc, @@ -354,8 +354,9 @@ struct Backoff { } impl Backoff { - fn new(bytes: Arc, backoff: Arc) -> Self { + fn new(tslot: NanoSeconds, bytes: Arc, backoff: Arc) -> Self { Self { + tslot, retry_time: 0, last_bytes: 0, bytes, @@ -365,7 +366,7 @@ impl Backoff { fn next(&mut self) { if self.retry_time == 0 { - self.retry_time = TSLOT; + self.retry_time = self.tslot; self.backoff.store(true, Ordering::Relaxed); } else { match self.retry_time.checked_mul(2) { @@ -383,7 +384,7 @@ impl Backoff { } } - fn stop(&mut self) { + fn reset(&mut self) { self.retry_time = 0; self.backoff.store(false, Ordering::Relaxed); } @@ -400,7 +401,6 @@ impl StageOutIn { #[inline] fn try_pull(&mut self) -> Pull { if let Some(batch) = self.s_out_r.pull() { - 
self.backoff.stop(); return Pull::Some(batch); } @@ -412,41 +412,26 @@ impl StageOutIn { let old_bytes = self.backoff.last_bytes; self.backoff.last_bytes = new_bytes; - match new_bytes.cmp(&old_bytes) { - std::cmp::Ordering::Equal => { - // No new bytes have been written on the batch, try to pull - if let Ok(mut g) = self.current.try_lock() { - // First try to pull from stage OUT - if let Some(batch) = self.s_out_r.pull() { - self.backoff.stop(); + if new_bytes == old_bytes { + // It seems no new bytes have been written on the batch, try to pull + if let Ok(mut g) = self.current.try_lock() { + // First try to pull from stage OUT to make sure we are not in the case + // where new_bytes == old_bytes are because of two identical serializations + if let Some(batch) = self.s_out_r.pull() { + return Pull::Some(batch); + } + + // An incomplete (non-empty) batch may be available in the state IN pipeline. + match g.take() { + Some(batch) => { return Pull::Some(batch); } - - // An incomplete (non-empty) batch is available in the state IN pipeline. 
- match g.take() { - Some(batch) => { - self.backoff.stop(); - return Pull::Some(batch); - } - None => { - self.backoff.stop(); - return Pull::None; - } + None => { + return Pull::None; } } - // Go to backoff - } - std::cmp::Ordering::Less => { - // There should be a new batch in Stage OUT - if let Some(batch) = self.s_out_r.pull() { - self.backoff.stop(); - return Pull::Some(batch); - } - // Go to backoff - } - std::cmp::Ordering::Greater => { - // Go to backoff } + // Go to backoff } // Do backoff @@ -569,7 +554,7 @@ impl TransmissionPipeline { s_in: StageOutIn { s_out_r, current, - backoff: Backoff::new(bytes, backoff), + backoff: Backoff::new(config.backoff.as_nanos() as NanoSeconds, bytes, backoff), }, s_ref: StageOutRefill { n_ref_w, s_ref_w }, }); @@ -657,6 +642,11 @@ pub(crate) struct TransmissionPipelineConsumer { impl TransmissionPipelineConsumer { pub(crate) async fn pull(&mut self) -> Option<(WBatch, usize)> { + // Reset backoff before pulling + for queue in self.stage_out.iter_mut() { + queue.s_in.backoff.reset(); + } + while self.active.load(Ordering::Relaxed) { // Calculate the backoff maximum let mut bo = NanoSeconds::MAX; @@ -674,10 +664,29 @@ impl TransmissionPipelineConsumer { } } + // In case of writing many small messages, `recv_async()` will most likely return immedietaly. + // While trying to pull from the queue, the stage_in `lock()` will most likely taken, leading to + // a spinning behaviour while attempting to take the lock. Yield the current task to avoid + // spinning the current task indefinitely. + tokio::task::yield_now().await; + // Wait for the backoff to expire or for a new message - let _ = + let res = tokio::time::timeout(Duration::from_nanos(bo as u64), self.n_out_r.recv_async()) .await; + match res { + Ok(Ok(())) => { + // We have received a notification from the channel that some bytes are available, retry to pull. + } + Ok(Err(_channel_error)) => { + // The channel is closed, we can't be notified anymore. 
Break the loop and return None. + break; + } + Err(_timeout) => { + // The backoff timeout expired. Be aware that tokio timeout may not sleep for short duration since + // it has time resolution of 1ms: https://docs.rs/tokio/latest/tokio/time/fn.sleep.html + } + } } None } From de7822157df70cdff2dbfa99a6fb8471e7d336e4 Mon Sep 17 00:00:00 2001 From: Tavo Annus Date: Mon, 10 Jun 2024 13:19:33 +0300 Subject: [PATCH 04/29] Add typos check to CI (#1065) * Fix typos * Add typos check to CI --- .github/workflows/ci.yml | 12 +++++- DEFAULT_CONFIG.json5 | 18 ++++----- README.md | 6 +-- _typos.toml | 11 +++++ commons/zenoh-codec/src/core/zint.rs | 4 +- commons/zenoh-collections/src/properties.rs | 2 +- commons/zenoh-config/src/connection_retry.rs | 2 +- commons/zenoh-config/src/include.rs | 2 +- commons/zenoh-config/src/lib.rs | 14 +++---- .../zenoh-keyexpr/src/key_expr/borrowed.rs | 12 +++--- .../zenoh-keyexpr/src/key_expr/format/mod.rs | 4 +- .../src/key_expr/intersect/classical.rs | 2 +- .../src/keyexpr_tree/arc_tree.rs | 4 +- commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs | 2 +- .../src/keyexpr_tree/traits/mod.rs | 2 +- commons/zenoh-macros/src/lib.rs | 2 +- commons/zenoh-protocol/src/core/encoding.rs | 2 +- commons/zenoh-protocol/src/core/resolution.rs | 2 +- commons/zenoh-protocol/src/lib.rs | 2 +- commons/zenoh-protocol/src/network/declare.rs | 4 +- commons/zenoh-protocol/src/transport/close.rs | 2 +- .../zenoh-protocol/src/transport/fragment.rs | 2 +- commons/zenoh-protocol/src/transport/frame.rs | 2 +- .../zenoh-protocol/src/transport/keepalive.rs | 2 +- commons/zenoh-protocol/src/transport/open.rs | 2 +- commons/zenoh-protocol/src/zenoh/put.rs | 2 +- commons/zenoh-protocol/src/zenoh/reply.rs | 2 +- commons/zenoh-shm/src/lib.rs | 4 +- commons/zenoh-sync/src/condition.rs | 4 +- commons/zenoh-util/src/std_only/lib_loader.rs | 4 +- commons/zenoh-util/src/std_only/mod.rs | 2 +- commons/zenoh-util/src/std_only/net/mod.rs | 12 +++--- 
commons/zenoh-util/src/std_only/time_range.rs | 4 +- commons/zenoh-util/src/std_only/timer.rs | 4 +- examples/README.md | 4 +- examples/examples/z_pub_shm.rs | 6 +-- examples/examples/z_sub_thr.rs | 8 ++-- .../zenoh-link-udp/src/multicast.rs | 2 +- .../zenoh-link-unixpipe/src/unix/unicast.rs | 20 +++++----- .../zenoh-link-unixsock_stream/src/unicast.rs | 2 +- io/zenoh-transport/src/common/batch.rs | 2 +- io/zenoh-transport/src/common/pipeline.rs | 2 +- io/zenoh-transport/src/common/seq_num.rs | 4 +- io/zenoh-transport/src/manager.rs | 2 +- io/zenoh-transport/src/multicast/rx.rs | 12 +++--- .../src/unicast/establishment/accept.rs | 2 +- io/zenoh-transport/src/unicast/manager.rs | 2 +- io/zenoh-transport/tests/unicast_transport.rs | 2 +- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- .../zenoh-plugin-storage-manager/src/lib.rs | 2 +- .../src/replica/storage.rs | 2 +- plugins/zenoh-plugin-trait/src/lib.rs | 4 +- plugins/zenoh-plugin-trait/src/vtable.rs | 2 +- zenoh-ext/examples/examples/README.md | 2 +- zenoh-ext/examples/examples/z_pub_cache.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 8 ++-- zenoh-ext/src/subscriber_ext.rs | 12 +++--- zenoh/src/info.rs | 2 +- zenoh/src/key_expr.rs | 6 +-- zenoh/src/lib.rs | 2 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 2 +- zenoh/src/net/routing/dispatcher/queries.rs | 2 +- zenoh/src/net/routing/dispatcher/resource.rs | 40 +++++++++---------- .../src/net/routing/hat/linkstate_peer/mod.rs | 6 +-- .../net/routing/hat/linkstate_peer/network.rs | 27 +++++++------ .../net/routing/hat/linkstate_peer/pubsub.rs | 32 +++++++-------- .../net/routing/hat/linkstate_peer/queries.rs | 32 +++++++-------- zenoh/src/net/routing/hat/router/mod.rs | 6 +-- zenoh/src/net/routing/hat/router/network.rs | 27 +++++++------ zenoh/src/net/routing/hat/router/pubsub.rs | 32 +++++++-------- zenoh/src/net/routing/hat/router/queries.rs 
| 32 +++++++-------- .../net/routing/interceptor/downsampling.rs | 4 +- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/net/runtime/mod.rs | 12 +++--- zenoh/src/net/runtime/orchestrator.rs | 8 ++-- zenoh/src/net/tests/tables.rs | 20 +++++----- zenoh/src/plugins/sealed.rs | 6 +-- zenoh/src/publication.rs | 2 +- zenoh/src/sample.rs | 4 +- zenoh/src/selector.rs | 4 +- zenoh/src/session.rs | 22 +++++----- zenoh/src/subscriber.rs | 4 +- zenoh/tests/connection_retry.rs | 2 +- zenoh/tests/routing.rs | 2 +- zenohd/src/main.rs | 2 +- 88 files changed, 317 insertions(+), 294 deletions(-) create mode 100644 _typos.toml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2aaf1b0763..6320464db3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -133,6 +133,16 @@ jobs: run: ci/valgrind-check/run.sh shell: bash + typos: + name: Typos Check + runs-on: ubuntu-latest + steps: + - name: Clone this repository + uses: actions/checkout@v4 + + - name: Check spelling + uses: crate-ci/typos@master + # NOTE: In GitHub repository settings, the "Require status checks to pass # before merging" branch protection rule ensures that commits are only merged # from branches where specific status checks have passed. 
These checks are @@ -141,7 +151,7 @@ jobs: ci: name: CI status checks runs-on: ubuntu-latest - needs: [check, test, valgrind] + needs: [check, test, valgrind, typos] if: always() steps: - name: Check whether all jobs pass diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index ab1201a6ad..b33dbeb8cf 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -40,7 +40,7 @@ exit_on_failure: { router: false, peer: false, client: true }, /// connect establishing retry configuration retry: { - /// intial wait timeout until next connect try + /// initial wait timeout until next connect try period_init_ms: 1000, /// maximum wait timeout until next connect try period_max_ms: 4000, @@ -73,7 +73,7 @@ exit_on_failure: true, /// listen retry configuration retry: { - /// intial wait timeout until next try + /// initial wait timeout until next try period_init_ms: 1000, /// maximum wait timeout until next try period_max_ms: 4000, @@ -108,8 +108,8 @@ gossip: { /// Whether gossip scouting is enabled or not enabled: true, - /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. - /// When false, gossip scouting informations are only propagated to the next hop. + /// When true, gossip scouting information are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting information are only propagated to the next hop. /// Activating multihop gossip implies more scouting traffic and a lower scalability. /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have /// direct connectivity with each other. @@ -267,7 +267,7 @@ /// set the actual keep_alive interval to one fourth of the lease time: i.e. send /// 4 keep_alive messages in a lease period. Changing the lease time will have the /// keep_alive messages sent more or less often. 
- /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + /// This is in-line with the ITU-T G.8013/Y.1731 specification on continuous connectivity /// check which considers a link as failed when no messages are received in 3.5 times the /// target interval. keep_alive: 4, @@ -293,7 +293,7 @@ background: 4, }, /// Congestion occurs when the queue is empty (no available batch). - /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. + /// Using CongestionControl::Block the caller is blocked until a batch is available and re-inserted into the queue. /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. congestion_control: { /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. @@ -308,7 +308,7 @@ rx: { /// Receiving buffer size in bytes for each link /// The default the rx_buffer_size value is the same as the default batch size: 65335. - /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. buffer_size: 65535, @@ -345,7 +345,7 @@ enabled: false, }, auth: { - /// The configuration of authentification. + /// The configuration of authentication. /// A password implies a username is required. 
usrpwd: { user: null, @@ -398,7 +398,7 @@ // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively // /// This is used in the 'storage_manager' which supports subplugins, each with it's own config // /// - // /// See below exapmle of plugin configuration using `__config__` property + // /// See below example of plugin configuration using `__config__` property // // /// Configure the REST API plugin // rest: { diff --git a/README.md b/README.md index b09ea73d86..af08db7260 100644 --- a/README.md +++ b/README.md @@ -62,9 +62,9 @@ Then you can start run `zenohd`. ## How to build it > [!WARNING] -> Zenoh and its ecosystem are under active development. When you build from git, make sure you also build from git any other Zenoh repository you plan to use (e.g. binding, plugin, backend, etc.). It may happen that some changes in git are not compatible with the most recent packaged Zenoh release (e.g. deb, docker, pip). We put particular effort in mantaining compatibility between the various git repositories in the Zenoh project. +> Zenoh and its ecosystem are under active development. When you build from git, make sure you also build from git any other Zenoh repository you plan to use (e.g. binding, plugin, backend, etc.). It may happen that some changes in git are not compatible with the most recent packaged Zenoh release (e.g. deb, docker, pip). We put particular effort in maintaining compatibility between the various git repositories in the Zenoh project. -Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be succesfully compiled with Rust stable (>= 1.71.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with: +Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). 
Zenoh can be successfully compiled with Rust stable (>= 1.71.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with: ```bash $ rustup update @@ -170,7 +170,7 @@ See other examples of Zenoh usage in [examples/](examples) * `--rest-http-port `: Configures the [REST plugin](https://zenoh.io/docs/manual/plugin-http/)'s HTTP port. Accepted values: - a port number - a string with format `:` (to bind the HTTP server to a specific interface) - - `"None"` to desactivate the REST plugin + - `"None"` to deactivate the REST plugin If not specified, the REST plugin will be active on any interface (`[::]`) and port `8000`. diff --git a/_typos.toml b/_typos.toml new file mode 100644 index 0000000000..eb9952004f --- /dev/null +++ b/_typos.toml @@ -0,0 +1,11 @@ +[files] +extend-exclude = [ + # Ignore all files in transport tests as they contain + # hashes that are treated as typos. + "io/zenoh-transport/tests/*.rs", +] + + +[default.extend-words] +mis = "mis" # mismatch +thr = "thr" # throughput diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index 1c2f5a28e4..8167d895c8 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -274,7 +274,7 @@ zint_impl!(usize); // // guarantees at this point that `x` is never `0`. Since `x` is 64bit, // // then `n` is guaranteed to have a value between 1 and 8, both inclusives. // // `into` is guaranteed to be exactly 9 bytes long. Therefore, copying at most -// // 8 bytes with a pointer offest of 1 is actually safe. +// // 8 bytes with a pointer offset of 1 is actually safe. // let n = 8 - (x.leading_zeros() / 8) as usize; // unsafe { // core::ptr::copy_nonoverlapping( @@ -348,7 +348,7 @@ zint_impl!(usize); // macro_rules! 
non_zero_array { // ($($i: expr,)*) => { -// [$(match NonZeroU8::new($i) {Some(x) => x, None => panic!("Attempted to place 0 in an array of non-zeros litteral")}),*] +// [$(match NonZeroU8::new($i) {Some(x) => x, None => panic!("Attempted to place 0 in an array of non-zeros literal")}),*] // }; // } diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 281ac8ca68..6a3c96241c 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -24,7 +24,7 @@ const COMMENT_PREFIX: char = '#'; /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties -/// and `=` as separator between a key and its value. Keys and values are trimed. +/// and `=` as separator between a key and its value. Keys and values are trimmed. #[non_exhaustive] #[derive(Clone, PartialEq, Eq, Default)] pub struct Properties(HashMap); diff --git a/commons/zenoh-config/src/connection_retry.rs b/commons/zenoh-config/src/connection_retry.rs index a845fbfe6a..e1d383749c 100644 --- a/commons/zenoh-config/src/connection_retry.rs +++ b/commons/zenoh-config/src/connection_retry.rs @@ -27,7 +27,7 @@ use crate::mode_dependent::*; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct ConnectionRetryModeDependentConf { - // intial wait timeout until next try + // initial wait timeout until next try pub period_init_ms: Option>, // maximum wait timeout until next try pub period_max_ms: Option>, diff --git a/commons/zenoh-config/src/include.rs b/commons/zenoh-config/src/include.rs index 709cd7c29f..b89d78d1c0 100644 --- a/commons/zenoh-config/src/include.rs +++ b/commons/zenoh-config/src/include.rs @@ -65,7 +65,7 @@ pub(crate) fn recursive_include

( where P: AsRef, { - // if include property is present, read the file and remove properites found in file from values + // if include property is present, read the file and remove properties found in file from values let include_object = if let Some(include_path) = values.get(include_property_name) { let Some(include_path) = include_path.as_str() else { bail!( diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index f40a528325..c54d75a82a 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -153,7 +153,7 @@ pub trait ConfigValidator: Send + Sync { } } -// Necessary to allow to set default emplty weak referece value to plugin.validator field +// Necessary to allow to set default emplty weak reference value to plugin.validator field // because empty weak value is not allowed for Arc impl ConfigValidator for () {} @@ -267,8 +267,8 @@ validated_struct::validator! { GossipConf { /// Whether gossip scouting is enabled or not. enabled: Option, - /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. - /// When false, gossip scouting informations are only propagated to the next hop. + /// When true, gossip scouting information are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting information are only propagated to the next hop. /// Activating multihop gossip implies more scouting traffic and a lower scalability. /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have /// direct connectivity with each other. @@ -375,7 +375,7 @@ validated_struct::validator! 
{ sequence_number_resolution: Bits where (sequence_number_resolution_validator), /// Link lease duration in milliseconds (default: 10000) lease: u64, - /// Number fo keep-alive messages in a link lease duration (default: 4) + /// Number of keep-alive messages in a link lease duration (default: 4) keep_alive: usize, /// Zenoh's MTU equivalent (default: 2^16-1) batch_size: BatchSize, @@ -396,7 +396,7 @@ validated_struct::validator! { background: usize, } where (queue_size_validator), /// Congestion occurs when the queue is empty (no available batch). - /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. + /// Using CongestionControl::Block the caller is blocked until a batch is available and re-inserted into the queue. /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. pub congestion_control: CongestionControlConf { /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. @@ -412,7 +412,7 @@ validated_struct::validator! { pub rx: LinkRxConf { /// Receiving buffer size in bytes for each link /// The default the rx_buffer_size value is the same as the default batch size: 65335. - /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. buffer_size: usize, @@ -455,7 +455,7 @@ validated_struct::validator! { }, pub auth: #[derive(Default)] AuthConf { - /// The configuration of authentification. + /// The configuration of authentication. /// A password implies a username is required. 
pub usrpwd: #[derive(Default)] UsrPwdConf { diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 85b4ef79e2..4291883492 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -107,7 +107,7 @@ impl keyexpr { /// Joins both sides, inserting a `/` in between them. /// - /// This should be your prefered method when concatenating path segments. + /// This should be your preferred method when concatenating path segments. /// /// This is notably useful for workspaces: /// ```rust @@ -137,7 +137,7 @@ impl keyexpr { /// /// NOTE: this operation can typically be used in a backend implementation, at creation of a Storage to get the keys prefix, /// and then in `zenoh_backend_traits::Storage::on_sample()` this prefix has to be stripped from all received - /// `Sample::key_expr` to retrieve the corrsponding key. + /// `Sample::key_expr` to retrieve the corresponding key. /// /// # Examples: /// ``` @@ -172,12 +172,12 @@ impl keyexpr { } /// Remove the specified `prefix` from `self`. - /// The result is a list of `keyexpr`, since there might be several ways for the prefix to match the begining of the `self` key expression. + /// The result is a list of `keyexpr`, since there might be several ways for the prefix to match the beginning of the `self` key expression. /// For instance, if `self` is `"a/**/c/*" and `prefix` is `a/b/c` then: /// - the `prefix` matches `"a/**/c"` leading to a result of `"*"` when stripped from `self` /// - the `prefix` matches `"a/**"` leading to a result of `"**/c/*"` when stripped from `self` /// So the result is `["*", "**/c/*"]`. - /// If `prefix` cannot match the begining of `self`, an empty list is reuturned. + /// If `prefix` cannot match the beginning of `self`, an empty list is reuturned. /// /// See below more examples. 
/// @@ -581,7 +581,7 @@ enum KeyExprConstructionError { LoneDollarStar = -1, SingleStarAfterDoubleStar = -2, DoubleStarAfterDoubleStar = -3, - EmpyChunk = -4, + EmptyChunk = -4, StarsInChunk = -5, DollarAfterDollarOrStar = -6, ContainsSharpOrQMark = -7, @@ -595,7 +595,7 @@ impl<'a> TryFrom<&'a str> for &'a keyexpr { let mut in_big_wild = false; for chunk in value.split('/') { if chunk.is_empty() { - bail!((KeyExprConstructionError::EmpyChunk) "Invalid Key Expr `{}`: empty chunks are forbidden, as well as leading and trailing slashes", value) + bail!((KeyExprConstructionError::EmptyChunk) "Invalid Key Expr `{}`: empty chunks are forbidden, as well as leading and trailing slashes", value) } if chunk == "$*" { bail!((KeyExprConstructionError::LoneDollarStar) diff --git a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs index 3a03d8a515..bf5536ec63 100644 --- a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs +++ b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs @@ -23,7 +23,7 @@ //! ## The format syntax //! KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formmats support "specification" chunks. //! These chunks must follow the one of the following syntaxes: `${id:pattern}`, `${id:pattern#default}`, `$#{id:pattern}#`, or `$#{id:pattern#default}#`, where: -//! - `id` is the chunk identifer: it cannot contain the `:` character, and is used to name the chunk in accessors. +//! - `id` is the chunk identifier: it cannot contain the `:` character, and is used to name the chunk in accessors. //! - `pattern` must be a valid KE (and therefore cannot contain `#`) and defines the range of values that the chunk may adopt. //! - `default` (optional) is used as the chunk value when formatting if the builder wasn't supplied with a value for `id`. //! 
@@ -73,7 +73,7 @@ use support::{IterativeConstructor, Spec}; /// ## The format syntax /// KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formmats support "specification" chunks. /// These chunks must follow the one of the following syntaxes: `${id:pattern}`, `${id:pattern#default}`, `$#{id:pattern}#`, or `$#{id:pattern#default}#`, where: -/// - `id` is the chunk identifer: it cannot contain the `:` character, and is used to name the chunk in accessors. +/// - `id` is the chunk identifier: it cannot contain the `:` character, and is used to name the chunk in accessors. /// - `pattern` must be a valid KE (and therefore cannot contain `#`) and defines the range of values that the chunk may adopt. /// - `default` (optional) is used as the chunk value when formatting if the builder wasn't supplied with a value for `id`. /// diff --git a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs index fa346a2d4a..cc28ef2c4c 100644 --- a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs +++ b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs @@ -110,7 +110,7 @@ fn it_intersect(mut it1: &[u8], mut it2: &[u8]) -> bool { } (it1.is_empty() || it1 == b"**") && (it2.is_empty() || it2 == b"**") } -/// Retruns `true` if the given key expressions intersect. +/// Returns `true` if the given key expressions intersect. /// /// I.e. if it exists a resource key (with no wildcards) that matches /// both given key expressions. 
diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs index a0428ac563..dfb7e68261 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs @@ -88,10 +88,10 @@ impl< /// # Type inference papercut /// Despite some of `KeArcTree`'s generic parameters having default values, those are only taken into /// account by the compiler when a type is named with some parameters omitted, and not when a type is - /// infered with the same parameters unconstrained. + /// inferred with the same parameters unconstrained. /// /// The simplest way to resolve this is to eventually assign to tree part of the return value - /// to a variable or field whose type is named `KeArcTree<_>` (the `Weight` parameter can generally be infered). + /// to a variable or field whose type is named `KeArcTree<_>` (the `Weight` parameter can generally be inferred). pub fn new() -> Result<(Self, DefaultToken), ::ConstructionError> { let token = DefaultToken::new()?; Ok((Self::with_token(&token), token)) diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs index e2833a912f..5d7991289e 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs @@ -42,7 +42,7 @@ //! KeTrees were designed to maximize code reuse. As such, their core properties are reflected through the [`IKeyExprTree`] and [`IKeyExprTreeMut`] traits. //! //! KeTrees are made up of node, where nodes may or may not have a value (called `weight`) associated with them. To access these weighs, as well as other -//! properties of a node, you can go throught the [`IKeyExprTreeNode`] and [`IKeyExprTreeNodeMut`] traits. +//! properties of a node, you can go through the [`IKeyExprTreeNode`] and [`IKeyExprTreeNodeMut`] traits. //! //! # Iterators //! 
KeTrees provide iterators for the following operations: diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs index dd06cf14b8..cee2bd9162 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs @@ -192,7 +192,7 @@ pub trait IKeyExprTreeMut<'a, Weight>: IKeyExprTree<'a, Weight> { self.prune_where(|node| node.weight().is_none()) } } -/// The basic operations of a KeTree when a Token is necessary to acess data. +/// The basic operations of a KeTree when a Token is necessary to access data. pub trait ITokenKeyExprTree<'a, Weight, Token> { /// An immutable guard to a node of the tree. type Node: IKeyExprTreeNode; diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index 774bebc80a..3118399dc4 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -451,7 +451,7 @@ mod zenoh_runtime_derive; use syn::DeriveInput; use zenoh_runtime_derive::{derive_generic_runtime_param, derive_register_param}; -/// Make the underlying struct `Param` be generic over any `T` satifying a generated `trait DefaultParam { fn param() -> Param; }` +/// Make the underlying struct `Param` be generic over any `T` satisfying a generated `trait DefaultParam { fn param() -> Param; }` /// ```rust,ignore /// #[derive(GenericRuntimeParam)] /// struct Param { diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index f202b8e79c..b0b089d9b3 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -148,7 +148,7 @@ impl Encoding { } /// Returns `true`if the string representation of this encoding starts with - /// the string representation of ther given encoding. + /// the string representation of their given encoding. 
pub fn starts_with(&self, with: T) -> bool where T: Into, diff --git a/commons/zenoh-protocol/src/core/resolution.rs b/commons/zenoh-protocol/src/core/resolution.rs index 093fd33bb4..bfce6c6466 100644 --- a/commons/zenoh-protocol/src/core/resolution.rs +++ b/commons/zenoh-protocol/src/core/resolution.rs @@ -111,7 +111,7 @@ impl fmt::Display for Bits { } #[repr(u8)] -// The value indicates the bit offest +// The value indicates the bit offset #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Field { FrameSN = 0, diff --git a/commons/zenoh-protocol/src/lib.rs b/commons/zenoh-protocol/src/lib.rs index 2e1a2fa7cf..074aae49a5 100644 --- a/commons/zenoh-protocol/src/lib.rs +++ b/commons/zenoh-protocol/src/lib.rs @@ -73,7 +73,7 @@ pub const VERSION: u8 = 0x08; // # Array field // // An array contains a fixed number of elements whose number is known a priori or indicated by -// another field. Each element can be either a single byte field or a variable legnth field. +// another field. Each element can be either a single byte field or a variable length field. // // ```text // 7 6 5 4 3 2 1 0 diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 76415d52f5..396caf187d 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -693,8 +693,8 @@ pub mod interest { /// /// The DECLARE INTEREST message is sent to request the transmission of existing and future /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be sent to - /// request the transmisison of all existing subscriptions matching `a/*`. A FINAL INTEREST is used to - /// mark the end of the transmission of exisiting matching declarations. + /// request the transmission of all existing subscriptions matching `a/*`. A FINAL INTEREST is used to + /// mark the end of the transmission of existing matching declarations. 
/// /// E.g., the [`DeclareInterest`]/[`FinalInterest`]/[`UndeclareInterest`] message flow is the following: /// diff --git a/commons/zenoh-protocol/src/transport/close.rs b/commons/zenoh-protocol/src/transport/close.rs index 4e760400b7..b93fe6d6b6 100644 --- a/commons/zenoh-protocol/src/transport/close.rs +++ b/commons/zenoh-protocol/src/transport/close.rs @@ -16,7 +16,7 @@ /// /// The [`Close`] message is sent in any of the following two cases: /// 1) in response to an INIT or OPEN message which are not accepted; -/// 2) at any time to arbitrarly close the transport with the corresponding zenoh node. +/// 2) at any time to arbitrarily close the transport with the corresponding zenoh node. /// /// The [`Close`] message flow is the following: /// diff --git a/commons/zenoh-protocol/src/transport/fragment.rs b/commons/zenoh-protocol/src/transport/fragment.rs index 3e80c9cfbf..5af22db4f1 100644 --- a/commons/zenoh-protocol/src/transport/fragment.rs +++ b/commons/zenoh-protocol/src/transport/fragment.rs @@ -18,7 +18,7 @@ use zenoh_buffers::ZSlice; /// # Fragment message /// /// The [`Fragment`] message is used to transmit on the wire large [`crate::zenoh::ZenohMessage`] -/// that require fragmentation because they are larger thatn the maximum batch size +/// that require fragmentation because they are larger than the maximum batch size /// (i.e. 2^16-1) and/or the link MTU. /// /// The [`Fragment`] message flow is the following: diff --git a/commons/zenoh-protocol/src/transport/frame.rs b/commons/zenoh-protocol/src/transport/frame.rs index 184784f9f1..7afce036ce 100644 --- a/commons/zenoh-protocol/src/transport/frame.rs +++ b/commons/zenoh-protocol/src/transport/frame.rs @@ -20,7 +20,7 @@ use alloc::vec::Vec; /// [`crate::net::protocol::message::ZenohMessage`]. I.e., the total length of the /// serialized [`crate::net::protocol::message::ZenohMessage`] (s) MUST be smaller /// than the maximum batch size (i.e. 2^16-1) and the link MTU. 
-/// The [`Frame`] message is used as means to aggreate multiple +/// The [`Frame`] message is used as means to aggregate multiple /// [`crate::net::protocol::message::ZenohMessage`] in a single atomic message that /// goes on the wire. By doing so, many small messages can be batched together and /// share common information like the sequence number. diff --git a/commons/zenoh-protocol/src/transport/keepalive.rs b/commons/zenoh-protocol/src/transport/keepalive.rs index 927b0cd46b..cc9ccfad99 100644 --- a/commons/zenoh-protocol/src/transport/keepalive.rs +++ b/commons/zenoh-protocol/src/transport/keepalive.rs @@ -49,7 +49,7 @@ /// /// NOTE: In order to consider eventual packet loss, transmission latency and jitter, the time /// interval between two subsequent [`KeepAlive`] messages SHOULD be set to one fourth of -/// the lease time. This is in-line with the ITU-T G.8013/Y.1731 specification on continous +/// the lease time. This is in-line with the ITU-T G.8013/Y.1731 specification on continuous /// connectivity check which considers a link as failed when no messages are received in /// 3.5 times the target keep alive interval. /// diff --git a/commons/zenoh-protocol/src/transport/open.rs b/commons/zenoh-protocol/src/transport/open.rs index d793671b06..f899e8cc24 100644 --- a/commons/zenoh-protocol/src/transport/open.rs +++ b/commons/zenoh-protocol/src/transport/open.rs @@ -17,7 +17,7 @@ use zenoh_buffers::ZSlice; /// # Open message /// -/// After having succesfully complete the [`super::InitSyn`]-[`super::InitAck`] message exchange, +/// After having successfully complete the [`super::InitSyn`]-[`super::InitAck`] message exchange, /// the OPEN message is sent on a link to finalize the initialization of the link and /// associated transport with a zenoh node. 
/// For convenience, we call [`OpenSyn`] and [`OpenAck`] an OPEN message with the A flag diff --git a/commons/zenoh-protocol/src/zenoh/put.rs b/commons/zenoh-protocol/src/zenoh/put.rs index 14674e9ad9..ac18aaf00a 100644 --- a/commons/zenoh-protocol/src/zenoh/put.rs +++ b/commons/zenoh-protocol/src/zenoh/put.rs @@ -66,7 +66,7 @@ pub mod ext { pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; /// # Shared Memory extension - /// Used to carry additional information about the shared-memory layour of data + /// Used to carry additional information about the shared-memory layout of data #[cfg(feature = "shared-memory")] pub type Shm = zextunit!(0x2, true); #[cfg(feature = "shared-memory")] diff --git a/commons/zenoh-protocol/src/zenoh/reply.rs b/commons/zenoh-protocol/src/zenoh/reply.rs index 2395e1e9b2..0cdbcd2cdc 100644 --- a/commons/zenoh-protocol/src/zenoh/reply.rs +++ b/commons/zenoh-protocol/src/zenoh/reply.rs @@ -74,7 +74,7 @@ pub mod ext { pub type ConsolidationType = crate::zenoh::query::ext::ConsolidationType; /// # Shared Memory extension - /// Used to carry additional information about the shared-memory layour of data + /// Used to carry additional information about the shared-memory layout of data #[cfg(feature = "shared-memory")] pub type Shm = zextunit!(0x3, true); #[cfg(feature = "shared-memory")] diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index 82f3614380..a75e174488 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -62,7 +62,7 @@ impl PartialEq for Chunk { } } -/// Informations about a [`SharedMemoryBuf`]. +/// Information about a [`SharedMemoryBuf`]. /// /// This that can be serialized and can be used to retrieve the [`SharedMemoryBuf`] in a remote process. #[derive(Clone, Debug, PartialEq, Eq)] @@ -274,7 +274,7 @@ impl fmt::Debug for SharedMemoryReader { /// A shared memory segment manager. 
/// -/// Allows to access a shared memory segment and reserve some parts of this segment for writting. +/// Allows to access a shared memory segment and reserve some parts of this segment for writing. pub struct SharedMemoryManager { segment_path: String, size: usize, diff --git a/commons/zenoh-sync/src/condition.rs b/commons/zenoh-sync/src/condition.rs index 098aa05411..ba615d8888 100644 --- a/commons/zenoh-sync/src/condition.rs +++ b/commons/zenoh-sync/src/condition.rs @@ -13,7 +13,7 @@ // use event_listener::{Event, EventListener}; use std::{pin::Pin, sync::MutexGuard}; -use tokio::sync::MutexGuard as AysncMutexGuard; +use tokio::sync::MutexGuard as AsyncMutexGuard; pub type ConditionWaiter = Pin>; /// This is a Condition Variable similar to that provided by POSIX. @@ -44,7 +44,7 @@ impl Condition { /// Waits for the condition to be notified #[inline] - pub async fn wait(&self, guard: AysncMutexGuard<'_, T>) { + pub async fn wait(&self, guard: AsyncMutexGuard<'_, T>) { let listener = self.event.listen(); drop(guard); listener.await; diff --git a/commons/zenoh-util/src/std_only/lib_loader.rs b/commons/zenoh-util/src/std_only/lib_loader.rs index 9c682e4343..4f3621e1cc 100644 --- a/commons/zenoh-util/src/std_only/lib_loader.rs +++ b/commons/zenoh-util/src/std_only/lib_loader.rs @@ -29,7 +29,7 @@ zconfigurable! { pub static ref LIB_DEFAULT_SEARCH_PATHS: String = ".:~/.zenoh/lib:/opt/homebrew/lib:/usr/local/lib:/usr/lib".to_string(); } -/// LibLoader allows search for librairies and to load them. +/// LibLoader allows search for libraries and to load them. #[derive(Clone, Debug)] pub struct LibLoader { search_paths: Vec, @@ -142,7 +142,7 @@ impl LibLoader { bail!("Library file '{}' not found", filename) } - /// Search and load all librairies with filename starting with [struct@LIB_PREFIX]+`prefix` and ending with [struct@LIB_SUFFIX]. + /// Search and load all libraries with filename starting with [struct@LIB_PREFIX]+`prefix` and ending with [struct@LIB_SUFFIX]. 
/// The result is a list of tuple with: /// * the [Library] /// * its full path diff --git a/commons/zenoh-util/src/std_only/mod.rs b/commons/zenoh-util/src/std_only/mod.rs index 1cb406374c..bfd24b6525 100644 --- a/commons/zenoh-util/src/std_only/mod.rs +++ b/commons/zenoh-util/src/std_only/mod.rs @@ -8,7 +8,7 @@ pub use timer::*; pub mod log; pub use log::*; -/// The "ZENOH_HOME" environement variable name +/// The "ZENOH_HOME" environment variable name pub const ZENOH_HOME_ENV_VAR: &str = "ZENOH_HOME"; const DEFAULT_ZENOH_HOME_DIRNAME: &str = ".zenoh"; diff --git a/commons/zenoh-util/src/std_only/net/mod.rs b/commons/zenoh-util/src/std_only/net/mod.rs index 83ab08d678..239cdd6647 100644 --- a/commons/zenoh-util/src/std_only/net/mod.rs +++ b/commons/zenoh-util/src/std_only/net/mod.rs @@ -24,7 +24,7 @@ zconfigurable! { } #[cfg(windows)] -unsafe fn get_adapters_adresses(af_spec: i32) -> ZResult> { +unsafe fn get_adapters_addresses(af_spec: i32) -> ZResult> { use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; let mut ret; @@ -81,7 +81,7 @@ pub fn get_interface(name: &str) -> ZResult> { use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); while let Some(iface) = next_iface { @@ -165,7 +165,7 @@ pub fn get_local_addresses(interface: Option<&str>) -> ZResult> { use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_UNSPEC)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_UNSPEC)?; let mut result = vec![]; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -245,7 +245,7 @@ pub fn get_unicast_addresses_of_interface(name: &str) -> ZResult> { use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; - let 
buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut addrs = vec![]; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -284,7 +284,7 @@ pub fn get_index_of_interface(addr: IpAddr) -> ZResult { use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); while let Some(iface) = next_iface { @@ -327,7 +327,7 @@ pub fn get_interface_names_by_addr(addr: IpAddr) -> ZResult> { use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_UNSPEC)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_UNSPEC)?; if addr.is_unspecified() { let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); diff --git a/commons/zenoh-util/src/std_only/time_range.rs b/commons/zenoh-util/src/std_only/time_range.rs index 50e5542fcc..886083b2f0 100644 --- a/commons/zenoh-util/src/std_only/time_range.rs +++ b/commons/zenoh-util/src/std_only/time_range.rs @@ -41,7 +41,7 @@ const W_TO_SECS: f64 = D_TO_SECS * 7.0; /// - the "offset" syntax, which is written `now()`, and allows to specify a target instant as /// an offset applied to an instant of evaluation. These offset are resolved at the evaluation site. /// -/// In range syntax, omiting `` and/or `` implies that the range is unbounded in that direction. +/// In range syntax, omitting `` and/or `` implies that the range is unbounded in that direction. /// /// Exclusive bounds are represented by their respective delimiters pointing towards the exterior. /// Interior bounds are represented by the opposite. 
@@ -283,7 +283,7 @@ impl TimeExpr { }), } } - /// Substracts `duration` from `self`, returning `None` if `self` is a `Fixed(SystemTime)` and subsctracting the duration is not possible + /// Subtracts `duration` from `self`, returning `None` if `self` is a `Fixed(SystemTime)` and subtracting the duration is not possible /// because the result would be outside the bounds of the underlying data structure (see [`SystemTime::checked_sub`]). /// Otherwise returns `Some(time_expr)`. pub fn checked_sub(&self, duration: f64) -> Option { diff --git a/commons/zenoh-util/src/std_only/timer.rs b/commons/zenoh-util/src/std_only/timer.rs index 6e7dde065a..e6eefd9335 100644 --- a/commons/zenoh-util/src/std_only/timer.rs +++ b/commons/zenoh-util/src/std_only/timer.rs @@ -86,8 +86,8 @@ impl Eq for TimedEvent {} impl Ord for TimedEvent { fn cmp(&self, other: &Self) -> ComparisonOrdering { // The usual cmp is defined as: self.when.cmp(&other.when) - // This would make the events odered from largets to the smallest in the heap. - // However, we want the events to be ordered from the smallets to the largest. + // This would make the events ordered from largest to the smallest. + // However, we want the events to be ordered from the smallest to the largest. // As a consequence of this, we swap the comparison terms, converting the heap // from a max-heap into a min-heap. other.when.cmp(&self.when) diff --git a/examples/README.md b/examples/README.md index 0d38e32185..bd846a14f9 100644 --- a/examples/README.md +++ b/examples/README.md @@ -213,7 +213,7 @@ Declares a liveliness token on a given key expression (`group1/zenoh-rs` by default). This token will be seen alive byt the `z_get_liveliness` and `z_sub_liveliness` until - user explicitely drops the token by pressing `'d'` or implicitely dropped by terminating + user explicitly drops the token by pressing `'d'` or implicitly dropped by terminating or killing the `z_liveliness` example.
Typical usage: @@ -245,7 +245,7 @@ liveliness tokens being dropped) that match a given key expression (`group1/**` by default). Those tokens could be declared by the `z_liveliness` example. - Note: the `z_sub_liveliness` example will not receive informations about + Note: the `z_sub_liveliness` example will not receive information about matching liveliness tokens that were alive before it's start. Typical usage: diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 542cff3b6d..3601680bf8 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -51,7 +51,7 @@ async fn main() -> Result<(), zenoh::Error> { Err(_) => { tokio::time::sleep(Duration::from_millis(100)).await; println!( - "Afer failing allocation the GC collected: {} bytes -- retrying", + "After failing allocation the GC collected: {} bytes -- retrying", shm.garbage_collect() ); println!( @@ -67,7 +67,7 @@ async fn main() -> Result<(), zenoh::Error> { let prefix = format!("[{idx:4}] "); let prefix_len = prefix.as_bytes().len(); - // Retrive a mutable slice from the SharedMemoryBuf. + // Retrieve a mutable slice from the SharedMemoryBuf. // // This operation is marked unsafe since we cannot guarantee a single mutable reference // across multiple processes. Thus if you use it, and you'll inevitable have to use it, @@ -93,7 +93,7 @@ async fn main() -> Result<(), zenoh::Error> { let freed = shm.garbage_collect(); println!("The Gargabe collector freed {freed} bytes"); let defrag = shm.defragment(); - println!("De-framented {defrag} bytes"); + println!("De-fragmented {defrag} bytes"); } // Dropping the SharedMemoryBuf means to free it. 
drop(sbuf); diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 2a3511b0bf..7e2018b846 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -51,8 +51,8 @@ impl Stats { } fn print_round(&self) { let elapsed = self.round_start.elapsed().as_secs_f64(); - let throughtput = (self.round_size as f64) / elapsed; - println!("{throughtput} msg/s"); + let throughput = (self.round_size as f64) / elapsed; + println!("{throughput} msg/s"); } } impl Drop for Stats { @@ -62,8 +62,8 @@ impl Drop for Stats { }; let elapsed = global_start.elapsed().as_secs_f64(); let total = self.round_size * self.finished_rounds + self.round_count; - let throughtput = total as f64 / elapsed; - println!("Received {total} messages over {elapsed:.2}s: {throughtput}msg/s"); + let throughput = total as f64 / elapsed; + println!("Received {total} messages over {elapsed:.2}s: {throughput}msg/s"); } } diff --git a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index 59848b95c1..94d79739bf 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -275,7 +275,7 @@ impl LinkManagerMulticastUdp { .map_err(|e| zerror!("{}: {}", mcast_addr, e))?; } } - IpAddr::V6(src_ip6) => bail!("{}: unexepcted IPv6 source address", src_ip6), + IpAddr::V6(src_ip6) => bail!("{}: unexpected IPv6 source address", src_ip6), }, IpAddr::V6(dst_ip6) => { // Join default multicast group diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 090ef0a340..8964955140 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -81,12 +81,12 @@ impl Invitation { } async fn expect(expected_suffix: u32, pipe: &mut PipeR) -> ZResult<()> { - let recived_suffix = Self::receive(pipe).await?; - if recived_suffix != expected_suffix { + 
let received_suffix = Self::receive(pipe).await?; + if received_suffix != expected_suffix { bail!( "Suffix mismatch: expected {} got {}", expected_suffix, - recived_suffix + received_suffix ) } Ok(()) @@ -244,7 +244,7 @@ async fn handle_incoming_connections( // read invitation from the request channel let suffix = Invitation::receive(request_channel).await?; - // gererate uplink and downlink names + // generate uplink and downlink names let (dedicated_downlink_path, dedicated_uplink_path) = get_dedicated_pipe_names(path_downlink, path_uplink, suffix); @@ -252,10 +252,10 @@ async fn handle_incoming_connections( let mut dedicated_downlink = PipeW::new(&dedicated_downlink_path).await?; let mut dedicated_uplink = PipeR::new(&dedicated_uplink_path, access_mode).await?; - // confirm over the dedicated chanel + // confirm over the dedicated channel Invitation::confirm(suffix, &mut dedicated_downlink).await?; - // got confirmation over the dedicated chanel + // got confirmation over the dedicated channel Invitation::expect(suffix, &mut dedicated_uplink).await?; // create Locators @@ -353,7 +353,7 @@ async fn create_pipe( // generate random suffix let suffix: u32 = rand::thread_rng().gen(); - // gererate uplink and downlink names + // generate uplink and downlink names let (path_downlink, path_uplink) = get_dedicated_pipe_names(path_downlink, path_uplink, suffix); // try create uplink and downlink pipes to ensure that the selected suffix is available @@ -390,7 +390,7 @@ impl UnicastPipeClient { // listener owns the request channel, so failure of this call means that there is nobody listening on the provided endpoint let mut request_channel = PipeW::new(&path_uplink).await?; - // create dedicated channel prerequisities. The creation code also ensures that nobody else would use the same channel concurrently + // create dedicated channel prerequisites. 
The creation code also ensures that nobody else would use the same channel concurrently let ( mut dedicated_downlink, dedicated_suffix, @@ -398,10 +398,10 @@ impl UnicastPipeClient { dedicated_uplink_path, ) = dedicate_pipe(&path_uplink, &path_downlink, access_mode).await?; - // invite the listener to our dedicated channel over the requet channel + // invite the listener to our dedicated channel over the request channel Invitation::send(dedicated_suffix, &mut request_channel).await?; - // read responce that should be sent over the dedicated channel, confirming that everything is OK + // read response that should be sent over the dedicated channel, confirming that everything is OK // on the listener's side and it is already working with the dedicated channel Invitation::expect(dedicated_suffix, &mut dedicated_downlink).await?; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index fa1c2d9d0f..1d95af7eab 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -368,7 +368,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { e })?; - // Update the endpoint with the acutal local path + // Update the endpoint with the actual local path endpoint = EndPoint::new( endpoint.protocol(), local_path_str, diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 4139a65a05..efae776980 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -327,7 +327,7 @@ impl WBatch { }) .map_err(|_| zerror!("Compression error"))?; - // Verify wether the resulting compressed data is smaller than the initial input + // Verify whether the resulting compressed data is smaller than the initial input if support.len() < self.buffer.len() { Ok(Finalize::Buffer) } else { diff --git a/io/zenoh-transport/src/common/pipeline.rs 
b/io/zenoh-transport/src/common/pipeline.rs index e3a4068b2d..37351596c3 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -250,7 +250,7 @@ impl StageIn { // Treat all messages as non-droppable once we start fragmenting batch = zgetbatch_rets!(true, tch.sn.set(sn).unwrap()); - // Serialize the message fragmnet + // Serialize the message fragment match batch.encode((&mut reader, &mut fragment)) { Ok(_) => { // Update the SN diff --git a/io/zenoh-transport/src/common/seq_num.rs b/io/zenoh-transport/src/common/seq_num.rs index f286d14741..ecbfd8a944 100644 --- a/io/zenoh-transport/src/common/seq_num.rs +++ b/io/zenoh-transport/src/common/seq_num.rs @@ -57,7 +57,7 @@ impl SeqNum { /// - 16_386 (i.e., 2^14) /// - 2_097_152 (i.e., 2^21) /// - /// This funtion will panic if `value` is out of bound w.r.t. `resolution`. That is if + /// This function will panic if `value` is out of bound w.r.t. `resolution`. That is if /// `value` is greater or equal than `resolution`. /// pub(crate) fn make(value: TransportSn, resolution: Bits) -> ZResult { @@ -179,7 +179,7 @@ impl SeqNumGenerator { /// As a consequence of wire zenoh's representation of sequence numbers /// this should be a multiple of 7. /// - /// This funtion will panic if `value` is out of bound w.r.t. `resolution`. That is if + /// This function will panic if `value` is out of bound w.r.t. `resolution`. That is if /// `value` is greater or equal than `resolution`. 
/// pub(crate) fn make(initial_sn: TransportSn, resolution: Bits) -> ZResult { diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index a52a35af83..2657f5cbd4 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -74,7 +74,7 @@ use zenoh_task::TaskController; /// .lease(Duration::from_secs(1)) /// .keep_alive(4) // Send a KeepAlive every 250 ms /// .accept_timeout(Duration::from_secs(1)) -/// .accept_pending(10) // Set to 10 the number of simultanous pending incoming transports +/// .accept_pending(10) // Set to 10 the number of simultaneous pending incoming transports /// .max_sessions(5); // Allow max 5 transports open /// let mut resolution = Resolution::default(); /// resolution.set(Field::FrameSN, Bits::U8); diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 4927c179d7..6d662f2873 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -63,7 +63,7 @@ impl TransportMulticastInner { || join.ext_qos.is_some() != peer.is_qos() { let e = format!( - "Ingoring Join on {} of peer: {}. Inconsistent parameters.", + "Ignoring Join on {} of peer: {}. Inconsistent parameters.", peer.locator, peer.zid, ); tracing::debug!("{}", e); @@ -81,7 +81,7 @@ impl TransportMulticastInner { ) -> ZResult<()> { if zread!(self.peers).len() >= self.manager.config.multicast.max_sessions { tracing::debug!( - "Ingoring Join on {} from peer: {}. Max sessions reached: {}.", + "Ignoring Join on {} from peer: {}. Max sessions reached: {}.", locator, join.zid, self.manager.config.multicast.max_sessions, @@ -91,7 +91,7 @@ impl TransportMulticastInner { if join.version != self.manager.config.version { tracing::debug!( - "Ingoring Join on {} from peer: {}. Unsupported version: {}. Expected: {}.", + "Ignoring Join on {} from peer: {}. Unsupported version: {}. 
Expected: {}.", locator, join.zid, join.version, @@ -102,7 +102,7 @@ impl TransportMulticastInner { if join.resolution != self.manager.config.resolution { tracing::debug!( - "Ingoring Join on {} from peer: {}. Unsupported SN resolution: {:?}. Expected: {:?}.", + "Ignoring Join on {} from peer: {}. Unsupported SN resolution: {:?}. Expected: {:?}.", locator, join.zid, join.resolution, @@ -113,7 +113,7 @@ impl TransportMulticastInner { if join.batch_size != batch_size { tracing::debug!( - "Ingoring Join on {} from peer: {}. Unsupported Batch Size: {:?}. Expected: {:?}.", + "Ignoring Join on {} from peer: {}. Unsupported Batch Size: {:?}. Expected: {:?}.", locator, join.zid, join.batch_size, @@ -124,7 +124,7 @@ impl TransportMulticastInner { if !self.manager.config.multicast.is_qos && join.ext_qos.is_some() { tracing::debug!( - "Ingoring Join on {} from peer: {}. QoS is not supported.", + "Ignoring Join on {} from peer: {}. QoS is not supported.", locator, join.zid, ); diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index ce9229db4d..a901aba6ec 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -429,7 +429,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Verify that the cookie is the one we sent if input.cookie_nonce != cookie.nonce { - let e = zerror!("Rejecting OpenSyn on: {}. Unkwown cookie.", self.link); + let e = zerror!("Rejecting OpenSyn on: {}. 
Unknown cookie.", self.link); return Err((e.into(), Some(close::reason::INVALID))); } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 899887bea0..708d0d39e9 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -95,7 +95,7 @@ pub struct TransportManagerParamsUnicast { pub struct TransportManagerBuilderUnicast { // NOTE: In order to consider eventual packet loss and transmission latency and jitter, // set the actual keep_alive timeout to one fourth of the lease time. - // This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + // This is in-line with the ITU-T G.8013/Y.1731 specification on continuous connectivity // check which considers a link as failed when no messages are received in 3.5 times the // target interval. pub(super) lease: Duration, diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 33cfbceb17..4ddacef6bc 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -41,7 +41,7 @@ use zenoh_transport::{ TransportPeerEventHandler, }; -// These keys and certificates below are purposedly generated to run TLS and mTLS tests. +// These keys and certificates below are purposely generated to run TLS and mTLS tests. // // With 2 way authentication (mTLS), using TLS 1.3, we need two pairs of keys and certificates: one // for the "server" and another one for the "client". 
diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index d3ddbd43cc..ca97e4791f 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -87,7 +87,7 @@ impl Default for ReplicaConfig { // This will determine the time upto which replicas might be diverged // This can be different for each replica if not used to compute hot and warm publication_interval: Duration::from_secs(5), - // This indicates the uncertainity due to the network + // This indicates the uncertainty due to the network // The messages might still be in transit in the network propagation_delay: Duration::from_millis(200), // This is the chunk that you would like your data to be divide into in time. diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 1660d83c3d..f185aaa259 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -102,7 +102,7 @@ //! //! async fn put(&mut self, key: Option, value: Value, timestamp: Timestamp) -> ZResult { //! // the key will be None if it exactly matched with the strip_prefix -//! // create a storge specific special structure to store it +//! // create a storage specific special structure to store it //! // Store the data with timestamp //! // @TODO: //! // store (key, value, timestamp) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 48d200ffb5..b17e4dcb98 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -224,7 +224,7 @@ impl StorageRuntimeInner { config.volume_id ); // let _ = async_std::task::block_on(storage.send(StorageMessage::Stop)); - let _ = storage.send(StorageMessage::Stop); // TODO: was previosuly spawning a task. do we need that? + let _ = storage.send(StorageMessage::Stop); // TODO: was previously spawning a task. 
do we need that? } } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 4131471977..63352fab0a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -295,7 +295,7 @@ impl StorageService { && self.is_latest(&k, sample.get_timestamp().unwrap()).await)) { tracing::trace!( - "Sample `{}` identified as neded processing for key {}", + "Sample `{}` identified as needed processing for key {}", sample, k ); diff --git a/plugins/zenoh-plugin-trait/src/lib.rs b/plugins/zenoh-plugin-trait/src/lib.rs index 6d9ac35fe9..b9dbb455ab 100644 --- a/plugins/zenoh-plugin-trait/src/lib.rs +++ b/plugins/zenoh-plugin-trait/src/lib.rs @@ -25,13 +25,13 @@ //! //! The actual work of the plugin is performed by the instance, which is created by the [`start`](Plugin::start) function. //! -//! Plugins are loaded, started and stopped by [`PluginsManager`](crate::manager::PluginsManager). Stopping pluign is just dropping it's instance. +//! Plugins are loaded, started and stopped by [`PluginsManager`](crate::manager::PluginsManager). Stopping plugin is just dropping it's instance. //! //! Plugins can be static and dynamic. //! //! Static plugin is just a type which implements [`Plugin`] trait. It can be added to [`PluginsManager`](crate::manager::PluginsManager) by [`PluginsManager::add_static_plugin`](crate::manager::PluginsManager::add_static_plugin) method. //! -//! Dynamic pluign is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. These functiuons are defined automatically by [`declare_plugin`](crate::declare_plugin) macro. +//! Dynamic plugin is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. 
These functiuons are defined automatically by [`declare_plugin`](crate::declare_plugin) macro. //! mod compatibility; mod manager; diff --git a/plugins/zenoh-plugin-trait/src/vtable.rs b/plugins/zenoh-plugin-trait/src/vtable.rs index e1108f87f1..74c7479c3e 100644 --- a/plugins/zenoh-plugin-trait/src/vtable.rs +++ b/plugins/zenoh-plugin-trait/src/vtable.rs @@ -48,7 +48,7 @@ impl PluginVTable { /// This macro adds non-mangled functions which provides plugin version and loads it into the host. /// If plugin library should work also as static, consider calling this macro under feature condition /// -/// The funcitons declared by this macro are: +/// The functions declared by this macro are: /// /// - `get_plugin_loader_version` - returns `PLUGIN_LOADER_VERSION` const of the crate. The [`PluginsManager`](crate::manager::PluginsManager) /// will check if this version is compatible with the host. diff --git a/zenoh-ext/examples/examples/README.md b/zenoh-ext/examples/examples/README.md index 892bded1cb..498a1ca6fe 100644 --- a/zenoh-ext/examples/examples/README.md +++ b/zenoh-ext/examples/examples/README.md @@ -17,7 +17,7 @@ ### z_pub_cache - Declares a publisher and an assiciated publication cache with a given key expression. + Declares a publisher and an associated publication cache with a given key expression. All the publications are locally cached (with a configurable history size - i.e. max number of cached data per resource). The cache can be queried by a QueryingSubscriber at startup (see next example). Typical usage: diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs b/zenoh-ext/examples/examples/z_pub_cache.rs index 58eb7962c9..982829f845 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -59,7 +59,7 @@ struct Args { /// The number of publications to keep in cache. history: usize, #[arg(short = 'o', long)] - /// Set `complete` option to true. 
This means that this queryable is ulitmate data source, no need to scan other queryables. + /// Set `complete` option to true. This means that this queryable is ultimate data source, no need to scan other queryables. complete: bool, #[arg(short = 'x', long)] /// An optional queryable prefix. diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index a1442fa5c4..5d19964d19 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // -//! To manage groups and group memeberships +//! To manage groups and group memberships use flume::{Receiver, Sender}; use futures::prelude::*; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 344fe99d37..431ccd2dde 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -202,7 +202,7 @@ impl<'a> PublicationCache<'a> { } }, - // on query, reply with cach content + // on query, reply with cached content query = quer_recv.recv_async() => { if let Ok(query) = query { if !query.selector().key_expr.as_str().contains('*') { diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 4d97670e1e..3c738b7da4 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -96,7 +96,7 @@ impl<'a, 'b, KeySpace> QueryingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandle self.callback(locked(callback)) } - /// Use the given handler to recieve Samples. + /// Use the given handler to receive Samples. #[inline] pub fn with( self, @@ -585,9 +585,9 @@ where } } -/// A Subscriber that will run the given user defined `fetch` funtion at startup. +/// A Subscriber that will run the given user defined `fetch` function at startup. 
/// -/// The user defined `fetch` funtion should fetch some samples and return them through the callback funtion +/// The user defined `fetch` function should fetch some samples and return them through the callback function /// (it could typically be a Session::get()). Those samples will be merged with the received publications and made available in the receiver. /// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// @@ -726,7 +726,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { /// Perform an additional `fetch`. /// - /// The provided `fetch` funtion should fetch some samples and return them through the callback funtion + /// The provided `fetch` function should fetch some samples and return them through the callback function /// (it could typically be a Session::get()). Those samples will be merged with the received publications and made available in the receiver. /// /// # Examples diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 192a0a3121..5a9c05972f 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -49,8 +49,8 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` - /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them - /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. + /// will run the given `fetch` function. The user defined `fetch` function should fetch some samples and return them + /// through the callback function. 
Those samples will be merged with the received publications and made available in the receiver. /// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// /// A typical usage of the `FetchingSubscriber` is to retrieve publications that were made in the past, but stored in some zenoh Storage. @@ -133,8 +133,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` - /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them - /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. + /// will run the given `fetch` function. The user defined `fetch` function should fetch some samples and return them + /// through the callback function. Those samples will be merged with the received publications and made available in the receiver. /// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// /// A typical usage of the `FetchingSubscriber` is to retrieve publications that were made in the past, but stored in some zenoh Storage. @@ -246,8 +246,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` - /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them - /// through the callback funtion. 
Those samples will be merged with the received publications and made available in the receiver. + /// will run the given `fetch` function. The user defined `fetch` function should fetch some samples and return them + /// through the callback function. Those samples will be merged with the received publications and made available in the receiver. /// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// /// A typical usage of the fetching liveliness subscriber is to retrieve existing liveliness tokens while susbcribing to diff --git a/zenoh/src/info.rs b/zenoh/src/info.rs index 3e0efdf134..36910c666e 100644 --- a/zenoh/src/info.rs +++ b/zenoh/src/info.rs @@ -154,7 +154,7 @@ impl<'a> AsyncResolve for PeersZidBuilder<'a> { } /// Struct returned by [`Session::info()`](crate::SessionDeclarations::info) which allows -/// to access informations about the current zenoh [`Session`](crate::Session). +/// to access information about the current zenoh [`Session`](crate::Session). /// /// # Examples /// ``` diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index b8837ba31e..c3117561cb 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -81,7 +81,7 @@ pub(crate) enum KeyExprInner<'a> { /// A possibly-owned version of [`keyexpr`] that may carry optimisations for use with a [`Session`] that may have declared it. /// -/// Check [`keyexpr`]'s documentation for detailed explainations of the Key Expression Language. +/// Check [`keyexpr`]'s documentation for detailed explanations of the Key Expression Language. #[repr(transparent)] #[derive(Clone, serde::Deserialize, serde::Serialize)] #[serde(from = "OwnedKeyExpr")] @@ -230,7 +230,7 @@ impl<'a> KeyExpr<'a> { /// Joins both sides, inserting a `/` in between them. /// - /// This should be your prefered method when concatenating path segments. + /// This should be your preferred method when concatenating path segments. 
/// /// This is notably useful for workspaces: /// ```rust @@ -264,7 +264,7 @@ impl<'a> KeyExpr<'a> { /// Performs string concatenation and returns the result as a [`KeyExpr`] if possible. /// - /// You should probably prefer [`KeyExpr::join`] as Zenoh may then take advantage of the hierachical separation it inserts. + /// You should probably prefer [`KeyExpr::join`] as Zenoh may then take advantage of the hierarchical separation it inserts. pub fn concat + ?Sized>(&self, s: &S) -> ZResult> { let s = s.as_ref(); self._concat(s) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3693218291..e8db68b790 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -45,7 +45,7 @@ //! ``` //! //! ### Subscribe -//! The example below shows how to consume values for a key expresison. +//! The example below shows how to consume values for a key expressions. //! ```no_run //! use futures::prelude::*; //! use zenoh::prelude::r#async::*; diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 42f10517ea..6ec4bbf735 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -194,7 +194,7 @@ pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc) { pub(crate) fn update_query_routes_from(tables: &mut Tables, res: &mut Arc) { update_query_routes(tables, res); let res = get_mut_unchecked(res); - for child in res.childs.values_mut() { + for child in res.children.values_mut() { update_query_routes_from(tables, child); } } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 88c6908028..34f1229137 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -173,7 +173,7 @@ pub struct Resource { pub(crate) parent: Option>, pub(crate) suffix: String, pub(crate) nonwild_prefix: Option<(Arc, String)>, - pub(crate) childs: HashMap>, + pub(crate) children: HashMap>, 
pub(crate) context: Option, pub(crate) session_ctxs: HashMap>, } @@ -208,7 +208,7 @@ impl Resource { parent: Some(parent.clone()), suffix: String::from(suffix), nonwild_prefix, - childs: HashMap::new(), + children: HashMap::new(), context, session_ctxs: HashMap::new(), } @@ -282,7 +282,7 @@ impl Resource { parent: None, suffix: String::from(""), nonwild_prefix: None, - childs: HashMap::new(), + children: HashMap::new(), context: None, session_ctxs: HashMap::new(), }) @@ -292,7 +292,7 @@ impl Resource { let mut resclone = res.clone(); let mutres = get_mut_unchecked(&mut resclone); if let Some(ref mut parent) = mutres.parent { - if Arc::strong_count(res) <= 3 && res.childs.is_empty() { + if Arc::strong_count(res) <= 3 && res.children.is_empty() { // consider only childless resource held by only one external object (+ 1 strong count for resclone, + 1 strong count for res.parent to a total of 3 ) tracing::debug!("Unregister resource {}", res.expr()); if let Some(context) = mutres.context.as_mut() { @@ -309,7 +309,7 @@ impl Resource { } mutres.nonwild_prefix.take(); { - get_mut_unchecked(parent).childs.remove(&res.suffix); + get_mut_unchecked(parent).children.remove(&res.suffix); } Resource::clean(parent); } @@ -318,11 +318,11 @@ impl Resource { pub fn close(self: &mut Arc) { let r = get_mut_unchecked(self); - for c in r.childs.values_mut() { + for c in r.children.values_mut() { Self::close(c); } r.parent.take(); - r.childs.clear(); + r.children.clear(); r.nonwild_prefix.take(); r.session_ctxs.clear(); } @@ -331,7 +331,7 @@ impl Resource { pub fn print_tree(from: &Arc) -> String { let mut result = from.expr(); result.push('\n'); - for child in from.childs.values() { + for child in from.children.values() { result.push_str(&Resource::print_tree(child)); } result @@ -351,7 +351,7 @@ impl Resource { None => (suffix, ""), }; - match get_mut_unchecked(from).childs.get_mut(chunk) { + match get_mut_unchecked(from).children.get_mut(chunk) { Some(res) => 
Resource::make_resource(tables, res, rest), None => { let mut new = Arc::new(Resource::new(from, chunk, None)); @@ -360,7 +360,7 @@ impl Resource { } let res = Resource::make_resource(tables, &mut new, rest); get_mut_unchecked(from) - .childs + .children .insert(String::from(chunk), new); res } @@ -376,7 +376,7 @@ impl Resource { None => (suffix, ""), }; - match get_mut_unchecked(from).childs.get_mut(chunk) { + match get_mut_unchecked(from).children.get_mut(chunk) { Some(res) => Resource::make_resource(tables, res, rest), None => { let mut new = Arc::new(Resource::new(from, chunk, None)); @@ -385,7 +385,7 @@ impl Resource { } let res = Resource::make_resource(tables, &mut new, rest); get_mut_unchecked(from) - .childs + .children .insert(String::from(chunk), new); res } @@ -405,7 +405,7 @@ impl Resource { None => (suffix, ""), }; - match from.childs.get(chunk) { + match from.children.get(chunk) { Some(res) => Resource::get_resource(res, rest), None => None, } @@ -418,7 +418,7 @@ impl Resource { None => (suffix, ""), }; - match from.childs.get(chunk) { + match from.children.get(chunk) { Some(res) => Resource::get_resource(res, rest), None => None, } @@ -516,7 +516,7 @@ impl Resource { ) -> WireExpr<'a> { if checkchilds && !suffix.is_empty() { let (chunk, rest) = suffix.split_at(suffix.find('/').unwrap_or(suffix.len())); - if let Some(child) = prefix.childs.get(chunk) { + if let Some(child) = prefix.children.get(chunk) { return get_best_key_(child, rest, sid, true); } } @@ -550,7 +550,7 @@ impl Resource { if from.context.is_some() { matches.push(Arc::downgrade(from)); } - for child in from.childs.values() { + for child in from.children.values() { recursive_push(child, matches) } } @@ -560,7 +560,7 @@ impl Resource { matches: &mut Vec>, ) { if from.parent.is_none() || from.suffix == "/" { - for child in from.childs.values() { + for child in from.children.values() { get_matches_from(key_expr, child, matches); } return; @@ -582,12 +582,12 @@ impl Resource { 
matches.push(Arc::downgrade(from)); } if suffix.as_bytes() == b"**" { - for child in from.childs.values() { + for child in from.children.values() { get_matches_from(key_expr, child, matches) } } if let Some(child) = - from.childs.get("/**").or_else(|| from.childs.get("**")) + from.children.get("/**").or_else(|| from.children.get("**")) { if child.context.is_some() { matches.push(Arc::downgrade(child)) @@ -599,7 +599,7 @@ impl Resource { Some(rest) => { let recheck_keyexpr_one_level_lower = chunk.as_bytes() == b"**" || suffix.as_bytes() == b"**"; - for child in from.childs.values() { + for child in from.children.values() { get_matches_from(rest, child, matches); if recheck_keyexpr_one_level_lower { get_matches_from(key_expr, child, matches) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index d75c8faf1f..808acef23f 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -148,11 +148,11 @@ impl HatTables { let mut tables = zwrite!(tables_ref.tables); tracing::trace!("Compute trees"); - let new_childs = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); + let new_children = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); tracing::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs); - queries::queries_tree_change(&mut tables, &new_childs); + pubsub::pubsub_tree_change(&mut tables, &new_children); + queries::queries_tree_change(&mut tables, &new_children); tracing::trace!("Computations completed"); hat_mut!(tables).peers_trees_task = None; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 16844643c4..7d6e3d2850 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -101,7 +101,7 @@ pub(super) struct Changes { #[derive(Clone)] pub(super) struct Tree { 
pub(super) parent: Option, - pub(super) childs: Vec, + pub(super) children: Vec, pub(super) directions: Vec>, } @@ -152,7 +152,7 @@ impl Network { links: VecMap::new(), trees: vec![Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![None], }], distances: vec![0.0], @@ -890,12 +890,13 @@ impl Network { let indexes = self.graph.node_indices().collect::>(); let max_idx = indexes.iter().max().unwrap(); - let old_childs: Vec> = self.trees.iter().map(|t| t.childs.clone()).collect(); + let old_children: Vec> = + self.trees.iter().map(|t| t.children.clone()).collect(); self.trees.clear(); self.trees.resize_with(max_idx.index() + 1, || Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![], }); @@ -929,7 +930,7 @@ impl Network { for idx in &indexes { if let Some(parent_idx) = paths.predecessors[idx.index()] { if parent_idx == self.idx { - self.trees[tree_root_idx.index()].childs.push(*idx); + self.trees[tree_root_idx.index()].children.push(*idx); } } } @@ -967,22 +968,22 @@ impl Network { } } - let mut new_childs = Vec::with_capacity(self.trees.len()); - new_childs.resize(self.trees.len(), vec![]); + let mut new_children = Vec::with_capacity(self.trees.len()); + new_children.resize(self.trees.len(), vec![]); - for i in 0..new_childs.len() { - new_childs[i] = if i < old_childs.len() { + for i in 0..new_children.len() { + new_children[i] = if i < old_children.len() { self.trees[i] - .childs + .children .iter() - .filter(|idx| !old_childs[i].contains(idx)) + .filter(|idx| !old_children[i].contains(idx)) .cloned() .collect() } else { - self.trees[i].childs.clone() + self.trees[i].children.clone() }; } - new_childs + new_children } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index f0f8b77111..232e241670 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -37,16 +37,16 @@ use 
zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; #[inline] -fn send_sourced_subscription_to_net_childs( +fn send_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, sub_info: &SubscriberInfo, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -135,10 +135,10 @@ fn propagate_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_subscription_to_net_childs( + send_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, sub_info, @@ -274,15 +274,15 @@ fn client_subs(res: &Arc) -> Vec> { } #[inline] -fn send_forget_sourced_subscription_to_net_childs( +fn send_forget_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: Option, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -344,10 +344,10 @@ fn propagate_forget_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_subscription_to_net_childs( + send_forget_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, Some(tree_sid.index() as NodeId), @@ -499,10 +499,10 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId) { } } -pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec]) { - // propagate subs to new childs - for (tree_sid, tree_childs) in 
new_childs.iter().enumerate() { - if !tree_childs.is_empty() { +pub(super) fn pubsub_tree_change(tables: &mut Tables, new_children: &[Vec]) { + // propagate subs to new children + for (tree_sid, tree_children) in new_children.iter().enumerate() { + if !tree_children.is_empty() { let net = hat!(tables).peers_net.as_ref().unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { @@ -518,10 +518,10 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec, face: &Arc) } #[inline] -fn send_sourced_queryable_to_net_childs( +fn send_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -203,10 +203,10 @@ fn propagate_sourced_queryable( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_queryable_to_net_childs( + send_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, qabl_info, src_face, @@ -330,15 +330,15 @@ fn client_qabls(res: &Arc) -> Vec> { } #[inline] -fn send_forget_sourced_queryable_to_net_childs( +fn send_forget_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -401,10 +401,10 @@ fn propagate_forget_sourced_queryable( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_queryable_to_net_childs( + 
send_forget_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, tree_sid.index() as NodeId, @@ -557,10 +557,10 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId) { } } -pub(super) fn queries_tree_change(tables: &mut Tables, new_childs: &[Vec]) { - // propagate qabls to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { +pub(super) fn queries_tree_change(tables: &mut Tables, new_children: &[Vec]) { + // propagate qabls to new children + for (tree_sid, tree_children) in new_children.iter().enumerate() { + if !tree_children.is_empty() { let net = hat!(tables).peers_net.as_ref().unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { @@ -571,10 +571,10 @@ pub(super) fn queries_tree_change(tables: &mut Tables, new_childs: &[Vec hat_mut!(tables) .routers_net .as_mut() @@ -277,8 +277,8 @@ impl HatTables { }; tracing::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); - queries::queries_tree_change(&mut tables, &new_childs, net_type); + pubsub::pubsub_tree_change(&mut tables, &new_children, net_type); + queries::queries_tree_change(&mut tables, &new_children, net_type); tracing::trace!("Computations completed"); match net_type { diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index 5089ce9893..e8e3a56aaf 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -100,7 +100,7 @@ pub(super) struct Changes { #[derive(Clone)] pub(super) struct Tree { pub(super) parent: Option, - pub(super) childs: Vec, + pub(super) children: Vec, pub(super) directions: Vec>, } @@ -151,7 +151,7 @@ impl Network { links: VecMap::new(), trees: vec![Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![None], }], distances: 
vec![0.0], @@ -893,12 +893,13 @@ impl Network { let indexes = self.graph.node_indices().collect::>(); let max_idx = indexes.iter().max().unwrap(); - let old_childs: Vec> = self.trees.iter().map(|t| t.childs.clone()).collect(); + let old_children: Vec> = + self.trees.iter().map(|t| t.children.clone()).collect(); self.trees.clear(); self.trees.resize_with(max_idx.index() + 1, || Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![], }); @@ -932,7 +933,7 @@ impl Network { for idx in &indexes { if let Some(parent_idx) = paths.predecessors[idx.index()] { if parent_idx == self.idx { - self.trees[tree_root_idx.index()].childs.push(*idx); + self.trees[tree_root_idx.index()].children.push(*idx); } } } @@ -970,23 +971,23 @@ impl Network { } } - let mut new_childs = Vec::with_capacity(self.trees.len()); - new_childs.resize(self.trees.len(), vec![]); + let mut new_children = Vec::with_capacity(self.trees.len()); + new_children.resize(self.trees.len(), vec![]); - for i in 0..new_childs.len() { - new_childs[i] = if i < old_childs.len() { + for i in 0..new_children.len() { + new_children[i] = if i < old_children.len() { self.trees[i] - .childs + .children .iter() - .filter(|idx| !old_childs[i].contains(idx)) + .filter(|idx| !old_children[i].contains(idx)) .cloned() .collect() } else { - self.trees[i].childs.clone() + self.trees[i].children.clone() }; } - new_childs + new_children } #[inline] diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index b7d00227c0..e8c6cb4e6a 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -37,16 +37,16 @@ use zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; #[inline] -fn send_sourced_subscription_to_net_childs( +fn send_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, sub_info: &SubscriberInfo, routing_context: 
NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -153,10 +153,10 @@ fn propagate_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_subscription_to_net_childs( + send_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, sub_info, @@ -341,15 +341,15 @@ fn client_subs(res: &Arc) -> Vec> { } #[inline] -fn send_forget_sourced_subscription_to_net_childs( +fn send_forget_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: Option, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -453,10 +453,10 @@ fn propagate_forget_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_subscription_to_net_childs( + send_forget_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, Some(tree_sid.index() as NodeId), @@ -710,12 +710,12 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: pub(super) fn pubsub_tree_change( tables: &mut Tables, - new_childs: &[Vec], + new_children: &[Vec], net_type: WhatAmI, ) { - // propagate subs to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { + // propagate subs to new children + for (tree_sid, tree_children) in new_children.iter().enumerate() { + if !tree_children.is_empty() { let net = hat!(tables).get_net(net_type).unwrap(); let tree_idx = 
NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { @@ -737,10 +737,10 @@ pub(super) fn pubsub_tree_change( reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers mode: Mode::Push, }; - send_sourced_subscription_to_net_childs( + send_sourced_subscription_to_net_children( tables, net, - tree_childs, + tree_children, res, None, &sub_info, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 28ff0800db..76ddba7235 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -188,16 +188,16 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) } #[inline] -fn send_sourced_queryable_to_net_childs( +fn send_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -285,10 +285,10 @@ fn propagate_sourced_queryable( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_queryable_to_net_childs( + send_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, qabl_info, src_face, @@ -471,15 +471,15 @@ fn client_qabls(res: &Arc) -> Vec> { } #[inline] -fn send_forget_sourced_queryable_to_net_childs( +fn send_forget_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut 
someface) => { @@ -584,10 +584,10 @@ fn propagate_forget_sourced_queryable( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_queryable_to_net_childs( + send_forget_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, tree_sid.index() as NodeId, @@ -932,12 +932,12 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links pub(super) fn queries_tree_change( tables: &mut Tables, - new_childs: &[Vec], + new_children: &[Vec], net_type: WhatAmI, ) { - // propagate qabls to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { + // propagate qabls to new children + for (tree_sid, tree_children) in new_children.iter().enumerate() { + if !tree_children.is_empty() { let net = hat!(tables).get_net(net_type).unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { @@ -954,10 +954,10 @@ pub(super) fn queries_tree_change( _ => &res_hat!(res).peer_qabls, }; if let Some(qabl_info) = qabls.get(&tree_id) { - send_sourced_queryable_to_net_childs( + send_sourced_queryable_to_net_children( tables, net, - tree_childs, + tree_children, res, qabl_info, None, diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs index cda132e806..34c59ac07d 100644 --- a/zenoh/src/net/routing/interceptor/downsampling.rs +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -149,11 +149,11 @@ impl InterceptorTrait for DownsamplingInterceptor { return None; } } else { - tracing::debug!("unxpected cache ID {}", id); + tracing::debug!("unexpected cache ID {}", id); } } } else { - tracing::debug!("unxpected cache type {:?}", ctx.full_expr()); + tracing::debug!("unexpected cache type {:?}", ctx.full_expr()); } } } diff --git a/zenoh/src/net/runtime/adminspace.rs 
b/zenoh/src/net/runtime/adminspace.rs index 2a8ec088b8..0ba661c8f1 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -620,7 +620,7 @@ fn metrics(context: &AdminContext, query: Query) { .unwrap(); #[allow(unused_mut)] let mut metrics = format!( - r#"# HELP zenoh_build Informations about zenoh. + r#"# HELP zenoh_build Information about zenoh. # TYPE zenoh_build gauge zenoh_build{{version="{}"}} 1 "#, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 364891460a..c3f8815a50 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -362,7 +362,7 @@ impl TransportEventHandler for RuntimeTransportEventHandler { .state .router .new_transport_multicast(transport.clone())?; - Ok(Arc::new(RuntimeMuticastGroup { + Ok(Arc::new(RuntimeMulticastGroup { runtime: runtime.clone(), transport, slave_handlers, @@ -419,20 +419,20 @@ impl TransportPeerEventHandler for RuntimeSession { } } -pub(super) struct RuntimeMuticastGroup { +pub(super) struct RuntimeMulticastGroup { pub(super) runtime: Runtime, pub(super) transport: TransportMulticast, pub(super) slave_handlers: Vec>, } -impl TransportMulticastEventHandler for RuntimeMuticastGroup { +impl TransportMulticastEventHandler for RuntimeMulticastGroup { fn new_peer(&self, peer: TransportPeer) -> ZResult> { let slave_handlers: Vec> = self .slave_handlers .iter() .filter_map(|handler| handler.new_peer(peer.clone()).ok()) .collect(); - Ok(Arc::new(RuntimeMuticastSession { + Ok(Arc::new(RuntimeMulticastSession { main_handler: self .runtime .state @@ -459,12 +459,12 @@ impl TransportMulticastEventHandler for RuntimeMuticastGroup { } } -pub(super) struct RuntimeMuticastSession { +pub(super) struct RuntimeMulticastSession { pub(super) main_handler: Arc, pub(super) slave_handlers: Vec>, } -impl TransportPeerEventHandler for RuntimeMuticastSession { +impl TransportPeerEventHandler for RuntimeMulticastSession { fn handle_message(&self, msg: 
NetworkMessage) -> ZResult<()> { self.main_handler.handle_message(msg) } diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 75ad7bdf99..798a3fc694 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -83,7 +83,7 @@ impl Runtime { } } } else { - bail!("No peer specified and multicast scouting desactivated!") + bail!("No peer specified and multicast scouting deactivated!") } } _ => self.connect_peers(&peers, true).await, @@ -335,10 +335,10 @@ impl Runtime { pub(crate) async fn update_peers(&self) -> ZResult<()> { let peers = { self.state.config.lock().connect().endpoints().clone() }; - let tranports = self.manager().get_transports_unicast().await; + let transports = self.manager().get_transports_unicast().await; if self.state.whatami == WhatAmI::Client { - for transport in tranports { + for transport in transports { let should_close = if let Ok(Some(orch_transport)) = transport.get_callback() { if let Some(orch_transport) = orch_transport .as_any() @@ -361,7 +361,7 @@ impl Runtime { } } else { for peer in peers { - if !tranports.iter().any(|transport| { + if !transports.iter().any(|transport| { if let Ok(Some(orch_transport)) = transport.get_callback() { if let Some(orch_transport) = orch_transport .as_any() diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index f5e65f0bdc..bc889d720e 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -638,13 +638,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives1.get_last_name().is_some()); assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_wr1"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_wr1".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), 
"test/client/z1_wr1"); // mapping strategy check @@ -672,13 +672,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives1.get_last_name().is_some()); assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_wr2"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_wr2".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_wr2"); // mapping strategy check @@ -706,13 +706,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives0.get_last_name().is_some()); assert_eq!(primitives0.get_last_name().unwrap(), "test/client/**"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(11, "/**".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/**"); // mapping strategy check @@ -740,13 +740,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives1.get_last_name().is_some()); assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_pub1"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_pub1".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_pub1"); // mapping strategy check @@ -774,13 +774,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives0.get_last_name().is_some()); assert_eq!(primitives0.get_last_name().unwrap(), "test/client/z2_pub1"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(11, "/z2_pub1".to_string())); - // functionnal check + 
// functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z2_pub1"); // mapping strategy check diff --git a/zenoh/src/plugins/sealed.rs b/zenoh/src/plugins/sealed.rs index a3bfdc3aac..8bfc1f1dab 100644 --- a/zenoh/src/plugins/sealed.rs +++ b/zenoh/src/plugins/sealed.rs @@ -100,14 +100,14 @@ pub trait RunningPluginTrait: Send + Sync + PluginControl { /// * `Ok(Vec)`: the list of responses to the query. For example if plugins can return information on subleys "foo", "bar", "foo/buzz" and "bar/buzz" /// and it's requested with the query "@/router/ROUTER_ID/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz" /// as they doesn't match the query. - /// * `Err(ZError)`: Problem occured when processing the query. + /// * `Err(ZError)`: Problem occurred when processing the query. /// /// If plugin implements subplugins (as the storage plugin), then it should also reply with information about its subplugins with the same rules. /// /// TODO: /// * add example - /// * rework the admin space: rework "with_extented_string" function, provide it as utility for plugins - /// * reorder paramaters: plugin_status_key should be first as it describes the root of pluginb's admin space + /// * rework the admin space: rework "with_extended_string" function, provide it as utility for plugins + /// * reorder parameters: plugin_status_key should be first as it describes the root of pluginb's admin space /// * Instead of ZResult return just Vec. Check, do we really need ZResult? If yes, make it separate for each status record. /// fn adminspace_getter<'a>( diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f5af22d0e7..9373fa021d 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -281,7 +281,7 @@ impl<'a> Publisher<'a> { /// pointer to it (`Arc`). This is equivalent to `Arc::new(Publisher)`. 
/// /// This is useful to share ownership of the `Publisher` between several threads - /// and tasks. It also alows to create [`MatchingListener`] with static + /// and tasks. It also allows to create [`MatchingListener`] with static /// lifetime that can be moved to several threads and tasks. /// /// Note: the given zenoh `Publisher` will be undeclared when the last reference to diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 4bef4bca1d..ae9cd78469 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -55,7 +55,7 @@ pub(crate) struct DataInfo { pub qos: QoS, } -/// Informations on the source of a zenoh [`Sample`]. +/// Information on the source of a zenoh [`Sample`]. #[zenoh_macros::unstable] #[derive(Debug, Clone)] pub struct SourceInfo { @@ -565,7 +565,7 @@ impl QoS { self } - /// Sets express flag vlaue. + /// Sets express flag value. pub fn with_express(mut self, is_express: bool) -> Self { self.inner.set_is_express(is_express); self diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 2a9a38c02c..a5f761a323 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -161,7 +161,7 @@ impl<'a> Selector<'a> { selector.push('&') } use std::fmt::Write; - write!(selector, "{TIME_RANGE_KEY}={time_range}").unwrap(); // This unwrap is safe because `String: Write` should be infallibe. + write!(selector, "{TIME_RANGE_KEY}={time_range}").unwrap(); // This unwrap is safe because `String: Write` should be infallible. } pub fn remove_time_range(&mut self) { @@ -328,7 +328,7 @@ pub trait Parameters<'a> { where ::Item: Parameter; - /// Extracts all parameters into a HashMap, returning an error if duplicate parameters arrise. + /// Extracts all parameters into a HashMap, returning an error if duplicate parameters arise. 
fn decode_into_map(&'a self) -> ZResult> where ::Item: Parameter, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 368cded243..0763018c75 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -435,7 +435,7 @@ impl Session { /// pointer to it (`Arc`). This is equivalent to `Arc::new(session)`. /// /// This is useful to share ownership of the `Session` between several threads - /// and tasks. It also alows to create [`Subscriber`](Subscriber) and + /// and tasks. It also allows to create [`Subscriber`](Subscriber) and /// [`Queryable`](Queryable) with static lifetime that can be moved to several /// threads and tasks /// @@ -552,7 +552,7 @@ impl Session { /// The returned configuration [`Notifier`](Notifier) can be used to read the current /// zenoh configuration through the `get` function or /// modify the zenoh configuration through the `insert`, - /// or `insert_json5` funtion. + /// or `insert_json5` function. /// /// # Examples /// ### Read current zenoh configuration @@ -1686,7 +1686,7 @@ impl Session { } } Err(err) => { - tracing::error!("Received Data for unkown key_expr: {}", err); + tracing::error!("Received Data for unknown key_expr: {}", err); return; } } @@ -1920,7 +1920,7 @@ impl Session { ) } Err(err) => { - error!("Received Query for unkown key_expr: {}", err); + error!("Received Query for unknown key_expr: {}", err); return; } } @@ -2167,7 +2167,7 @@ impl Primitives for Session { } Err(err) => { tracing::error!( - "Received DeclareSubscriber for unkown wire_expr: {}", + "Received DeclareSubscriber for unknown wire_expr: {}", err ) } @@ -2204,7 +2204,7 @@ impl Primitives for Session { } Err(err) => { tracing::error!( - "Received Forget Subscriber for unkown key_expr: {}", + "Received Forget Subscriber for unknown key_expr: {}", err ) } @@ -2327,7 +2327,7 @@ impl Primitives for Session { callback(new_reply); } None => { - tracing::warn!("Received ReplyData for unkown Query: {}", msg.rid); + tracing::warn!("Received ReplyData for 
unknown Query: {}", msg.rid); } } } @@ -2336,7 +2336,7 @@ impl Primitives for Session { let key_expr = match state.remote_key_to_expr(&msg.wire_expr) { Ok(key) => key.into_owned(), Err(e) => { - error!("Received ReplyData for unkown key_expr: {}", e); + error!("Received ReplyData for unknown key_expr: {}", e); return; } }; @@ -2488,7 +2488,7 @@ impl Primitives for Session { } } None => { - tracing::warn!("Received ReplyData for unkown Query: {}", msg.rid); + tracing::warn!("Received ReplyData for unknown Query: {}", msg.rid); } } } @@ -2513,7 +2513,7 @@ impl Primitives for Session { } } None => { - warn!("Received ResponseFinal for unkown Request: {}", msg.rid); + warn!("Received ResponseFinal for unknown Request: {}", msg.rid); } } } @@ -2680,7 +2680,7 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` #[zenoh_macros::unstable] fn liveliness(&'s self) -> Liveliness<'a>; - /// Get informations about the zenoh [`Session`](Session). + /// Get information about the zenoh [`Session`](Session). /// /// # Examples /// ``` diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index dc53120fff..c4ecd6cbd4 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -81,7 +81,7 @@ pub(crate) struct SubscriberInner<'a> { /// A [`PullMode`] subscriber that provides data through a callback. /// -/// CallbackPullSubscribers only provide data when explicitely pulled by the +/// CallbackPullSubscribers only provide data when explicitly pulled by the /// application with the [`pull`](CallbackPullSubscriber::pull) function. /// CallbackPullSubscribers can be created from a zenoh [`Session`](crate::Session) /// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, @@ -671,7 +671,7 @@ pub struct Subscriber<'a, Receiver> { /// A [`PullMode`] subscriber that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). 
/// -/// PullSubscribers only provide data when explicitely pulled by the +/// PullSubscribers only provide data when explicitly pulled by the /// application with the [`pull`](PullSubscriber::pull) function. /// PullSubscribers can be created from a zenoh [`Session`](crate::Session) /// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index 91614fe430..234cb50454 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -133,7 +133,7 @@ fn retry_config_const_period() { } #[test] -fn retry_config_infinit_period() { +fn retry_config_infinite_period() { let mut config = Config::default(); config .insert_json5( diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 8d83d6a10b..be479756b3 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -320,7 +320,7 @@ impl Recipe { // node_task_tracker.close(); // node_task_tracker.wait().await; - // Close the session once all the task assoicated with the node are done. + // Close the session once all the task associated with the node are done. 
Arc::try_unwrap(session) .unwrap() .close() diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 471b78380b..850676d905 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -131,7 +131,7 @@ fn config_from_args(args: &Args) -> Config { if let Some(id) = &args.id { config.set_id(id.parse().unwrap()).unwrap(); } - // apply '--rest-http-port' to config only if explicitly set (overwritting config), + // apply '--rest-http-port' to config only if explicitly set (overwriting config), // or if no config file is set (to apply its default value) if args.rest_http_port.is_some() || args.config.is_none() { let value = args.rest_http_port.as_deref().unwrap_or("8000"); From 528b87a666c4fee5ad40c08f629f3ba204418f2e Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 10 Jun 2024 12:43:13 +0200 Subject: [PATCH 05/29] Start link tx_task before notifying router (#1098) --- .../src/unicast/lowlatency/transport.rs | 6 ++++-- io/zenoh-transport/src/unicast/manager.rs | 13 +++++++++---- .../src/unicast/transport_unicast_inner.rs | 10 ++++++++-- .../src/unicast/universal/transport.rs | 10 +++++++--- 4 files changed, 28 insertions(+), 11 deletions(-) diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index cac2e1c4c2..2c52df4810 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -246,17 +246,19 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { drop(guard); // create a callback to start the link - let start_link = Box::new(move || { + let start_tx = Box::new(move || { // start keepalive task let keep_alive = self.manager.config.unicast.lease / self.manager.config.unicast.keep_alive as u32; self.start_keepalive(keep_alive); + }); + let start_rx = Box::new(move || { // start RX task self.internal_start_rx(other_lease); }); - Ok((start_link, ack)) + Ok((start_tx, start_rx, ack)) } 
/*************************************/ diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 708d0d39e9..1423fec900 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -444,7 +444,7 @@ impl TransportManager { } // Add the link to the transport - let (start_tx_rx, ack) = transport + let (start_tx, start_rx, ack) = transport .add_link(link, other_initial_sn, other_lease) .await .map_err(InitTransportError::Link)?; @@ -456,10 +456,12 @@ impl TransportManager { .await .map_err(|e| InitTransportError::Transport((e, c_t, close::reason::GENERIC)))?; + start_tx(); + // notify transport's callback interface that there is a new link Self::notify_new_link_unicast(&transport, c_link); - start_tx_rx(); + start_rx(); Ok(transport) } @@ -548,7 +550,8 @@ impl TransportManager { }; // Add the link to the transport - let (start_tx_rx, ack) = match t.add_link(link, other_initial_sn, other_lease).await { + let (start_tx, start_rx, ack) = match t.add_link(link, other_initial_sn, other_lease).await + { Ok(val) => val, Err(e) => { let _ = t.close(e.2).await; @@ -575,6 +578,8 @@ impl TransportManager { guard.insert(config.zid, t.clone()); drop(guard); + start_tx(); + // Notify manager's interface that there is a new transport transport_error!( self.notify_new_transport_unicast(&t), @@ -584,7 +589,7 @@ impl TransportManager { // Notify transport's callback interface that there is a new link Self::notify_new_link_unicast(&t, c_link); - start_tx_rx(); + start_rx(); zcondfeat!( "shared-memory", diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index 789a2fe79d..1e3389ff75 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -36,8 +36,14 @@ pub(crate) enum InitTransportError { Transport(TransportError), } -pub(crate) type 
AddLinkResult<'a> = - Result<(Box, MaybeOpenAck), LinkError>; +pub(crate) type AddLinkResult<'a> = Result< + ( + Box, + Box, + MaybeOpenAck, + ), + LinkError, +>; pub(crate) type InitTransportResult = Result, InitTransportError>; /*************************************/ diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 1004381a44..aa14a64bda 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -292,17 +292,21 @@ impl TransportUnicastTrait for TransportUnicastUniversal { // create a callback to start the link let transport = self.clone(); - let start_link = Box::new(move || { + let mut c_link = link.clone(); + let c_transport = transport.clone(); + let start_tx = Box::new(move || { // Start the TX loop let keep_alive = self.manager.config.unicast.lease / self.manager.config.unicast.keep_alive as u32; - link.start_tx(transport.clone(), consumer, keep_alive); + c_link.start_tx(c_transport, consumer, keep_alive); + }); + let start_rx = Box::new(move || { // Start the RX loop link.start_rx(transport, other_lease); }); - Ok((start_link, ack)) + Ok((start_tx, start_rx, ack)) } /*************************************/ From d8e66decbe8bd7a1981579fd49ea333c0acd41ff Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 10 Jun 2024 15:56:34 +0200 Subject: [PATCH 06/29] Fix typos (#1110) --- commons/zenoh-util/src/std_only/time_range.rs | 2 +- io/zenoh-links/zenoh-link-udp/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/commons/zenoh-util/src/std_only/time_range.rs b/commons/zenoh-util/src/std_only/time_range.rs index 886083b2f0..bfbb4e3303 100644 --- a/commons/zenoh-util/src/std_only/time_range.rs +++ b/commons/zenoh-util/src/std_only/time_range.rs @@ -283,7 +283,7 @@ impl TimeExpr { }), } } - /// Subtracts `duration` from `self`, returning `None` if `self` is a `Fixed(SystemTime)` and 
subsctracting the duration is not possible + /// Subtracts `duration` from `self`, returning `None` if `self` is a `Fixed(SystemTime)` and subtracting the duration is not possible /// because the result would be outside the bounds of the underlying data structure (see [`SystemTime::checked_sub`]). /// Otherwise returns `Some(time_expr)`. pub fn checked_sub(&self, duration: f64) -> Option { diff --git a/io/zenoh-links/zenoh-link-udp/src/lib.rs b/io/zenoh-links/zenoh-link-udp/src/lib.rs index b520973df8..31ca32e71b 100644 --- a/io/zenoh-links/zenoh-link-udp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-udp/src/lib.rs @@ -41,7 +41,7 @@ use zenoh_result::{zerror, ZResult}; /// /// # Note /// -/// The theoretical Maximum Transmission Unit (MTU) of UDP is `u16::MAX`. From that we substract the +/// The theoretical Maximum Transmission Unit (MTU) of UDP is `u16::MAX`. From that we subtract the /// size of a UDP header (8 bytes) and the size of IPv4/IPv6 headers (resp. 20 and 40 bytes). /// /// Although in IPv6 it is possible to have UDP datagrams of size greater than 65,535 bytes via IPv6 From 9d0974289ade917ffe56079479e1ddd598eda494 Mon Sep 17 00:00:00 2001 From: JLer Date: Tue, 11 Jun 2024 19:51:44 +0800 Subject: [PATCH 07/29] bump quinn & rustls (#1086) * bump quinn & rustls * fix ci windows check * add comments --- Cargo.lock | 206 +++++++++--------- Cargo.toml | 10 +- io/zenoh-links/zenoh-link-quic/Cargo.toml | 10 +- io/zenoh-links/zenoh-link-quic/src/lib.rs | 1 - io/zenoh-links/zenoh-link-quic/src/unicast.rs | 40 ++-- io/zenoh-links/zenoh-link-quic/src/utils.rs | 158 ++++++-------- io/zenoh-links/zenoh-link-quic/src/verify.rs | 42 ---- io/zenoh-links/zenoh-link-tls/src/utils.rs | 20 ++ 8 files changed, 231 insertions(+), 256 deletions(-) delete mode 100644 io/zenoh-links/zenoh-link-quic/src/verify.rs diff --git a/Cargo.lock b/Cargo.lock index 36078d0238..8aee8b7638 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -528,9 +528,9 @@ checksum = 
"bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" dependencies = [ "serde", ] @@ -628,6 +628,12 @@ dependencies = [ "libc", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cfg-if" version = "0.1.10" @@ -753,6 +759,16 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "concurrent-queue" version = "2.2.0" @@ -825,9 +841,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -835,9 +851,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" @@ -1817,6 +1833,26 @@ version = "1.0.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jni" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" +dependencies = [ + "cesu8", + "combine", + "jni-sys", + "log", + "thiserror", + "walkdir", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "js-sys" version = "0.3.64" @@ -2139,7 +2175,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if 1.0.0", "libc", "memoffset 0.9.0", @@ -2292,9 +2328,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -2314,7 +2350,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if 1.0.0", "foreign-types", "libc", @@ -2695,16 +2731,16 @@ dependencies = [ [[package]] name = "quinn" -version = "0.10.2" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +checksum = "904e3d3ba178131798c6d9375db2b13b34337d489b089fc5ba0825a2ff1bee73" dependencies = [ "bytes", "pin-project-lite 0.2.13", "quinn-proto", 
"quinn-udp", "rustc-hash", - "rustls 0.21.7", + "rustls", "thiserror", "tokio", "tracing", @@ -2712,16 +2748,16 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.4" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13f81c9a9d574310b8351f8666f5a93ac3b0069c45c28ad52c10291389a7cf9" +checksum = "e974563a4b1c2206bbc61191ca4da9c22e4308b4c455e8906751cc7828393f08" dependencies = [ "bytes", "rand 0.8.5", - "ring 0.16.20", + "ring 0.17.6", "rustc-hash", - "rustls 0.21.7", - "rustls-native-certs 0.6.3", + "rustls", + "rustls-platform-verifier", "slab", "thiserror", "tinyvec", @@ -2730,15 +2766,15 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +checksum = "e4f0def2590301f4f667db5a77f9694fb004f82796dc1a8b1508fafa3d0e8b72" dependencies = [ - "bytes", "libc", + "once_cell", "socket2 0.5.6", "tracing", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3014,7 +3050,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64 0.21.4", - "bitflags 2.4.2", + "bitflags 2.5.0", "serde", "serde_derive", ] @@ -3097,7 +3133,7 @@ version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno 0.3.8", "libc", "linux-raw-sys 0.4.13", @@ -3106,42 +3142,19 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" -dependencies = [ - "log", - "ring 0.16.20", - "rustls-webpki 0.101.5", - "sct", -] - 
-[[package]] -name = "rustls" -version = "0.22.4" +version = "0.23.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +checksum = "a218f0f6d05669de4eabfb24f31ce802035c952429d037507b4a4a39f0e60c5b" dependencies = [ "log", + "once_cell", "ring 0.17.6", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki", "subtle", "zeroize", ] -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile 1.0.3", - "schannel", - "security-framework", -] - [[package]] name = "rustls-native-certs" version = "0.7.0" @@ -3176,25 +3189,42 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.3.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] -name = "rustls-webpki" -version = "0.101.5" +name = "rustls-platform-verifier" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" +checksum = "b5f0d26fa1ce3c790f9590868f0109289a044acb954525f933e2aa3b871c157d" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-roots", + "winapi", ] +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" + [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "ring 0.17.6", "rustls-pki-types", @@ -3255,16 +3285,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" -dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", -] - [[package]] name = "secrecy" version = "0.8.0" @@ -3277,22 +3297,23 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", + "num-bigint", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -4027,21 +4048,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" 
-dependencies = [ - "rustls 0.21.7", - "tokio", -] - -[[package]] -name = "tokio-rustls" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.22.4", + "rustls", "rustls-pki-types", "tokio", ] @@ -5112,8 +5123,8 @@ dependencies = [ "base64 0.21.4", "flume", "futures", - "rustls 0.22.4", - "rustls-webpki 0.102.2", + "rustls", + "rustls-webpki", "serde", "tokio", "tokio-util", @@ -5137,14 +5148,13 @@ dependencies = [ "base64 0.21.4", "futures", "quinn", - "rustls 0.21.7", - "rustls-native-certs 0.7.0", - "rustls-pemfile 1.0.3", + "rustls", + "rustls-pemfile 2.0.0", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki", "secrecy", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tokio-util", "tracing", "webpki-roots", @@ -5203,13 +5213,13 @@ dependencies = [ "async-trait", "base64 0.21.4", "futures", - "rustls 0.22.4", + "rustls", "rustls-pemfile 2.0.0", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki", "secrecy", "tokio", - "tokio-rustls 0.25.0", + "tokio-rustls", "tokio-util", "tracing", "webpki-roots", diff --git a/Cargo.toml b/Cargo.toml index 9036521c78..174f3efb04 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -122,7 +122,7 @@ petgraph = "0.6.3" pnet = "0.34" pnet_datalink = "0.34" proc-macro2 = "1.0.51" -quinn = "0.10.1" +quinn = "0.11.1" quote = "1.0.23" rand = { version = "0.8.5", default-features = false } # Default features are disabled due to usage in no_std crates rand_chacha = "0.3.1" @@ -132,7 +132,11 @@ ron = "0.8.1" ringbuffer-spsc = "0.1.9" rsa = "0.9" rustc_version = "0.4.0" -rustls = "0.22.2" +rustls = { version = "0.23.9", default-features = false, features = [ + "logging", + "tls12", + "ring", +] } rustls-native-certs = "0.7.0" rustls-pemfile = "2.0.0" rustls-webpki = "0.102.0" @@ 
-155,7 +159,7 @@ token-cell = { version = "1.4.2", default-features = false } tokio = { version = "1.35.1", default-features = false } # Default features are disabled due to some crates' requirements tokio-util = "0.7.10" tokio-tungstenite = "0.21" -tokio-rustls = "0.25.0" +tokio-rustls = { version = "0.26.0", default-features = false } # tokio-vsock = see: io/zenoh-links/zenoh-link-vsock/Cargo.toml (workspaces does not support platform dependent dependencies) console-subscriber = "0.2" typenum = "1.16.0" diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index 0e1c720d78..d86e75847b 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -29,8 +29,9 @@ async-trait = { workspace = true } base64 = { workspace = true } futures = { workspace = true } quinn = { workspace = true } -rustls-native-certs = { workspace = true } -rustls-pki-types = { workspace = true } +rustls = { workspace = true } +rustls-pemfile = { workspace = true } +rustls-pki-types = { workspace = true } rustls-webpki = { workspace = true } secrecy = { workspace = true } tokio = { workspace = true, features = [ @@ -40,6 +41,7 @@ tokio = { workspace = true, features = [ "sync", "time", ] } +tokio-rustls = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } tracing = { workspace = true } webpki-roots = { workspace = true } @@ -51,7 +53,3 @@ zenoh-result = { workspace = true } zenoh-runtime = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } -# Lock due to quinn not supporting rustls 0.22 yet -rustls = { version = "0.21", features = ["dangerous_configuration", "quic"] } -tokio-rustls = "0.24.1" -rustls-pemfile = { version = "1" } diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index 0c9bc7365e..deed695ace 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ 
b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -26,7 +26,6 @@ use zenoh_result::ZResult; mod unicast; mod utils; -mod verify; pub use unicast::*; pub use utils::TlsConfigurator as QuicConfigurator; diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 452fd8a122..8d4b82c339 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -13,11 +13,11 @@ // use crate::{ - config::*, utils::{get_quic_addr, TlsClientConfig, TlsServerConfig}, ALPN_QUIC_HTTP, QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX, }; use async_trait::async_trait; +use quinn::crypto::rustls::{QuicClientConfig, QuicServerConfig}; use std::fmt; use std::net::IpAddr; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; @@ -68,7 +68,7 @@ impl LinkUnicastTrait for LinkUnicastQuic { tracing::trace!("Closing QUIC link: {}", self); // Flush the QUIC stream let mut guard = zasynclock!(self.send); - if let Err(e) = guard.finish().await { + if let Err(e) = guard.finish() { tracing::trace!("Error closing QUIC stream {}: {}", self, e); } self.connection.close(quinn::VarInt::from_u32(0), &[0]); @@ -206,15 +206,6 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { let addr = get_quic_addr(&epaddr).await?; - let server_name_verification: bool = epconf - .get(TLS_SERVER_NAME_VERIFICATION) - .unwrap_or(TLS_SERVER_NAME_VERIFICATION_DEFAULT) - .parse()?; - - if !server_name_verification { - tracing::warn!("Skipping name verification of servers"); - } - // Initialize the QUIC connection let mut client_crypto = TlsClientConfig::new(&epconf) .await @@ -230,9 +221,12 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { }; let mut quic_endpoint = quinn::Endpoint::client(SocketAddr::new(ip_addr, 0)) .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; - quic_endpoint.set_default_client_config(quinn::ClientConfig::new(Arc::new( - 
client_crypto.client_config, - ))); + + let quic_config: QuicClientConfig = client_crypto + .client_config + .try_into() + .map_err(|e| zerror!("Can not create a new QUIC link bound to {host}: {e}"))?; + quic_endpoint.set_default_client_config(quinn::ClientConfig::new(Arc::new(quic_config))); let src_addr = quic_endpoint .local_addr() @@ -276,8 +270,22 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { .map_err(|e| zerror!("Cannot create a new QUIC listener on {addr}: {e}"))?; server_crypto.server_config.alpn_protocols = ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); - let mut server_config = - quinn::ServerConfig::with_crypto(Arc::new(server_crypto.server_config)); + + // Install ring based rustls CryptoProvider. + rustls::crypto::ring::default_provider() + // This can be called successfully at most once in any process execution. + // Call this early in your process to configure which provider is used for the provider. + // The configuration should happen before any use of ClientConfig::builder() or ServerConfig::builder(). + .install_default() + // Ignore the error here, because `rustls::crypto::ring::default_provider().install_default()` will inevitably be executed multiple times + // when there are multiple quic links, and all but the first execution will fail. + .ok(); + + let quic_config: QuicServerConfig = server_crypto + .server_config + .try_into() + .map_err(|e| zerror!("Can not create a new QUIC listener on {addr}: {e}"))?; + let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(quic_config)); // We do not accept unidireactional streams. 
Arc::get_mut(&mut server_config.transport) diff --git a/io/zenoh-links/zenoh-link-quic/src/utils.rs b/io/zenoh-links/zenoh-link-quic/src/utils.rs index 40367599cb..bba5b41787 100644 --- a/io/zenoh-links/zenoh-link-quic/src/utils.rs +++ b/io/zenoh-links/zenoh-link-quic/src/utils.rs @@ -12,16 +12,13 @@ // ZettaScale Zenoh Team, // use crate::config::*; -use crate::verify::WebPkiVerifierAnyServerName; -use rustls::OwnedTrustAnchor; use rustls::{ - server::AllowAnyAuthenticatedClient, version::TLS13, Certificate, ClientConfig, PrivateKey, - RootCertStore, ServerConfig, + pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}, + server::WebPkiClientVerifier, + version::TLS13, + ClientConfig, RootCertStore, ServerConfig, }; -use rustls_pki_types::{CertificateDer, TrustAnchor}; use secrecy::ExposeSecret; -use zenoh_link_commons::ConfigurationInspector; -// use rustls_pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}; use std::fs::File; use std::io; use std::net::SocketAddr; @@ -31,6 +28,7 @@ use std::{ }; use webpki::anchor_from_trusted_cert; use zenoh_config::Config as ZenohConfig; +use zenoh_link_commons::{tls::WebPkiVerifierAnyServerName, ConfigurationInspector}; use zenoh_protocol::core::endpoint::Config; use zenoh_protocol::core::endpoint::{self, Address}; use zenoh_result::{bail, zerror, ZError, ZResult}; @@ -160,40 +158,45 @@ impl TlsServerConfig { let tls_server_private_key = TlsServerConfig::load_tls_private_key(config).await?; let tls_server_certificate = TlsServerConfig::load_tls_certificate(config).await?; - let certs: Vec = + let certs: Vec = rustls_pemfile::certs(&mut Cursor::new(&tls_server_certificate)) - .map_err(|err| zerror!("Error processing server certificate: {err}."))? 
- .into_iter() - .map(Certificate) - .collect(); + .collect::>() + .map_err(|err| zerror!("Error processing server certificate: {err}."))?; - let mut keys: Vec = + let mut keys: Vec = rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map_err(|err| zerror!("Error processing server key: {err}."))? - .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; if keys.is_empty() { keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map_err(|err| zerror!("Error processing server key: {err}."))? - .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; } if keys.is_empty() { keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map_err(|err| zerror!("Error processing server key: {err}."))? - .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; } if keys.is_empty() { bail!("No private key found for TLS server."); } + // Install ring based rustls CryptoProvider. + rustls::crypto::ring::default_provider() + // This can be called successfully at most once in any process execution. + // Call this early in your process to configure which provider is used for the provider. + // The configuration should happen before any use of ClientConfig::builder() or ServerConfig::builder(). + .install_default() + // Ignore the error here, because `rustls::crypto::ring::default_provider().install_default()` will inevitably be executed multiple times + // when there are multiple quic links, and all but the first execution will fail. 
+ .ok(); + let sc = if tls_server_client_auth { let root_cert_store = load_trust_anchors(config)?.map_or_else( || { @@ -203,17 +206,13 @@ impl TlsServerConfig { }, Ok, )?; - let client_auth = AllowAnyAuthenticatedClient::new(root_cert_store); - ServerConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_protocol_versions(&[&TLS13])? - .with_client_cert_verifier(Arc::new(client_auth)) + let client_auth = WebPkiClientVerifier::builder(root_cert_store.into()).build()?; + ServerConfig::builder_with_protocol_versions(&[&TLS13]) + .with_client_cert_verifier(client_auth) .with_single_cert(certs, keys.remove(0)) .map_err(|e| zerror!(e))? } else { ServerConfig::builder() - .with_safe_defaults() .with_no_client_auth() .with_single_cert(certs, keys.remove(0)) .map_err(|e| zerror!(e))? @@ -271,68 +270,60 @@ impl TlsClientConfig { // Allows mixed user-generated CA and webPKI CA tracing::debug!("Loading default Web PKI certificates."); let mut root_cert_store = RootCertStore { - roots: webpki_roots::TLS_SERVER_ROOTS - .iter() - .map(|ta| ta.to_owned()) - .map(|ta| { - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject.to_vec(), - ta.subject_public_key_info.to_vec(), - ta.name_constraints.map(|nc| nc.to_vec()), - ) - }) - .collect(), + roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(), }; if let Some(custom_root_cert) = load_trust_anchors(config)? { tracing::debug!("Loading user-generated certificates."); - root_cert_store.roots.extend(custom_root_cert.roots); + root_cert_store.extend(custom_root_cert.roots); } + // Install ring based rustls CryptoProvider. + rustls::crypto::ring::default_provider() + // This can be called successfully at most once in any process execution. + // Call this early in your process to configure which provider is used for the provider. + // The configuration should happen before any use of ClientConfig::builder() or ServerConfig::builder(). 
+ .install_default() + // Ignore the error here, because `rustls::crypto::ring::default_provider().install_default()` will inevitably be executed multiple times + // when there are multiple quic links, and all but the first execution will fail. + .ok(); + let cc = if tls_client_server_auth { tracing::debug!("Loading client authentication key and certificate..."); let tls_client_private_key = TlsClientConfig::load_tls_private_key(config).await?; let tls_client_certificate = TlsClientConfig::load_tls_certificate(config).await?; - let certs: Vec = + let certs: Vec = rustls_pemfile::certs(&mut Cursor::new(&tls_client_certificate)) - .map_err(|err| zerror!("Error processing client certificate: {err}."))? - .into_iter() - .map(Certificate) - .collect(); + .collect::>() + .map_err(|err| zerror!("Error processing client certificate: {err}."))?; - let mut keys: Vec = + let mut keys: Vec = rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map_err(|err| zerror!("Error processing client key: {err}."))? - .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; if keys.is_empty() { keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map_err(|err| zerror!("Error processing client key: {err}."))? - .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; } if keys.is_empty() { keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map_err(|err| zerror!("Error processing client key: {err}."))? 
- .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; } if keys.is_empty() { bail!("No private key found for TLS client."); } - let builder = ClientConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_protocol_versions(&[&TLS13])?; + let builder = ClientConfig::builder_with_protocol_versions(&[&TLS13]); if tls_server_name_verification { builder @@ -340,6 +331,7 @@ impl TlsClientConfig { .with_client_auth_cert(certs, keys.remove(0)) } else { builder + .dangerous() .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( root_cert_store, ))) @@ -347,17 +339,14 @@ impl TlsClientConfig { } .map_err(|e| zerror!("Bad certificate/key: {}", e))? } else { - let builder = ClientConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_protocol_versions(&[&TLS13])?; - + let builder = ClientConfig::builder(); if tls_server_name_verification { builder .with_root_certificates(root_cert_store) .with_no_client_auth() } else { builder + .dangerous() .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( root_cert_store, ))) @@ -388,30 +377,19 @@ impl TlsClientConfig { } } -fn process_pem(pem: &mut dyn io::BufRead) -> ZResult> { +fn process_pem(pem: &mut dyn io::BufRead) -> ZResult>> { let certs: Vec = rustls_pemfile::certs(pem) - .map_err(|err| zerror!("Error processing PEM certificates: {err}."))? - .into_iter() - .map(CertificateDer::from) - .collect(); + .map(|result| result.map_err(|err| zerror!("Error processing PEM certificates: {err}."))) + .collect::, ZError>>()?; - let trust_anchors: Vec = certs + let trust_anchors: Vec = certs .into_iter() .map(|cert| { anchor_from_trusted_cert(&cert) .map_err(|err| zerror!("Error processing trust anchor: {err}.")) .map(|trust_anchor| trust_anchor.to_owned()) }) - .collect::, ZError>>()? 
- .into_iter() - .map(|ta| { - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject.to_vec(), - ta.subject_public_key_info.to_vec(), - ta.name_constraints.map(|nc| nc.to_vec()), - ) - }) - .collect(); + .collect::, ZError>>()?; Ok(trust_anchors) } @@ -472,7 +450,7 @@ fn load_trust_anchors(config: &Config<'_>) -> ZResult> { if let Some(value) = config.get(TLS_ROOT_CA_CERTIFICATE_RAW) { let mut pem = BufReader::new(value.as_bytes()); let trust_anchors = process_pem(&mut pem)?; - root_cert_store.roots.extend(trust_anchors); + root_cert_store.extend(trust_anchors); return Ok(Some(root_cert_store)); } @@ -480,14 +458,14 @@ fn load_trust_anchors(config: &Config<'_>) -> ZResult> { let certificate_pem = base64_decode(b64_certificate)?; let mut pem = BufReader::new(certificate_pem.as_slice()); let trust_anchors = process_pem(&mut pem)?; - root_cert_store.roots.extend(trust_anchors); + root_cert_store.extend(trust_anchors); return Ok(Some(root_cert_store)); } if let Some(filename) = config.get(TLS_ROOT_CA_CERTIFICATE_FILE) { let mut pem = BufReader::new(File::open(filename)?); let trust_anchors = process_pem(&mut pem)?; - root_cert_store.roots.extend(trust_anchors); + root_cert_store.extend(trust_anchors); return Ok(Some(root_cert_store)); } Ok(None) diff --git a/io/zenoh-links/zenoh-link-quic/src/verify.rs b/io/zenoh-links/zenoh-link-quic/src/verify.rs deleted file mode 100644 index baa7864246..0000000000 --- a/io/zenoh-links/zenoh-link-quic/src/verify.rs +++ /dev/null @@ -1,42 +0,0 @@ -use rustls::client::verify_server_cert_signed_by_trust_anchor; -use rustls::server::ParsedCertificate; -use std::time::SystemTime; -use tokio_rustls::rustls::{ - client::{ServerCertVerified, ServerCertVerifier}, - Certificate, RootCertStore, ServerName, -}; - -impl ServerCertVerifier for WebPkiVerifierAnyServerName { - /// Will verify the certificate is valid in the following ways: - /// - Signed by a trusted `RootCertStore` CA - /// - Not Expired - fn verify_server_cert( - 
&self, - end_entity: &Certificate, - intermediates: &[Certificate], - _server_name: &ServerName, - _scts: &mut dyn Iterator, - _ocsp_response: &[u8], - now: SystemTime, - ) -> Result { - let cert = ParsedCertificate::try_from(end_entity)?; - verify_server_cert_signed_by_trust_anchor(&cert, &self.roots, intermediates, now)?; - Ok(ServerCertVerified::assertion()) - } -} - -/// `ServerCertVerifier` that verifies that the server is signed by a trusted root, but allows any serverName -/// see the trait impl for more information. -pub struct WebPkiVerifierAnyServerName { - roots: RootCertStore, -} - -#[allow(unreachable_pub)] -impl WebPkiVerifierAnyServerName { - /// Constructs a new `WebPkiVerifierAnyServerName`. - /// - /// `roots` is the set of trust anchors to trust for issuing server certs. - pub fn new(roots: RootCertStore) -> Self { - Self { roots } - } -} diff --git a/io/zenoh-links/zenoh-link-tls/src/utils.rs b/io/zenoh-links/zenoh-link-tls/src/utils.rs index f62757523c..1c78cd93b3 100644 --- a/io/zenoh-links/zenoh-link-tls/src/utils.rs +++ b/io/zenoh-links/zenoh-link-tls/src/utils.rs @@ -188,6 +188,16 @@ impl TlsServerConfig { bail!("No private key found for TLS server."); } + // Install ring based rustls CryptoProvider. + rustls::crypto::ring::default_provider() + // This can be called successfully at most once in any process execution. + // Call this early in your process to configure which provider is used for the provider. + // The configuration should happen before any use of ClientConfig::builder() or ServerConfig::builder(). + .install_default() + // Ignore the error here, because `rustls::crypto::ring::default_provider().install_default()` will inevitably be executed multiple times + // when there are multiple quic links, and all but the first execution will fail. 
+ .ok(); + let sc = if tls_server_client_auth { let root_cert_store = load_trust_anchors(config)?.map_or_else( || { @@ -269,6 +279,16 @@ impl TlsClientConfig { root_cert_store.extend(custom_root_cert.roots); } + // Install ring based rustls CryptoProvider. + rustls::crypto::ring::default_provider() + // This can be called successfully at most once in any process execution. + // Call this early in your process to configure which provider is used for the provider. + // The configuration should happen before any use of ClientConfig::builder() or ServerConfig::builder(). + .install_default() + // Ignore the error here, because `rustls::crypto::ring::default_provider().install_default()` will inevitably be executed multiple times + // when there are multiple quic links, and all but the first execution will fail. + .ok(); + let cc = if tls_client_server_auth { tracing::debug!("Loading client authentication key and certificate..."); let tls_client_private_key = TlsClientConfig::load_tls_private_key(config).await?; From ed6c636a84c00552ca7f7c07d075d5783a40de44 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 12 Jun 2024 16:06:22 +0200 Subject: [PATCH 08/29] Fix interface name scanning when listening on IP unspecified for TCP/TLS/QUIC/WS (#1123) Co-authored-by: Julien Enoch --- io/zenoh-links/zenoh-link-quic/src/unicast.rs | 9 +++++++++ io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 9 +++++++++ io/zenoh-links/zenoh-link-tls/src/unicast.rs | 11 +++++++++++ io/zenoh-links/zenoh-link-udp/src/unicast.rs | 4 ++++ io/zenoh-links/zenoh-link-ws/src/unicast.rs | 9 +++++++++ 5 files changed, 42 insertions(+) diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 8d4b82c339..8dde380577 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -387,7 +387,16 @@ async fn accept_task( } }; + // Get the right source address in case an unsepecified IP (i.e. 
0.0.0.0 or [::]) is used + let src_addr = match quic_conn.local_ip() { + Some(ip) => SocketAddr::new(ip, src_addr.port()), + None => { + tracing::debug!("Can not accept QUIC connection: empty local IP"); + continue; + } + }; let dst_addr = quic_conn.remote_address(); + tracing::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object let link = Arc::new(LinkUnicastQuic::new( diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 3ef4f235ed..c07d6f15b9 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -409,6 +409,15 @@ async fn accept_task( res = accept(&socket) => { match res { Ok((stream, dst_addr)) => { + // Get the right source address in case an unsepecified IP (i.e. 0.0.0.0 or [::]) is used + let src_addr = match stream.local_addr() { + Ok(sa) => sa, + Err(e) => { + tracing::debug!("Can not accept TCP connection: {}", e); + continue; + } + }; + tracing::debug!("Accepted TCP connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object let link = Arc::new(LinkUnicastTcp::new(stream, src_addr, dst_addr)); diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index b12608354e..8776e0ae40 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -372,6 +372,15 @@ async fn accept_task( res = accept(&socket) => { match res { Ok((tcp_stream, dst_addr)) => { + // Get the right source address in case an unsepecified IP (i.e. 
0.0.0.0 or [::]) is used + let src_addr = match tcp_stream.local_addr() { + Ok(sa) => sa, + Err(e) => { + tracing::debug!("Can not accept TLS connection: {}", e); + continue; + } + }; + // Accept the TLS connection let tls_stream = match acceptor.accept(tcp_stream).await { Ok(stream) => TlsStream::Server(stream), @@ -382,6 +391,8 @@ async fn accept_task( } }; + + tracing::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 1fa9f9a7f4..fba3e23b69 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -498,6 +498,10 @@ async fn accept_read_task( tracing::trace!("Ready to accept UDP connections on: {:?}", src_addr); + if src_addr.ip().is_unspecified() { + tracing::warn!("Interceptors (e.g. Access Control, Downsampling) are not guaranteed to work on UDP when listening on 0.0.0.0 or [::]. Their usage is discouraged. See https://github.com/eclipse-zenoh/zenoh/issues/1126."); + } + loop { // Buffers for deserialization let mut buff = zenoh_buffers::vec::uninit(UDP_MAX_MTU as usize); diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index e94e4b6868..f1aa0088f0 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -498,6 +498,15 @@ async fn accept_task( _ = token.cancelled() => break, }; + // Get the right source address in case an unsepecified IP (i.e. 
0.0.0.0 or [::]) is used + let src_addr = match stream.local_addr() { + Ok(sa) => sa, + Err(e) => { + tracing::debug!("Can not accept TCP connection: {}", e); + continue; + } + }; + tracing::debug!( "Accepted TCP (WebSocket) connection on {:?}: {:?}", src_addr, From 8160b019a06bcd4a6cabcd114dbb901f65e2dc93 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Thu, 13 Jun 2024 18:12:42 +0200 Subject: [PATCH 09/29] Enable releasing from any branch (#1136) --- .github/workflows/release.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 25553c2b0a..f8e614fa12 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -27,6 +27,10 @@ on: type: string description: Release number required: false + branch: + type: string + description: Release branch + required: false jobs: tag: @@ -42,6 +46,7 @@ jobs: repo: ${{ github.repository }} live-run: ${{ inputs.live-run || false }} version: ${{ inputs.version }} + branch: ${{ inputs.branch }} github-token: ${{ secrets.BOT_TOKEN_WORKFLOW }} - uses: eclipse-zenoh/ci/bump-crates@main From 7adad9482f8d27fab56ca54aa053de4cc2f63557 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 14 Jun 2024 11:48:29 +0200 Subject: [PATCH 10/29] Fix cargo clippy (#1145) --- commons/zenoh-buffers/src/slice.rs | 6 ++-- commons/zenoh-config/src/lib.rs | 6 +++- .../src/keyexpr_tree/arc_tree.rs | 28 +++++++++++++++---- .../src/keyexpr_tree/box_tree.rs | 2 +- .../src/keyexpr_tree/traits/mod.rs | 6 ++-- commons/zenoh-protocol/src/core/encoding.rs | 20 +++++++++---- 6 files changed, 50 insertions(+), 18 deletions(-) diff --git a/commons/zenoh-buffers/src/slice.rs b/commons/zenoh-buffers/src/slice.rs index a652c6930e..9b5d72ca51 100644 --- a/commons/zenoh-buffers/src/slice.rs +++ b/commons/zenoh-buffers/src/slice.rs @@ -76,7 +76,7 @@ impl Writer for &mut [u8] { // SAFETY: this doesn't compile with simple assignment because the compiler // doesn't believe that 
the subslice has the same lifetime as the original slice, // so we transmute to assure it that it does. - *self = unsafe { mem::transmute(lhs) }; + *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(lhs) }; // SAFETY: this operation is safe since we check if len is non-zero. Ok(unsafe { NonZeroUsize::new_unchecked(len) }) @@ -98,7 +98,7 @@ impl Writer for &mut [u8] { // SAFETY: this doesn't compile with simple assignment because the compiler // doesn't believe that the subslice has the same lifetime as the original slice, // so we transmute to assure it that it does. - *self = unsafe { mem::transmute(lhs) }; + *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(lhs) }; Ok(()) } @@ -122,7 +122,7 @@ impl Writer for &mut [u8] { // SAFETY: this doesn't compile with simple assignment because the compiler // doesn't believe that the subslice has the same lifetime as the original slice, // so we transmute to assure it that it does. - *self = unsafe { mem::transmute(s) }; + *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(s) }; NonZeroUsize::new(len).ok_or(DidntWrite) } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index c54d75a82a..7732a5bbc0 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -1086,7 +1086,11 @@ impl PluginsConfig { for next in split { match remove_from { Value::Object(o) => match o.get_mut(current) { - Some(v) => unsafe { remove_from = std::mem::transmute(v) }, + Some(v) => { + remove_from = unsafe { + std::mem::transmute::<&mut serde_json::Value, &mut serde_json::Value>(v) + } + } None => bail!("{:?} has no {} property", o, current), }, Value::Array(a) => { diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs index dfb7e68261..fe2640c604 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs @@ -159,8 +159,11 @@ where } // 
tags{ketree.arc.node.mut} fn node_mut(&'a self, token: &'a mut Token, at: &keyexpr) -> Option { - self.node(unsafe { core::mem::transmute(&*token) }, at) - .map(|(node, _)| (node, token)) + self.node( + unsafe { core::mem::transmute::<&Token, &Token>(&*token) }, + at, + ) + .map(|(node, _)| (node, token)) } // tags{ketree.arc.node.or_create} fn node_or_create(&'a self, token: &'a mut Token, at: &keyexpr) -> Self::NodeMut { @@ -236,7 +239,9 @@ where fn tree_iter_mut(&'a self, token: &'a mut Token) -> Self::TreeIterMut { let inner = ketree_borrow(&self.inner, token); TokenPacker { - iter: TreeIter::new(unsafe { core::mem::transmute(&inner.children) }), + iter: TreeIter::new(unsafe { + core::mem::transmute::<&Children::Assoc, &Children::Assoc>(&inner.children) + }), token, } } @@ -288,7 +293,12 @@ where let inner = ketree_borrow(&self.inner, token); if inner.wildness.get() || key.is_wild() { IterOrOption::Iter(TokenPacker { - iter: Intersection::new(unsafe { core::mem::transmute(&inner.children) }, key), + iter: Intersection::new( + unsafe { + core::mem::transmute::<&Children::Assoc, &Children::Assoc>(&inner.children) + }, + key, + ), token, }) } else { @@ -340,7 +350,10 @@ where if inner.wildness.get() || key.is_wild() { unsafe { IterOrOption::Iter(TokenPacker { - iter: Inclusion::new(core::mem::transmute(&inner.children), key), + iter: Inclusion::new( + core::mem::transmute::<&Children::Assoc, &Children::Assoc>(&inner.children), + key, + ), token, }) } @@ -393,7 +406,10 @@ where if inner.wildness.get() || key.is_wild() { unsafe { IterOrOption::Iter(TokenPacker { - iter: Includer::new(core::mem::transmute(&inner.children), key), + iter: Includer::new( + core::mem::transmute::<&Children::Assoc, &Children::Assoc>(&inner.children), + key, + ), token, }) } diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs index 67777aaa90..aed873f51a 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs +++ 
b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs @@ -157,7 +157,7 @@ where if !node.children.is_empty() { node.weight.take() } else { - let chunk = unsafe { core::mem::transmute::<_, &keyexpr>(node.chunk()) }; + let chunk = unsafe { core::mem::transmute::<&keyexpr, &keyexpr>(node.chunk()) }; match node.parent { None => &mut self.children, Some(parent) => unsafe { &mut (*parent.as_ptr()).children }, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs index cee2bd9162..ed3f36a371 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs @@ -54,8 +54,10 @@ pub trait IKeyExprTree<'a, Weight> { Self::TreeIterItem: AsNode>, { self.tree_iter().filter_map(|node| { - unsafe { core::mem::transmute::<_, Option<&Weight>>(node.as_node().weight()) } - .map(|w| (node.as_node().keyexpr(), w)) + unsafe { + core::mem::transmute::, Option<&Weight>>(node.as_node().weight()) + } + .map(|w| (node.as_node().keyexpr(), w)) }) } diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index b0b089d9b3..8132729c58 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -89,7 +89,7 @@ impl TryFrom for KnownEncoding { type Error = ZError; fn try_from(value: u8) -> Result { if value < consts::MIMES.len() as u8 + 1 { - Ok(unsafe { mem::transmute(value) }) + Ok(unsafe { mem::transmute::(value) }) } else { Err(zerror!("Unknown encoding")) } @@ -213,9 +213,14 @@ impl From<&'static str> for Encoding { for (i, v) in consts::MIMES.iter().enumerate().skip(1) { if let Some(suffix) = s.strip_prefix(v) { if suffix.is_empty() { - return Encoding::Exact(unsafe { mem::transmute(i as u8) }); + return Encoding::Exact(unsafe { + mem::transmute::(i as u8) + }); } else { - return Encoding::WithSuffix(unsafe { mem::transmute(i as u8) }, suffix.into()); + return 
Encoding::WithSuffix( + unsafe { mem::transmute::(i as u8) }, + suffix.into(), + ); } } } @@ -233,9 +238,14 @@ impl From for Encoding { if s.starts_with(v) { s.replace_range(..v.len(), ""); if s.is_empty() { - return Encoding::Exact(unsafe { mem::transmute(i as u8) }); + return Encoding::Exact(unsafe { + mem::transmute::(i as u8) + }); } else { - return Encoding::WithSuffix(unsafe { mem::transmute(i as u8) }, s.into()); + return Encoding::WithSuffix( + unsafe { mem::transmute::(i as u8) }, + s.into(), + ); } } } From 93f93d2d67f25886a25e83922a534694c2135669 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 17 Jun 2024 14:23:12 +0200 Subject: [PATCH 11/29] Release tables locks before propagating subscribers and queryables declarations to void dead locks (#1150) * Send simple sub and qabl declarations using a given function * Send simple sub and qabl declarations after releasing tables lock * Send simple sub and qabl declarations after releasing tables lock (missing places) --- zenoh/src/net/primitives/demux.rs | 24 +- zenoh/src/net/routing/dispatcher/face.rs | 25 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 21 +- zenoh/src/net/routing/dispatcher/queries.rs | 15 +- zenoh/src/net/routing/dispatcher/tables.rs | 8 +- zenoh/src/net/routing/hat/client/mod.rs | 25 +- zenoh/src/net/routing/hat/client/pubsub.rs | 132 ++++--- zenoh/src/net/routing/hat/client/queries.rs | 117 +++--- .../src/net/routing/hat/linkstate_peer/mod.rs | 33 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 185 ++++++---- .../net/routing/hat/linkstate_peer/queries.rs | 177 +++++---- zenoh/src/net/routing/hat/mod.rs | 21 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 25 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 132 ++++--- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 117 +++--- zenoh/src/net/routing/hat/router/mod.rs | 71 +++- zenoh/src/net/routing/hat/router/pubsub.rs | 335 ++++++++++------- zenoh/src/net/routing/hat/router/queries.rs | 344 +++++++++++------- 
zenoh/src/net/routing/router.rs | 22 +- zenoh/src/net/tests/tables.rs | 12 + 20 files changed, 1194 insertions(+), 647 deletions(-) diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index d62e410c81..fe096a9dfe 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -72,9 +72,21 @@ impl TransportPeerEventHandler for DeMux { NetworkBody::ResponseFinal(m) => self.face.send_response_final(m), NetworkBody::OAM(m) => { if let Some(transport) = self.transport.as_ref() { + let mut declares = vec![]; let ctrl_lock = zlock!(self.face.tables.ctrl_lock); let mut tables = zwrite!(self.face.tables.tables); - ctrl_lock.handle_oam(&mut tables, &self.face.tables, m, transport)? + ctrl_lock.handle_oam( + &mut tables, + &self.face.tables, + m, + transport, + &mut |p, m| declares.push((p.clone(), m)), + )?; + drop(tables); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } } } @@ -89,9 +101,17 @@ impl TransportPeerEventHandler for DeMux { fn closing(&self) { self.face.send_close(); if let Some(transport) = self.transport.as_ref() { + let mut declares = vec![]; let ctrl_lock = zlock!(self.face.tables.ctrl_lock); let mut tables = zwrite!(self.face.tables.tables); - let _ = ctrl_lock.closing(&mut tables, &self.face.tables, transport); + let _ = ctrl_lock.closing(&mut tables, &self.face.tables, transport, &mut |p, m| { + declares.push((p.clone(), m)) + }); + drop(tables); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } } diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index f2def1d20a..4df9b7054c 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -195,6 +195,7 @@ impl Primitives for Face { unregister_expr(&self.tables, &mut self.state.clone(), m.id); } zenoh_protocol::network::DeclareBody::DeclareSubscriber(m) => { + let mut declares = vec![]; declare_subscription( 
ctrl_lock.as_ref(), &self.tables, @@ -202,18 +203,30 @@ impl Primitives for Face { &m.wire_expr, &m.ext_info, msg.ext_nodeid.node_id, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } zenoh_protocol::network::DeclareBody::UndeclareSubscriber(m) => { + let mut declares = vec![]; undeclare_subscription( ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), &m.ext_wire_expr.wire_expr, msg.ext_nodeid.node_id, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } zenoh_protocol::network::DeclareBody::DeclareQueryable(m) => { + let mut declares = vec![]; declare_queryable( ctrl_lock.as_ref(), &self.tables, @@ -221,16 +234,27 @@ impl Primitives for Face { &m.wire_expr, &m.ext_info, msg.ext_nodeid.node_id, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { + let mut declares = vec![]; undeclare_queryable( ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), &m.ext_wire_expr.wire_expr, msg.ext_nodeid.node_id, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(), zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(), @@ -238,7 +262,6 @@ impl Primitives for Face { zenoh_protocol::network::DeclareBody::FinalInterest(_m) => todo!(), zenoh_protocol::network::DeclareBody::UndeclareInterest(_m) => todo!(), } - drop(ctrl_lock); } #[inline] diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 6ec4bbf735..5ac1f60627 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -14,7 +14,7 @@ use super::face::FaceState; use super::resource::{DataRoutes, 
Direction, PullCaches, Resource}; use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; -use crate::net::routing::hat::HatTrait; +use crate::net::routing::hat::{HatTrait, SendDeclare}; use std::borrow::Cow; use std::collections::HashMap; use std::sync::Arc; @@ -37,6 +37,7 @@ pub(crate) fn declare_subscription( expr: &WireExpr, sub_info: &SubscriberInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ) { tracing::debug!("Declare subscription {}", face); let rtables = zread!(tables.tables); @@ -66,7 +67,14 @@ pub(crate) fn declare_subscription( (res, wtables) }; - hat_code.declare_subscription(&mut wtables, face, &mut res, sub_info, node_id); + hat_code.declare_subscription( + &mut wtables, + face, + &mut res, + sub_info, + node_id, + send_declare, + ); disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -96,6 +104,7 @@ pub(crate) fn undeclare_subscription( face: &mut Arc, expr: &WireExpr, node_id: NodeId, + send_declare: &mut SendDeclare, ) { tracing::debug!("Undeclare subscription {}", face); let rtables = zread!(tables.tables); @@ -105,7 +114,13 @@ pub(crate) fn undeclare_subscription( drop(rtables); let mut wtables = zwrite!(tables.tables); - hat_code.undeclare_subscription(&mut wtables, face, &mut res, node_id); + hat_code.undeclare_subscription( + &mut wtables, + face, + &mut res, + node_id, + send_declare, + ); disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 719a3834d6..9de841949c 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -15,7 +15,7 @@ use super::face::FaceState; use super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}; use super::tables::NodeId; use super::tables::{RoutingExpr, Tables, TablesLock}; -use crate::net::routing::hat::HatTrait; +use crate::net::routing::hat::{HatTrait, SendDeclare}; use 
crate::net::routing::RoutingContext; use async_trait::async_trait; use std::collections::HashMap; @@ -56,6 +56,7 @@ pub(crate) fn declare_queryable( expr: &WireExpr, qabl_info: &QueryableInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ) { tracing::debug!("Register queryable {}", face); let rtables = zread!(tables.tables); @@ -85,7 +86,14 @@ pub(crate) fn declare_queryable( (res, wtables) }; - hat_code.declare_queryable(&mut wtables, face, &mut res, qabl_info, node_id); + hat_code.declare_queryable( + &mut wtables, + face, + &mut res, + qabl_info, + node_id, + send_declare, + ); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -112,6 +120,7 @@ pub(crate) fn undeclare_queryable( face: &mut Arc, expr: &WireExpr, node_id: NodeId, + send_declare: &mut SendDeclare, ) { let rtables = zread!(tables.tables); match rtables.get_mapping(face, &expr.scope, expr.mapping) { @@ -120,7 +129,7 @@ pub(crate) fn undeclare_queryable( drop(rtables); let mut wtables = zwrite!(tables.tables); - hat_code.undeclare_queryable(&mut wtables, face, &mut res, node_id); + hat_code.undeclare_queryable(&mut wtables, face, &mut res, node_id, send_declare); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 2d5eb436e7..9e71eee853 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -174,7 +174,13 @@ pub fn close_face(tables: &TablesLock, face: &Weak) { tracing::debug!("Close {}", face); face.task_controller.terminate_all(Duration::from_secs(10)); finalize_pending_queries(tables, &mut face); - zlock!(tables.ctrl_lock).close_face(tables, &mut face); + let mut declares = vec![]; + let ctrl_lock = zlock!(tables.ctrl_lock); + ctrl_lock.close_face(tables, &mut face, &mut |p, m| declares.push((p.clone(), m))); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } None => 
tracing::error!("Face already closed!"), } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index c19faf39f8..8e8d8d4cb6 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -36,7 +36,7 @@ use super::{ face::FaceState, tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, - HatBaseTrait, HatTrait, + HatBaseTrait, HatTrait, SendDeclare, }; use std::{ any::Any, @@ -97,9 +97,10 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, _tables_ref: &Arc, face: &mut Face, + send_declare: &mut SendDeclare, ) -> ZResult<()> { - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); Ok(()) } @@ -109,13 +110,19 @@ impl HatBaseTrait for HatCode { _tables_ref: &Arc, face: &mut Face, _transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); Ok(()) } - fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + fn close_face( + &self, + tables: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, + ) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); @@ -139,7 +146,7 @@ impl HatBaseTrait for HatCode { .drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -167,7 +174,7 @@ impl HatBaseTrait for HatCode { .drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - 
undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -229,6 +236,7 @@ impl HatBaseTrait for HatCode { _tables_ref: &Arc, _oam: Oam, _transport: &TransportUnicast, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { Ok(()) } @@ -248,6 +256,7 @@ impl HatBaseTrait for HatCode { _tables: &mut Tables, _tables_ref: &Arc, _transport: &TransportUnicast, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { Ok(()) } diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index fb92ae614d..3f194e4e56 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -17,7 +17,7 @@ use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; +use crate::net::routing::hat::{HatPubSubTrait, SendDeclare, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; @@ -40,6 +40,7 @@ fn propagate_simple_subscription_to( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) @@ -48,19 +49,22 @@ fn propagate_simple_subscription_to( { face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: 
DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); } } @@ -69,6 +73,7 @@ fn propagate_simple_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { for mut dst_face in tables .faces @@ -76,7 +81,14 @@ fn propagate_simple_subscription( .cloned() .collect::>>() { - propagate_simple_subscription_to(tables, &mut dst_face, res, sub_info, src_face); + propagate_simple_subscription_to( + tables, + &mut dst_face, + res, + sub_info, + src_face, + send_declare, + ); } } @@ -126,12 +138,13 @@ fn declare_client_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, + send_declare: &mut SendDeclare, ) { register_client_subscription(tables, face, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; - propagate_simple_subscription(tables, res, &propa_sub_info, face); + propagate_simple_subscription(tables, res, &propa_sub_info, face, send_declare); // This introduced a buffer overflow on windows // @TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -168,22 +181,29 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } -fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_subscription( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if face_hat!(face).local_subs.contains(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - 
face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_subs.remove(res); } } @@ -193,6 +213,7 @@ pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -202,7 +223,7 @@ pub(super) fn undeclare_client_subscription( let mut client_subs = client_subs(res); if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); + propagate_forget_simple_subscription(tables, res, send_declare); } if client_subs.len() == 1 { let face = &mut client_subs[0]; @@ -210,18 +231,21 @@ pub(super) fn undeclare_client_subscription( && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( 
+ &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_subs.remove(res); } @@ -231,11 +255,16 @@ fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { - undeclare_client_subscription(tables, face, res); + undeclare_client_subscription(tables, face, res, send_declare); } -pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn pubsub_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers mode: Mode::Push, @@ -247,7 +276,14 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .collect::>>() { for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + send_declare, + ); } } } @@ -260,8 +296,9 @@ impl HatPubSubTrait for HatCode { res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - declare_client_subscription(tables, face, res, sub_info); + declare_client_subscription(tables, face, res, sub_info, send_declare); } fn undeclare_subscription( @@ -270,8 +307,9 @@ impl HatPubSubTrait for HatCode { face: &mut Arc, res: &mut Arc, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - forget_client_subscription(tables, face, res); + forget_client_subscription(tables, face, res, send_declare); } fn get_subscriptions(&self, tables: 
&Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 3576148aaf..445f618845 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -17,7 +17,7 @@ use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; +use crate::net::routing::hat::{HatQueriesTrait, SendDeclare, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; @@ -79,6 +79,7 @@ fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, + send_declare: &mut SendDeclare, ) { let faces = tables.faces.values().cloned(); for mut dst_face in faces { @@ -94,19 +95,22 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } } @@ -143,9 +147,10 @@ fn declare_client_queryable( 
face: &mut Arc, res: &mut Arc, qabl_info: &QueryableInfo, + send_declare: &mut SendDeclare, ) { register_client_queryable(tables, face, res, qabl_info); - propagate_simple_queryable(tables, res, Some(face)); + propagate_simple_queryable(tables, res, Some(face), send_declare); } #[inline] @@ -162,22 +167,29 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } -fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { +fn propagate_forget_simple_queryable( + tables: &mut Tables, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_qabls.remove(res); } @@ -188,6 +200,7 @@ pub(super) fn undeclare_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -199,26 +212,29 @@ pub(super) fn undeclare_client_queryable( let mut client_qabls = client_qabls(res); if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); + 
propagate_forget_simple_queryable(tables, res, send_declare); } else { - propagate_simple_queryable(tables, res, None); + propagate_simple_queryable(tables, res, None, send_declare); } if client_qabls.len() == 1 { let face = &mut client_qabls[0]; if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_qabls.remove(res); } @@ -229,11 +245,16 @@ fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { - undeclare_client_queryable(tables, face, res); + undeclare_client_queryable(tables, face, res, send_declare); } -pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { +pub(super) fn queries_new_face( + tables: &mut Tables, + _face: &mut Arc, + send_declare: &mut SendDeclare, +) { for face in tables .faces .values() @@ -241,7 +262,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) .collect::>>() { for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); + propagate_simple_queryable(tables, qabl, Some(&mut face.clone()), send_declare); } } } @@ -258,8 +279,9 @@ impl HatQueriesTrait for HatCode { res: &mut Arc, 
qabl_info: &QueryableInfo, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - declare_client_queryable(tables, face, res, qabl_info); + declare_client_queryable(tables, face, res, qabl_info, send_declare); } fn undeclare_queryable( @@ -268,8 +290,9 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, res: &mut Arc, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - forget_client_queryable(tables, face, res); + forget_client_queryable(tables, face, res, send_declare); } fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 808acef23f..ad4e1667f0 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -27,7 +27,7 @@ use super::{ face::FaceState, tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, - HatBaseTrait, HatTrait, + HatBaseTrait, HatTrait, SendDeclare, }; use crate::{ net::{ @@ -213,9 +213,10 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, _tables_ref: &Arc, face: &mut Face, + send_declare: &mut SendDeclare, ) -> ZResult<()> { - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); Ok(()) } @@ -225,6 +226,7 @@ impl HatBaseTrait for HatCode { tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { let link_id = if face.state.whatami != WhatAmI::Client { if let Some(net) = hat_mut!(tables).peers_net.as_mut() { @@ -237,8 +239,8 @@ impl HatBaseTrait for HatCode { }; face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); if 
face.state.whatami != WhatAmI::Client { hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); @@ -246,7 +248,12 @@ impl HatBaseTrait for HatCode { Ok(()) } - fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + fn close_face( + &self, + tables: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, + ) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); @@ -270,7 +277,7 @@ impl HatBaseTrait for HatCode { .drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -298,7 +305,7 @@ impl HatBaseTrait for HatCode { .drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -360,6 +367,7 @@ impl HatBaseTrait for HatCode { tables_ref: &Arc, oam: Oam, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { if oam.id == OAM_LINKSTATE { if let ZExtBody::ZBuf(buf) = oam.body { @@ -376,8 +384,8 @@ impl HatBaseTrait for HatCode { let changes = net.link_states(list.link_states, zid); for (_, removed_node) in changes.removed_nodes { - pubsub_remove_node(tables, &removed_node.zid); - queries_remove_node(tables, &removed_node.zid); + pubsub_remove_node(tables, &removed_node.zid, send_declare); + queries_remove_node(tables, &removed_node.zid, send_declare); } hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); @@ -409,6 +417,7 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, tables_ref: &Arc, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { match 
(transport.get_zid(), transport.get_whatami()) { (Ok(zid), Ok(whatami)) => { @@ -419,8 +428,8 @@ impl HatBaseTrait for HatCode { .unwrap() .remove_link(&zid) { - pubsub_remove_node(tables, &removed_node.zid); - queries_remove_node(tables, &removed_node.zid); + pubsub_remove_node(tables, &removed_node.zid, send_declare); + queries_remove_node(tables, &removed_node.zid, send_declare); } hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 232e241670..80a8eff95d 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -19,7 +19,7 @@ use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; +use crate::net::routing::hat::{HatPubSubTrait, SendDeclare, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; @@ -85,6 +85,7 @@ fn propagate_simple_subscription_to( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) && !face_hat!(dst_face).local_subs.contains(res) @@ -92,19 +93,22 @@ fn propagate_simple_subscription_to( { face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, 
- }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); } } @@ -113,6 +117,7 @@ fn propagate_simple_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { for mut dst_face in tables .faces @@ -120,7 +125,14 @@ fn propagate_simple_subscription( .cloned() .collect::>>() { - propagate_simple_subscription_to(tables, &mut dst_face, res, sub_info, src_face); + propagate_simple_subscription_to( + tables, + &mut dst_face, + res, + sub_info, + src_face, + send_declare, + ); } } @@ -167,6 +179,7 @@ fn register_peer_subscription( res: &mut Arc, sub_info: &SubscriberInfo, peer: ZenohId, + send_declare: &mut SendDeclare, ) { if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription @@ -182,7 +195,7 @@ fn register_peer_subscription( if tables.whatami == WhatAmI::Peer { // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); + propagate_simple_subscription(tables, res, sub_info, face, send_declare); } } @@ -192,8 +205,9 @@ fn declare_peer_subscription( res: &mut Arc, sub_info: &SubscriberInfo, peer: ZenohId, + send_declare: &mut SendDeclare, ) { - register_peer_subscription(tables, face, res, sub_info, peer); + register_peer_subscription(tables, face, res, sub_info, peer, send_declare); } fn register_client_subscription( @@ -242,12 +256,13 @@ fn declare_client_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, + send_declare: &mut SendDeclare, ) { register_client_subscription(tables, face, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; let zid = tables.zid; - register_peer_subscription(tables, 
face, res, &propa_sub_info, zid); + register_peer_subscription(tables, face, res, &propa_sub_info, zid, send_declare); } #[inline] @@ -313,22 +328,29 @@ fn send_forget_sourced_subscription_to_net_children( } } -fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_subscription( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if face_hat!(face).local_subs.contains(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_subs.remove(res); } } @@ -369,7 +391,12 @@ fn propagate_forget_sourced_subscription( } } -fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { +fn unregister_peer_subscription( + tables: &mut Tables, + res: &mut Arc, + peer: &ZenohId, + send_declare: &mut SendDeclare, +) { tracing::debug!( "Unregister peer subscription {} (peer: {})", res.expr(), @@ -383,7 +410,7 @@ fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, pe .retain(|sub| !Arc::ptr_eq(sub, res)); if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_subscription(tables, res); + propagate_forget_simple_subscription(tables, res, send_declare); } } } @@ -393,9 +420,10 @@ fn undeclare_peer_subscription( face: Option<&Arc>, res: 
&mut Arc, peer: &ZenohId, + send_declare: &mut SendDeclare, ) { if res_hat!(res).peer_subs.contains(peer) { - unregister_peer_subscription(tables, res, peer); + unregister_peer_subscription(tables, res, peer, send_declare); propagate_forget_sourced_subscription(tables, res, face, peer); } } @@ -405,14 +433,16 @@ fn forget_peer_subscription( face: &mut Arc, res: &mut Arc, peer: &ZenohId, + send_declare: &mut SendDeclare, ) { - undeclare_peer_subscription(tables, Some(face), res, peer); + undeclare_peer_subscription(tables, Some(face), res, peer, send_declare); } pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -423,7 +453,7 @@ pub(super) fn undeclare_client_subscription( let mut client_subs = client_subs(res); let peer_subs = remote_peer_subs(tables, res); if client_subs.is_empty() { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + undeclare_peer_subscription(tables, None, res, &tables.zid.clone(), send_declare); } if client_subs.len() == 1 && !peer_subs { let face = &mut client_subs[0]; @@ -431,18 +461,21 @@ pub(super) fn undeclare_client_subscription( && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + 
body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_subs.remove(res); } @@ -453,11 +486,16 @@ fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { - undeclare_client_subscription(tables, face, res); + undeclare_client_subscription(tables, face, res, send_declare); } -pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn pubsub_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO mode: Mode::Push, @@ -467,24 +505,31 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in &hat!(tables).peer_subs { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); } } } -pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId) { +pub(super) fn pubsub_remove_node( + tables: &mut Tables, + node: &ZenohId, + send_declare: &mut SendDeclare, +) { for mut res in hat!(tables) .peer_subs .iter() @@ -492,7 +537,7 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId) { .cloned() .collect::>>() { - 
unregister_peer_subscription(tables, &mut res, node); + unregister_peer_subscription(tables, &mut res, node, send_declare); update_matches_data_routes(tables, &mut res); Resource::clean(&mut res) @@ -579,13 +624,14 @@ impl HatPubSubTrait for HatCode { res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ) { if face.whatami != WhatAmI::Client { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_subscription(tables, face, res, sub_info, peer) + declare_peer_subscription(tables, face, res, sub_info, peer, send_declare) } } else { - declare_client_subscription(tables, face, res, sub_info) + declare_client_subscription(tables, face, res, sub_info, send_declare) } } @@ -595,13 +641,14 @@ impl HatPubSubTrait for HatCode { face: &mut Arc, res: &mut Arc, node_id: NodeId, + send_declare: &mut SendDeclare, ) { if face.whatami != WhatAmI::Client { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, res, &peer); + forget_peer_subscription(tables, face, res, &peer, send_declare); } } else { - forget_client_subscription(tables, face, res); + forget_client_subscription(tables, face, res, send_declare); } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index a1dd01d903..dfe729e4a3 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -19,7 +19,7 @@ use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; +use crate::net::routing::hat::{HatQueriesTrait, SendDeclare, Sources}; use crate::net::routing::router::RoutesIndexes; use 
crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; @@ -162,6 +162,7 @@ fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, + send_declare: &mut SendDeclare, ) { let faces = tables.faces.values().cloned(); for mut dst_face in faces { @@ -175,19 +176,22 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } } @@ -235,6 +239,7 @@ fn register_peer_queryable( res: &mut Arc, qabl_info: &QueryableInfo, peer: ZenohId, + send_declare: &mut SendDeclare, ) { let current_info = res_hat!(res).peer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { @@ -251,7 +256,7 @@ fn register_peer_queryable( if tables.whatami == WhatAmI::Peer { // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); + propagate_simple_queryable(tables, res, face, send_declare); } } @@ -261,9 +266,10 @@ fn declare_peer_queryable( res: &mut Arc, qabl_info: &QueryableInfo, peer: ZenohId, + send_declare: &mut SendDeclare, ) { let face = Some(face); - register_peer_queryable(tables, face, res, qabl_info, peer); + register_peer_queryable(tables, face, res, qabl_info, peer, send_declare); } 
fn register_client_queryable( @@ -298,12 +304,13 @@ fn declare_client_queryable( face: &mut Arc, res: &mut Arc, qabl_info: &QueryableInfo, + send_declare: &mut SendDeclare, ) { register_client_queryable(tables, face, res, qabl_info); let local_details = local_peer_qabl_info(tables, res); let zid = tables.zid; - register_peer_queryable(tables, Some(face), res, &local_details, zid); + register_peer_queryable(tables, Some(face), res, &local_details, zid, send_declare); } #[inline] @@ -369,22 +376,29 @@ fn send_forget_sourced_queryable_to_net_children( } } -fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { +fn propagate_forget_simple_queryable( + tables: &mut Tables, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_qabls.remove(res); } @@ -426,7 +440,12 @@ fn propagate_forget_sourced_queryable( } } -fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { +fn unregister_peer_queryable( + tables: &mut Tables, + res: &mut Arc, + peer: &ZenohId, + send_declare: &mut SendDeclare, +) { tracing::debug!("Unregister peer 
queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.remove(peer); @@ -436,7 +455,7 @@ fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: .retain(|qabl| !Arc::ptr_eq(qabl, res)); if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_queryable(tables, res); + propagate_forget_simple_queryable(tables, res, send_declare); } } } @@ -446,9 +465,10 @@ fn undeclare_peer_queryable( face: Option<&Arc>, res: &mut Arc, peer: &ZenohId, + send_declare: &mut SendDeclare, ) { if res_hat!(res).peer_qabls.contains_key(peer) { - unregister_peer_queryable(tables, res, peer); + unregister_peer_queryable(tables, res, peer, send_declare); propagate_forget_sourced_queryable(tables, res, face, peer); } } @@ -458,14 +478,16 @@ fn forget_peer_queryable( face: &mut Arc, res: &mut Arc, peer: &ZenohId, + send_declare: &mut SendDeclare, ) { - undeclare_peer_queryable(tables, Some(face), res, peer); + undeclare_peer_queryable(tables, Some(face), res, peer, send_declare); } pub(super) fn undeclare_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -479,28 +501,31 @@ pub(super) fn undeclare_client_queryable( let peer_qabls = remote_peer_qabls(tables, res); if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + undeclare_peer_queryable(tables, None, res, &tables.zid.clone(), send_declare); } else { let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid); + register_peer_queryable(tables, None, res, &local_info, tables.zid, send_declare); } if client_qabls.len() == 1 && !peer_qabls { let face = &mut client_qabls[0]; if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - 
face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_qabls.remove(res); } @@ -511,36 +536,48 @@ fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { - undeclare_client_queryable(tables, face, res); + undeclare_client_queryable(tables, face, res, send_declare); } -pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn queries_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { if face.whatami == WhatAmI::Client { for qabl in &hat!(tables).peer_qabls { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: 
ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); } } } } -pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId) { +pub(super) fn queries_remove_node( + tables: &mut Tables, + node: &ZenohId, + send_declare: &mut SendDeclare, +) { let mut qabls = vec![]; for res in hat!(tables).peer_qabls.iter() { for qabl in res_hat!(res).peer_qabls.keys() { @@ -550,7 +587,7 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId) { } } for mut res in qabls { - unregister_peer_queryable(tables, &mut res, node); + unregister_peer_queryable(tables, &mut res, node, send_declare); update_matches_query_routes(tables, &res); Resource::clean(&mut res) @@ -644,13 +681,14 @@ impl HatQueriesTrait for HatCode { res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ) { if face.whatami != WhatAmI::Client { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_queryable(tables, face, res, qabl_info, peer); + declare_peer_queryable(tables, face, res, qabl_info, peer, send_declare); } } else { - declare_client_queryable(tables, face, res, qabl_info); + declare_client_queryable(tables, face, res, qabl_info, send_declare); } } @@ -660,13 +698,14 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, res: &mut Arc, node_id: NodeId, + send_declare: &mut SendDeclare, ) { if face.whatami != WhatAmI::Client { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, res, &peer); + forget_peer_queryable(tables, face, res, &peer, send_declare); } } else { - forget_client_queryable(tables, face, res); + forget_client_queryable(tables, face, res, send_declare); } } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 82f2a6746e..99e2f175b6 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ 
b/zenoh/src/net/routing/hat/mod.rs @@ -23,6 +23,7 @@ use super::{ tables::{NodeId, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, TablesLock}, }, router::RoutesIndexes, + RoutingContext, }; use crate::runtime::Runtime; use std::{any::Any, sync::Arc}; @@ -32,7 +33,7 @@ use zenoh_protocol::{ core::WireExpr, network::{ declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, - Oam, + Declare, Oam, }, }; use zenoh_result::ZResult; @@ -64,6 +65,9 @@ impl Sources { } } +pub(crate) type SendDeclare<'a> = dyn FnMut(&Arc, RoutingContext) + + 'a; + pub(crate) trait HatTrait: HatBaseTrait + HatPubSubTrait + HatQueriesTrait {} pub(crate) trait HatBaseTrait { @@ -80,6 +84,7 @@ pub(crate) trait HatBaseTrait { tables: &mut Tables, tables_ref: &Arc, face: &mut Face, + send_declare: &mut SendDeclare, ) -> ZResult<()>; fn new_transport_unicast_face( @@ -88,6 +93,7 @@ pub(crate) trait HatBaseTrait { tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()>; fn handle_oam( @@ -96,6 +102,7 @@ pub(crate) trait HatBaseTrait { tables_ref: &Arc, oam: Oam, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()>; fn map_routing_context( @@ -122,9 +129,15 @@ pub(crate) trait HatBaseTrait { tables: &mut Tables, tables_ref: &Arc, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()>; - fn close_face(&self, tables: &TablesLock, face: &mut Arc); + fn close_face( + &self, + tables: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, + ); } pub(crate) trait HatPubSubTrait { @@ -135,6 +148,7 @@ pub(crate) trait HatPubSubTrait { res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ); fn undeclare_subscription( &self, @@ -142,6 +156,7 @@ pub(crate) trait HatPubSubTrait { face: &mut Arc, res: &mut Arc, node_id: NodeId, + send_declare: &mut SendDeclare, ); fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, 
Sources)>; @@ -165,6 +180,7 @@ pub(crate) trait HatQueriesTrait { res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ); fn undeclare_queryable( &self, @@ -172,6 +188,7 @@ pub(crate) trait HatQueriesTrait { face: &mut Arc, res: &mut Arc, node_id: NodeId, + send_declare: &mut SendDeclare, ); fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)>; diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 294932fe24..89270ffe2c 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -41,7 +41,7 @@ use super::{ face::FaceState, tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, - HatBaseTrait, HatTrait, + HatBaseTrait, HatTrait, SendDeclare, }; use std::{ any::Any, @@ -138,9 +138,10 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, _tables_ref: &Arc, face: &mut Face, + send_declare: &mut SendDeclare, ) -> ZResult<()> { - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); Ok(()) } @@ -150,18 +151,24 @@ impl HatBaseTrait for HatCode { _tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { if face.state.whatami != WhatAmI::Client { if let Some(net) = hat_mut!(tables).gossip.as_mut() { net.add_link(transport.clone()); } } - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); Ok(()) } - fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + fn close_face( + &self, + tables: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, + ) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = 
get_mut_unchecked(face); @@ -185,7 +192,7 @@ impl HatBaseTrait for HatCode { .drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -213,7 +220,7 @@ impl HatBaseTrait for HatCode { .drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -275,6 +282,7 @@ impl HatBaseTrait for HatCode { _tables_ref: &Arc, oam: Oam, transport: &TransportUnicast, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { if oam.id == OAM_LINKSTATE { if let ZExtBody::ZBuf(buf) = oam.body { @@ -313,6 +321,7 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, _tables_ref: &Arc, transport: &TransportUnicast, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { match (transport.get_zid(), transport.get_whatami()) { (Ok(zid), Ok(whatami)) => { diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index bbaf0f5bac..175ee8f0ca 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -17,7 +17,7 @@ use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; +use crate::net::routing::hat::{HatPubSubTrait, SendDeclare, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; @@ -40,6 
+40,7 @@ fn propagate_simple_subscription_to( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) @@ -48,19 +49,22 @@ fn propagate_simple_subscription_to( { face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); } } @@ -69,6 +73,7 @@ fn propagate_simple_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { for mut dst_face in tables .faces @@ -76,7 +81,14 @@ fn propagate_simple_subscription( .cloned() .collect::>>() { - propagate_simple_subscription_to(tables, &mut dst_face, res, sub_info, src_face); + propagate_simple_subscription_to( + tables, + &mut dst_face, + res, + sub_info, + src_face, + send_declare, + ); } } @@ -126,12 +138,13 @@ fn declare_client_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, + send_declare: &mut SendDeclare, ) { register_client_subscription(tables, face, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; - propagate_simple_subscription(tables, res, &propa_sub_info, face); + 
propagate_simple_subscription(tables, res, &propa_sub_info, face, send_declare); // This introduced a buffer overflow on windows // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -168,22 +181,29 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } -fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_subscription( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if face_hat!(face).local_subs.contains(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_subs.remove(res); } } @@ -193,6 +213,7 @@ pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -202,7 +223,7 @@ pub(super) fn undeclare_client_subscription( let mut client_subs = client_subs(res); if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); + propagate_forget_simple_subscription(tables, res, send_declare); } if client_subs.len() == 1 { let face 
= &mut client_subs[0]; @@ -210,18 +231,21 @@ pub(super) fn undeclare_client_subscription( && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_subs.remove(res); } @@ -232,11 +256,16 @@ fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { - undeclare_client_subscription(tables, face, res); + undeclare_client_subscription(tables, face, res, send_declare); } -pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn pubsub_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers mode: Mode::Push, @@ -248,7 +277,14 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .collect::>>() { for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + send_declare, + ); } 
} } @@ -261,8 +297,9 @@ impl HatPubSubTrait for HatCode { res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - declare_client_subscription(tables, face, res, sub_info); + declare_client_subscription(tables, face, res, sub_info, send_declare); } fn undeclare_subscription( @@ -271,8 +308,9 @@ impl HatPubSubTrait for HatCode { face: &mut Arc, res: &mut Arc, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - forget_client_subscription(tables, face, res); + forget_client_subscription(tables, face, res, send_declare); } fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index aeaee21409..6084164a80 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -17,7 +17,7 @@ use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; +use crate::net::routing::hat::{HatQueriesTrait, SendDeclare, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; @@ -79,6 +79,7 @@ fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, + send_declare: &mut SendDeclare, ) { let faces = tables.faces.values().cloned(); for mut dst_face in faces { @@ -94,19 +95,22 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: 
ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } } @@ -143,9 +147,10 @@ fn declare_client_queryable( face: &mut Arc, res: &mut Arc, qabl_info: &QueryableInfo, + send_declare: &mut SendDeclare, ) { register_client_queryable(tables, face, res, qabl_info); - propagate_simple_queryable(tables, res, Some(face)); + propagate_simple_queryable(tables, res, Some(face), send_declare); } #[inline] @@ -162,22 +167,29 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } -fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { +fn propagate_forget_simple_queryable( + tables: &mut Tables, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + 
ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_qabls.remove(res); } @@ -188,6 +200,7 @@ pub(super) fn undeclare_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -199,26 +212,29 @@ pub(super) fn undeclare_client_queryable( let mut client_qabls = client_qabls(res); if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); + propagate_forget_simple_queryable(tables, res, send_declare); } else { - propagate_simple_queryable(tables, res, None); + propagate_simple_queryable(tables, res, None, send_declare); } if client_qabls.len() == 1 { let face = &mut client_qabls[0]; if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_qabls.remove(res); } @@ -229,11 +245,16 @@ fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { - undeclare_client_queryable(tables, face, res); + undeclare_client_queryable(tables, face, res, 
send_declare); } -pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { +pub(super) fn queries_new_face( + tables: &mut Tables, + _face: &mut Arc, + send_declare: &mut SendDeclare, +) { for face in tables .faces .values() @@ -241,7 +262,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) .collect::>>() { for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); + propagate_simple_queryable(tables, qabl, Some(&mut face.clone()), send_declare); } } } @@ -258,8 +279,9 @@ impl HatQueriesTrait for HatCode { res: &mut Arc, qabl_info: &QueryableInfo, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - declare_client_queryable(tables, face, res, qabl_info); + declare_client_queryable(tables, face, res, qabl_info, send_declare); } fn undeclare_queryable( @@ -268,8 +290,9 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, res: &mut Arc, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - forget_client_queryable(tables, face, res); + forget_client_queryable(tables, face, res, send_declare); } fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 571c21bfed..3be278aa02 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -31,7 +31,7 @@ use super::{ face::FaceState, tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, - HatBaseTrait, HatTrait, + HatBaseTrait, HatTrait, SendDeclare, }; use crate::{ net::{ @@ -366,9 +366,10 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, _tables_ref: &Arc, face: &mut Face, + send_declare: &mut SendDeclare, ) -> ZResult<()> { - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); Ok(()) } @@ -378,6 +379,7 @@ 
impl HatBaseTrait for HatCode { tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { let link_id = match face.state.whatami { WhatAmI::Router => hat_mut!(tables) @@ -403,8 +405,8 @@ impl HatBaseTrait for HatCode { } face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); match face.state.whatami { WhatAmI::Router => { @@ -420,7 +422,12 @@ impl HatBaseTrait for HatCode { Ok(()) } - fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + fn close_face( + &self, + tables: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, + ) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); @@ -444,7 +451,7 @@ impl HatBaseTrait for HatCode { .drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -472,7 +479,7 @@ impl HatBaseTrait for HatCode { .drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -534,6 +541,7 @@ impl HatBaseTrait for HatCode { tables_ref: &Arc, oam: Oam, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { if oam.id == OAM_LINKSTATE { if let ZExtBody::ZBuf(buf) = oam.body { @@ -554,8 +562,18 @@ impl HatBaseTrait for HatCode { .link_states(list.link_states, zid) .removed_nodes { - pubsub_remove_node(tables, 
&removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); } if hat!(tables).full_net(WhatAmI::Peer) { @@ -577,11 +595,13 @@ impl HatBaseTrait for HatCode { tables, &removed_node.zid, WhatAmI::Peer, + send_declare, ); queries_remove_node( tables, &removed_node.zid, WhatAmI::Peer, + send_declare, ); } @@ -598,11 +618,13 @@ impl HatBaseTrait for HatCode { tables, &updated_node.zid, &updated_node.links, + send_declare, ); queries_linkstate_change( tables, &updated_node.zid, &updated_node.links, + send_declare, ); } } @@ -650,6 +672,7 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, tables_ref: &Arc, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { match (transport.get_zid(), transport.get_whatami()) { (Ok(zid), Ok(whatami)) => { @@ -661,8 +684,18 @@ impl HatBaseTrait for HatCode { .unwrap() .remove_link(&zid) { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); } if hat!(tables).full_net(WhatAmI::Peer) { @@ -683,8 +716,18 @@ impl HatBaseTrait for HatCode { .unwrap() .remove_link(&zid) { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + send_declare, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + send_declare, + ); } hat_mut!(tables).shared_nodes = shared_nodes( diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs 
b/zenoh/src/net/routing/hat/router/pubsub.rs index e8c6cb4e6a..d6f1f4fbc1 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -19,7 +19,7 @@ use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; +use crate::net::routing::hat::{HatPubSubTrait, SendDeclare, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; @@ -86,6 +86,7 @@ fn propagate_simple_subscription_to( sub_info: &SubscriberInfo, src_face: &mut Arc, full_peer_net: bool, + send_declare: &mut SendDeclare, ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) @@ -101,19 +102,22 @@ fn propagate_simple_subscription_to( { face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); } } @@ -122,6 +126,7 @@ fn propagate_simple_subscription( res: &Arc, sub_info: 
&SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); for mut dst_face in tables @@ -137,6 +142,7 @@ fn propagate_simple_subscription( sub_info, src_face, full_peer_net, + send_declare, ); } } @@ -185,6 +191,7 @@ fn register_router_subscription( res: &mut Arc, sub_info: &SubscriberInfo, router: ZenohId, + send_declare: &mut SendDeclare, ) { if !res_hat!(res).router_subs.contains(&router) { // Register router subscription @@ -207,7 +214,7 @@ fn register_router_subscription( } // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); + propagate_simple_subscription(tables, res, sub_info, face, send_declare); } fn declare_router_subscription( @@ -216,8 +223,9 @@ fn declare_router_subscription( res: &mut Arc, sub_info: &SubscriberInfo, router: ZenohId, + send_declare: &mut SendDeclare, ) { - register_router_subscription(tables, face, res, sub_info, router); + register_router_subscription(tables, face, res, sub_info, router, send_declare); } fn register_peer_subscription( @@ -246,12 +254,13 @@ fn declare_peer_subscription( res: &mut Arc, sub_info: &SubscriberInfo, peer: ZenohId, + send_declare: &mut SendDeclare, ) { register_peer_subscription(tables, face, res, sub_info, peer); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; let zid = tables.zid; - register_router_subscription(tables, face, res, &propa_sub_info, zid); + register_router_subscription(tables, face, res, &propa_sub_info, zid, send_declare); } fn register_client_subscription( @@ -300,12 +309,13 @@ fn declare_client_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, + send_declare: &mut SendDeclare, ) { register_client_subscription(tables, face, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; let zid = tables.zid; - register_router_subscription(tables, face, res, &propa_sub_info, zid); + 
register_router_subscription(tables, face, res, &propa_sub_info, zid, send_declare); } #[inline] @@ -380,28 +390,39 @@ fn send_forget_sourced_subscription_to_net_children( } } -fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_subscription( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if face_hat!(face).local_subs.contains(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_subs.remove(res); } } } -fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_subscription_to_peers( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { if !hat!(tables).full_net(WhatAmI::Peer) && res_hat!(res).router_subs.len() == 1 && res_hat!(res).router_subs.contains(&tables.zid) @@ -423,18 +444,21 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< }) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: 
DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(&mut face).local_subs.remove(res); } @@ -478,7 +502,12 @@ fn propagate_forget_sourced_subscription( } } -fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { +fn unregister_router_subscription( + tables: &mut Tables, + res: &mut Arc, + router: &ZenohId, + send_declare: &mut SendDeclare, +) { tracing::debug!( "Unregister router subscription {} (router: {})", res.expr(), @@ -494,10 +523,10 @@ fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, if hat_mut!(tables).full_net(WhatAmI::Peer) { undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); } - propagate_forget_simple_subscription(tables, res); + propagate_forget_simple_subscription(tables, res, send_declare); } - propagate_forget_simple_subscription_to_peers(tables, res); + propagate_forget_simple_subscription_to_peers(tables, res, send_declare); } fn undeclare_router_subscription( @@ -505,9 +534,10 @@ fn undeclare_router_subscription( face: Option<&Arc>, res: &mut Arc, router: &ZenohId, + send_declare: &mut SendDeclare, ) { if res_hat!(res).router_subs.contains(router) { - unregister_router_subscription(tables, res, router); + unregister_router_subscription(tables, res, router, send_declare); propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); } } @@ -517,8 +547,9 @@ fn forget_router_subscription( face: &mut Arc, res: &mut Arc, router: &ZenohId, + 
send_declare: &mut SendDeclare, ) { - undeclare_router_subscription(tables, Some(face), res, router); + undeclare_router_subscription(tables, Some(face), res, router, send_declare); } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { @@ -553,13 +584,14 @@ fn forget_peer_subscription( face: &mut Arc, res: &mut Arc, peer: &ZenohId, + send_declare: &mut SendDeclare, ) { undeclare_peer_subscription(tables, Some(face), res, peer); let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); let peer_subs = remote_peer_subs(tables, res); let zid = tables.zid; if !client_subs && !peer_subs { - undeclare_router_subscription(tables, None, res, &zid); + undeclare_router_subscription(tables, None, res, &zid, send_declare); } } @@ -567,6 +599,7 @@ pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -578,9 +611,9 @@ pub(super) fn undeclare_client_subscription( let router_subs = remote_router_subs(tables, res); let peer_subs = remote_peer_subs(tables, res); if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone()); + undeclare_router_subscription(tables, None, res, &tables.zid.clone(), send_declare); } else { - propagate_forget_simple_subscription_to_peers(tables, res); + propagate_forget_simple_subscription_to_peers(tables, res, send_declare); } if client_subs.len() == 1 && !router_subs && !peer_subs { let face = &mut client_subs[0]; @@ -588,18 +621,21 @@ pub(super) fn undeclare_client_subscription( && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: 
ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_subs.remove(res); } @@ -610,11 +646,16 @@ fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { - undeclare_client_subscription(tables, face, res); + undeclare_client_subscription(tables, face, res, send_declare); } -pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn pubsub_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers mode: Mode::Push, @@ -624,19 +665,22 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in &hat!(tables).router_subs { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: 
ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); } } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { for sub in &hat!(tables).router_subs { @@ -651,25 +695,33 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); } } } } -pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { +pub(super) fn pubsub_remove_node( + tables: &mut Tables, + node: &ZenohId, + net_type: WhatAmI, + send_declare: &mut SendDeclare, +) { match net_type { WhatAmI::Router => { for mut res in hat!(tables) @@ -679,7 +731,7 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: .cloned() .collect::>>() { - unregister_router_subscription(tables, &mut res, node); + unregister_router_subscription(tables, &mut res, node, send_declare); update_matches_data_routes(tables, &mut res); Resource::clean(&mut res) @@ -697,7 +749,13 @@ pub(super) fn 
pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); let peer_subs = remote_peer_subs(tables, &res); if !client_subs && !peer_subs { - undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); + undeclare_router_subscription( + tables, + None, + &mut res, + &tables.zid.clone(), + send_declare, + ); } update_matches_data_routes(tables, &mut res); @@ -757,7 +815,12 @@ pub(super) fn pubsub_tree_change( update_data_routes_from(tables, &mut tables.root_res.clone()); } -pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { +pub(super) fn pubsub_linkstate_change( + tables: &mut Tables, + zid: &ZenohId, + links: &[ZenohId], + send_declare: &mut SendDeclare, +) { if let Some(src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { for res in &face_hat!(src_face).remote_subs { @@ -791,20 +854,23 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; if forget { let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber( + UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }, + res.expr(), + ), + ); face_hat_mut!(dst_face).local_subs.remove(res); } @@ 
-816,19 +882,24 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers mode: Mode::Push, }; - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber( + DeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + wire_expr: key_expr, + ext_info: sub_info, + }, + ), + }, + res.expr(), + ), + ); } } } @@ -879,23 +950,24 @@ impl HatPubSubTrait for HatCode { res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ) { match face.whatami { WhatAmI::Router => { if let Some(router) = get_router(tables, face, node_id) { - declare_router_subscription(tables, face, res, sub_info, router) + declare_router_subscription(tables, face, res, sub_info, router, send_declare) } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_subscription(tables, face, res, sub_info, peer) + declare_peer_subscription(tables, face, res, sub_info, peer, send_declare) } } else { - declare_client_subscription(tables, face, res, sub_info) + declare_client_subscription(tables, face, res, sub_info, send_declare) } } - _ => declare_client_subscription(tables, face, res, sub_info), + _ => declare_client_subscription(tables, face, res, sub_info, send_declare), } } @@ -905,23 +977,24 @@ impl HatPubSubTrait for 
HatCode { face: &mut Arc, res: &mut Arc, node_id: NodeId, + send_declare: &mut SendDeclare, ) { match face.whatami { WhatAmI::Router => { if let Some(router) = get_router(tables, face, node_id) { - forget_router_subscription(tables, face, res, &router) + forget_router_subscription(tables, face, res, &router, send_declare) } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, res, &peer) + forget_peer_subscription(tables, face, res, &peer, send_declare) } } else { - forget_client_subscription(tables, face, res) + forget_client_subscription(tables, face, res, send_declare) } } - _ => forget_client_subscription(tables, face, res), + _ => forget_client_subscription(tables, face, res, send_declare), } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 76ddba7235..ac7840fbe8 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -19,7 +19,7 @@ use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; +use crate::net::routing::hat::{HatQueriesTrait, SendDeclare, Sources}; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; @@ -233,6 +233,7 @@ fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, + send_declare: &mut SendDeclare, ) { let full_peers_net = hat!(tables).full_net(WhatAmI::Peer); let faces = tables.faces.values().cloned(); @@ -256,19 +257,22 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), info); let key_expr = 
Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } } @@ -317,6 +321,7 @@ fn register_router_queryable( res: &mut Arc, qabl_info: &QueryableInfo, router: ZenohId, + send_declare: &mut SendDeclare, ) { let current_info = res_hat!(res).router_qabls.get(&router); if current_info.is_none() || current_info.unwrap() != qabl_info { @@ -351,7 +356,7 @@ fn register_router_queryable( } // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); + propagate_simple_queryable(tables, res, face, send_declare); } fn declare_router_queryable( @@ -360,8 +365,9 @@ fn declare_router_queryable( res: &mut Arc, qabl_info: &QueryableInfo, router: ZenohId, + send_declare: &mut SendDeclare, ) { - register_router_queryable(tables, Some(face), res, qabl_info, router); + register_router_queryable(tables, Some(face), res, qabl_info, router, send_declare); } fn register_peer_queryable( @@ -391,12 +397,13 @@ fn declare_peer_queryable( res: &mut Arc, qabl_info: &QueryableInfo, peer: ZenohId, + send_declare: &mut SendDeclare, ) { let mut face = Some(face); register_peer_queryable(tables, face.as_deref_mut(), res, qabl_info, peer); let local_info = local_router_qabl_info(tables, res); let zid = tables.zid; - register_router_queryable(tables, face, res, &local_info, zid); + 
register_router_queryable(tables, face, res, &local_info, zid, send_declare); } fn register_client_queryable( @@ -431,11 +438,12 @@ fn declare_client_queryable( face: &mut Arc, res: &mut Arc, qabl_info: &QueryableInfo, + send_declare: &mut SendDeclare, ) { register_client_queryable(tables, face, res, qabl_info); let local_details = local_router_qabl_info(tables, res); let zid = tables.zid; - register_router_queryable(tables, Some(face), res, &local_details, zid); + register_router_queryable(tables, Some(face), res, &local_details, zid, send_declare); } #[inline] @@ -510,29 +518,40 @@ fn send_forget_sourced_queryable_to_net_children( } } -fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { +fn propagate_forget_simple_queryable( + tables: &mut Tables, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_qabls.remove(res); } } } -fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc) { +fn propagate_forget_simple_queryable_to_peers( + tables: &mut Tables, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { if 
!hat!(tables).full_net(WhatAmI::Peer) && res_hat!(res).router_qabls.len() == 1 && res_hat!(res).router_qabls.contains_key(&tables.zid) @@ -554,18 +573,21 @@ fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc }) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(&mut face).local_qabls.remove(res); } @@ -609,7 +631,12 @@ fn propagate_forget_sourced_queryable( } } -fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { +fn unregister_router_queryable( + tables: &mut Tables, + res: &mut Arc, + router: &ZenohId, + send_declare: &mut SendDeclare, +) { tracing::debug!( "Unregister router queryable {} (router: {})", res.expr(), @@ -625,10 +652,10 @@ fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, rou if hat!(tables).full_net(WhatAmI::Peer) { undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); } - propagate_forget_simple_queryable(tables, res); + propagate_forget_simple_queryable(tables, res, send_declare); } - propagate_forget_simple_queryable_to_peers(tables, res); + propagate_forget_simple_queryable_to_peers(tables, res, send_declare); } fn undeclare_router_queryable( @@ -636,9 +663,10 @@ fn undeclare_router_queryable( face: 
Option<&Arc>, res: &mut Arc, router: &ZenohId, + send_declare: &mut SendDeclare, ) { if res_hat!(res).router_qabls.contains_key(router) { - unregister_router_queryable(tables, res, router); + unregister_router_queryable(tables, res, router, send_declare); propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); } } @@ -648,8 +676,9 @@ fn forget_router_queryable( face: &mut Arc, res: &mut Arc, router: &ZenohId, + send_declare: &mut SendDeclare, ) { - undeclare_router_queryable(tables, Some(face), res, router); + undeclare_router_queryable(tables, Some(face), res, router, send_declare); } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { @@ -680,6 +709,7 @@ fn forget_peer_queryable( face: &mut Arc, res: &mut Arc, peer: &ZenohId, + send_declare: &mut SendDeclare, ) { undeclare_peer_queryable(tables, Some(face), res, peer); @@ -687,10 +717,10 @@ fn forget_peer_queryable( let peer_qabls = remote_peer_qabls(tables, res); let zid = tables.zid; if !client_qabls && !peer_qabls { - undeclare_router_queryable(tables, None, res, &zid); + undeclare_router_queryable(tables, None, res, &zid, send_declare); } else { let local_info = local_router_qabl_info(tables, res); - register_router_queryable(tables, None, res, &local_info, zid); + register_router_queryable(tables, None, res, &local_info, zid, send_declare); } } @@ -698,6 +728,7 @@ pub(super) fn undeclare_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -712,29 +743,32 @@ pub(super) fn undeclare_client_queryable( let peer_qabls = remote_peer_qabls(tables, res); if client_qabls.is_empty() && !peer_qabls { - undeclare_router_queryable(tables, None, res, &tables.zid.clone()); + undeclare_router_queryable(tables, None, res, &tables.zid.clone(), send_declare); } else 
{ let local_info = local_router_qabl_info(tables, res); - register_router_queryable(tables, None, res, &local_info, tables.zid); - propagate_forget_simple_queryable_to_peers(tables, res); + register_router_queryable(tables, None, res, &local_info, tables.zid, send_declare); + propagate_forget_simple_queryable_to_peers(tables, res, send_declare); } if client_qabls.len() == 1 && !router_qabls && !peer_qabls { let face = &mut client_qabls[0]; if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); face_hat_mut!(face).local_qabls.remove(res); } @@ -745,30 +779,38 @@ fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { - undeclare_client_queryable(tables, face, res); + undeclare_client_queryable(tables, face, res, send_declare); } -pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn queries_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { if face.whatami == WhatAmI::Client { for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let 
key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); } } } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { @@ -785,25 +827,33 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); } } } } -pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { +pub(super) fn queries_remove_node( + tables: &mut Tables, + node: &ZenohId, + 
net_type: WhatAmI, + send_declare: &mut SendDeclare, +) { match net_type { WhatAmI::Router => { let mut qabls = vec![]; @@ -815,7 +865,7 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: } } for mut res in qabls { - unregister_router_queryable(tables, &mut res, node); + unregister_router_queryable(tables, &mut res, node, send_declare); update_matches_query_routes(tables, &res); Resource::clean(&mut res); @@ -836,10 +886,23 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); let peer_qabls = remote_peer_qabls(tables, &res); if !client_qabls && !peer_qabls { - undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); + undeclare_router_queryable( + tables, + None, + &mut res, + &tables.zid.clone(), + send_declare, + ); } else { let local_info = local_router_qabl_info(tables, &res); - register_router_queryable(tables, None, &mut res, &local_info, tables.zid); + register_router_queryable( + tables, + None, + &mut res, + &local_info, + tables.zid, + send_declare, + ); } update_matches_query_routes(tables, &res); @@ -850,7 +913,12 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: } } -pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { +pub(super) fn queries_linkstate_change( + tables: &mut Tables, + zid: &ZenohId, + links: &[ZenohId], + send_declare: &mut SendDeclare, +) { if let Some(src_face) = tables.get_face(zid) { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { for res in &face_hat!(src_face).remote_qabls { @@ -884,20 +952,23 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links }; if forget { let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: 
ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable( - UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable( + UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }, + res.expr(), + ), + ); face_hat_mut!(dst_face).local_qabls.remove(res); } @@ -908,19 +979,22 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } } @@ -1027,23 +1101,24 @@ impl HatQueriesTrait for HatCode { res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ) { match face.whatami { WhatAmI::Router => { if let Some(router) = get_router(tables, face, node_id) { - declare_router_queryable(tables, face, res, qabl_info, router) + 
declare_router_queryable(tables, face, res, qabl_info, router, send_declare) } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_queryable(tables, face, res, qabl_info, peer) + declare_peer_queryable(tables, face, res, qabl_info, peer, send_declare) } } else { - declare_client_queryable(tables, face, res, qabl_info) + declare_client_queryable(tables, face, res, qabl_info, send_declare) } } - _ => declare_client_queryable(tables, face, res, qabl_info), + _ => declare_client_queryable(tables, face, res, qabl_info, send_declare), } } @@ -1053,23 +1128,24 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, res: &mut Arc, node_id: NodeId, + send_declare: &mut SendDeclare, ) { match face.whatami { WhatAmI::Router => { if let Some(router) = get_router(tables, face, node_id) { - forget_router_queryable(tables, face, res, &router) + forget_router_queryable(tables, face, res, &router, send_declare) } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, res, &peer) + forget_peer_queryable(tables, face, res, &peer, send_declare) } } else { - forget_client_queryable(tables, face, res) + forget_client_queryable(tables, face, res, send_declare) } } - _ => forget_client_queryable(tables, face, res), + _ => forget_client_queryable(tables, face, res, send_declare), } } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 87766f021b..d8a5ee4526 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -101,11 +101,17 @@ impl Router { tables: self.tables.clone(), state: newface, }; + let mut declares = vec![]; ctrl_lock - .new_local_face(&mut tables, &self.tables, &mut face) + .new_local_face(&mut tables, &self.tables, &mut face, &mut |p, m| { + declares.push((p.clone(), m)) + }) .unwrap(); drop(tables); drop(ctrl_lock); + for (p, m) in 
declares { + p.send_declare(m); + } Arc::new(face) } @@ -157,7 +163,19 @@ impl Router { let _ = mux.face.set(Face::downgrade(&face)); - ctrl_lock.new_transport_unicast_face(&mut tables, &self.tables, &mut face, &transport)?; + let mut declares = vec![]; + ctrl_lock.new_transport_unicast_face( + &mut tables, + &self.tables, + &mut face, + &transport, + &mut |p, m| declares.push((p.clone(), m)), + )?; + drop(tables); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } Ok(Arc::new(DeMux::new(face, Some(transport), ingress))) } diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index bc889d720e..b6da5a2391 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -69,6 +69,7 @@ fn base_test() { &WireExpr::from(1).with_suffix("four/five"), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); Tables::print(&zread!(tables.tables)); @@ -244,6 +245,7 @@ async fn clean_test() { &"todrop1/todrop11".into(), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); let optres2 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1/todrop11") .map(|res| Arc::downgrade(&res)); @@ -258,6 +260,7 @@ async fn clean_test() { &WireExpr::from(1).with_suffix("/todrop12"), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); let optres3 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1/todrop12") .map(|res| Arc::downgrade(&res)); @@ -272,6 +275,7 @@ async fn clean_test() { &mut face0.upgrade().unwrap(), &WireExpr::from(1).with_suffix("/todrop12"), NodeId::default(), + &mut |p, m| p.send_declare(m), ); println!("COUNT2: {}", res3.strong_count()); @@ -286,6 +290,7 @@ async fn clean_test() { &mut face0.upgrade().unwrap(), &"todrop1/todrop11".into(), NodeId::default(), + &mut |p, m| p.send_declare(m), ); assert!(res1.upgrade().is_some()); assert!(res2.upgrade().is_none()); @@ -305,6 +310,7 @@ async fn clean_test() { &"todrop3".into(), &sub_info, 
NodeId::default(), + &mut |p, m| p.send_declare(m), ); let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop3") .map(|res| Arc::downgrade(&res)); @@ -318,6 +324,7 @@ async fn clean_test() { &mut face0.upgrade().unwrap(), &"todrop3".into(), NodeId::default(), + &mut |p, m| p.send_declare(m), ); assert!(res1.upgrade().is_some()); @@ -334,6 +341,7 @@ async fn clean_test() { &"todrop5".into(), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); declare_subscription( zlock!(tables.ctrl_lock).as_ref(), @@ -342,6 +350,7 @@ async fn clean_test() { &"todrop6".into(), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop4") @@ -519,6 +528,7 @@ fn client_test() { &WireExpr::from(11).with_suffix("/**"), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); register_expr( &tables, @@ -566,6 +576,7 @@ fn client_test() { &WireExpr::from(21).with_suffix("/**"), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); register_expr( &tables, @@ -613,6 +624,7 @@ fn client_test() { &WireExpr::from(31).with_suffix("/**"), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); primitives0.clear_data(); From 2500e5a62d8940cbfbc36f27c07360f91ba28c2d Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Thu, 20 Jun 2024 18:34:49 +0800 Subject: [PATCH 12/29] feat: make `TerminatableTask` terminate itself when dropped (#1151) --- commons/zenoh-task/src/lib.rs | 26 ++++++++++++------- zenoh-ext/src/publication_cache.rs | 2 +- .../src/net/routing/hat/linkstate_peer/mod.rs | 10 ------- zenoh/src/net/routing/hat/router/mod.rs | 14 ---------- 4 files changed, 18 insertions(+), 34 deletions(-) diff --git a/commons/zenoh-task/src/lib.rs b/commons/zenoh-task/src/lib.rs index 5f7c3c26d2..a733a3de13 100644 --- a/commons/zenoh-task/src/lib.rs +++ b/commons/zenoh-task/src/lib.rs @@ -130,10 +130,16 @@ impl TaskController { } pub struct 
TerminatableTask { - handle: JoinHandle<()>, + handle: Option>, token: CancellationToken, } +impl Drop for TerminatableTask { + fn drop(&mut self) { + self.terminate(std::time::Duration::from_secs(10)); + } +} + impl TerminatableTask { pub fn create_cancellation_token() -> CancellationToken { CancellationToken::new() @@ -147,7 +153,7 @@ impl TerminatableTask { T: Send + 'static, { TerminatableTask { - handle: rt.spawn(future.map(|_f| ())), + handle: Some(rt.spawn(future.map(|_f| ()))), token, } } @@ -168,24 +174,26 @@ impl TerminatableTask { }; TerminatableTask { - handle: rt.spawn(task), + handle: Some(rt.spawn(task)), token, } } /// Attempts to terminate the task. /// Returns true if task completed / aborted within timeout duration, false otherwise. - pub fn terminate(self, timeout: Duration) -> bool { + pub fn terminate(&mut self, timeout: Duration) -> bool { ResolveFuture::new(async move { self.terminate_async(timeout).await }).res_sync() } /// Async version of [`TerminatableTask::terminate()`]. 
- pub async fn terminate_async(self, timeout: Duration) -> bool { + pub async fn terminate_async(&mut self, timeout: Duration) -> bool { self.token.cancel(); - if tokio::time::timeout(timeout, self.handle).await.is_err() { - tracing::error!("Failed to terminate the task"); - return false; - }; + if let Some(handle) = self.handle.take() { + if tokio::time::timeout(timeout, handle).await.is_err() { + tracing::error!("Failed to terminate the task"); + return false; + }; + } true } } diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 431ccd2dde..821e621482 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -257,7 +257,7 @@ impl<'a> PublicationCache<'a> { let PublicationCache { _queryable, local_sub, - task, + mut task, } = self; _queryable.undeclare().res_async().await?; local_sub.undeclare().res_async().await?; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index ad4e1667f0..b1eeca261f 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -47,7 +47,6 @@ use std::{ any::Any, collections::{HashMap, HashSet}, sync::Arc, - time::Duration, }; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ @@ -116,15 +115,6 @@ struct HatTables { peers_trees_task: Option, } -impl Drop for HatTables { - fn drop(&mut self) { - if self.peers_trees_task.is_some() { - let task = self.peers_trees_task.take().unwrap(); - task.terminate(Duration::from_secs(10)); - } - } -} - impl HatTables { fn new() -> Self { Self { diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 3be278aa02..2b988917c2 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -52,7 +52,6 @@ use std::{ collections::{hash_map::DefaultHasher, HashMap, HashSet}, 
hash::Hasher, sync::Arc, - time::Duration, }; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ @@ -127,19 +126,6 @@ struct HatTables { router_peers_failover_brokering: bool, } -impl Drop for HatTables { - fn drop(&mut self) { - if self.peers_trees_task.is_some() { - let task = self.peers_trees_task.take().unwrap(); - task.terminate(Duration::from_secs(10)); - } - if self.routers_trees_task.is_some() { - let task = self.routers_trees_task.take().unwrap(); - task.terminate(Duration::from_secs(10)); - } - } -} - impl HatTables { fn new(router_peers_failover_brokering: bool) -> Self { Self { From 869ace655e7b324b9a76e004f21b3b6f69a6b338 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 2 Jul 2024 11:09:09 +0200 Subject: [PATCH 13/29] Fix bug in keyexpr::includes leading to call get_unchecked on empty array UB (#1208) --- commons/zenoh-keyexpr/src/key_expr/include.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-keyexpr/src/key_expr/include.rs b/commons/zenoh-keyexpr/src/key_expr/include.rs index ca9efaee2d..15e4f50f40 100644 --- a/commons/zenoh-keyexpr/src/key_expr/include.rs +++ b/commons/zenoh-keyexpr/src/key_expr/include.rs @@ -41,7 +41,7 @@ impl Includer<&[u8], &[u8]> for LTRIncluder { if (lempty && !right.has_verbatim()) || (!lempty && self.includes(lrest, right)) { return true; } - if unsafe { right.has_direct_verbatim_non_empty() } { + if right.has_direct_verbatim() { return false; } right = Split::split_once(right, &DELIMITER).1; From b93ca8494aa7fabfab22583cb0e23b5e5d9e905e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 3 Jul 2024 14:37:27 +0200 Subject: [PATCH 14/29] REST plugin uses unbounded flume channels for queries (#1213) --- plugins/zenoh-plugin-rest/src/lib.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index fa5c3c6ac3..f683e3992a 100644 
--- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -380,7 +380,12 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result Date: Mon, 8 Jul 2024 11:34:30 +0200 Subject: [PATCH 15/29] fix: typo in selector.rs (#1228) --- zenoh/src/selector.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index a5f761a323..05a7349881 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -30,7 +30,7 @@ use std::{ /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters -/// with a few intendend uses: +/// with a few intended uses: /// - specifying arguments to a queryable, allowing the passing of Remote Procedure Call parameters /// - filtering by value, /// - filtering by metadata, such as the timestamp of a value, From 0a969cb8bbb670a02b16c0373b133b00d58f1aaf Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Thu, 25 Jul 2024 23:09:03 +0800 Subject: [PATCH 16/29] fix: zenohd --cfg (#1263) * fix: zenohd --cfg * ci: trigger * Update zenohd/src/main.rs --------- Co-authored-by: Luca Cominardi --- README.md | 2 +- zenohd/src/main.rs | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index af08db7260..6ecdf65d88 100644 --- a/README.md +++ b/README.md @@ -145,7 +145,7 @@ See other examples of Zenoh usage in [examples/](examples) * `--adminspace-permissions <[r|w|rw|none]>`: Configure the read and/or write permissions on the admin space. Default is read only. * `-c, --config `: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file. All properties of this configuration are optional, so you may not need such a large configuration for your use-case. 
- * `--cfg :`: allows you to change specific parts of the configuration right after it has been constructed. VALUE must be a valid JSON5 value, and key must be a path through the configuration file, where each element is separated by a `/`. When inserting in parts of the config that are arrays, you may use indexes, or may use `+` to indicate that you want to append your value to the array. `--cfg` passed values will always override any previously existing value for their key in the configuration. + * `--cfg [:]...`: allows you to change specific parts of the configuration right after it has been constructed. VALUE must be a valid JSON5 value, and key must be a path through the configuration file, where each element is separated by a `/`. When inserting in parts of the config that are arrays, you may use indexes, or may use `+` to indicate that you want to append your value to the array. `--cfg` passed values will always override any previously existing value for their key in the configuration. * `-l, --listen ...`: An endpoint on which this router will listen for incoming sessions. Repeat this option to open several listeners. By default, `tcp/[::]:7447` is used. The following endpoints are currently supported: - TCP: `tcp/:` diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 850676d905..18ac0c4e07 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -271,6 +271,8 @@ fn config_from_args(args: &Args) -> Config { } Err(e) => tracing::warn!("Couldn't perform configuration {}: {}", json, e), } + } else { + panic!("--cfg accepts KEY:VALUE pairs. 
{} is not a valid KEY:VALUE pair.", json) } } tracing::debug!("Config: {:?}", &config); From 65f7f88a77bdf7b98283b06cbea80eb1a3756f37 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 26 Jul 2024 09:41:46 +0200 Subject: [PATCH 17/29] Fix failover brokering bug reacting to linkstate changes (#1272) * Change missleading log * Fix failover brokering bug reacting to linkstate changes * Retrigger CI --------- Co-authored-by: Luca Cominardi --- zenoh/src/net/routing/hat/router/mod.rs | 5 +- zenoh/src/net/routing/hat/router/pubsub.rs | 141 ++++++++++----------- 2 files changed, 67 insertions(+), 79 deletions(-) diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 2b988917c2..b4b88d66e9 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -232,8 +232,9 @@ impl HatTables { .as_ref() .map(|net| { let links = net.get_links(peer1); - tracing::debug!("failover_brokering {} {} ({:?})", peer1, peer2, links); - HatTables::failover_brokering_to(links, peer2) + let res = HatTables::failover_brokering_to(links, peer2); + tracing::trace!("failover_brokering {} {} : {}", peer1, peer2, res); + res }) .unwrap_or(false) } diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index d6f1f4fbc1..b223050a42 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -821,86 +821,73 @@ pub(super) fn pubsub_linkstate_change( links: &[ZenohId], send_declare: &mut SendDeclare, ) { - if let Some(src_face) = tables.get_face(zid).cloned() { + if let Some(mut src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in &face_hat!(src_face).remote_subs { - let client_subs = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); - if !remote_router_subs(tables, res) && 
!client_subs { - for ctx in get_mut_unchecked(&mut res.clone()) + let to_forget = face_hat!(src_face) + .local_subs + .iter() + .filter(|res| { + let client_subs = res .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_subs.contains(res) { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.subs.is_some() - && HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }, - res.expr(), - ), - ); + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); + !remote_router_subs(tables, res) + && !client_subs + && !res.session_ctxs.values().any(|ctx| { + ctx.face.whatami == WhatAmI::Peer + && src_face.zid != ctx.face.zid + && HatTables::failover_brokering_to(links, ctx.face.zid) + }) + }) + .cloned() + .collect::>>(); + for res in to_forget { + let wire_expr = Resource::get_best_key(&res, "", src_face.id); + send_declare( + &src_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType 
{ wire_expr }, + }), + }, + res.expr(), + ), + ); - face_hat_mut!(dst_face).local_subs.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - face_hat_mut!(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, - }; - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber( - DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: sub_info, - }, - ), - }, - res.expr(), - ), - ); - } + face_hat_mut!(&mut src_face).local_subs.remove(&res); + } + + for dst_face in tables.faces.values_mut() { + if HatTables::failover_brokering_to(links, dst_face.zid) { + for res in &face_hat!(src_face).remote_subs { + if !face_hat!(dst_face).local_subs.contains(res) { + face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let key_expr = Resource::decl_key(res, dst_face); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + mode: Mode::Push, + }; + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + res.expr(), + ), + ); } } } From 3b9e82484c323f2b870dcd9bd04dec596b3586fd Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 26 Jul 2024 09:49:05 +0200 Subject: [PATCH 18/29] 
Code format --- zenohd/src/main.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 18ac0c4e07..c32c7d15ca 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -272,7 +272,10 @@ fn config_from_args(args: &Args) -> Config { Err(e) => tracing::warn!("Couldn't perform configuration {}: {}", json, e), } } else { - panic!("--cfg accepts KEY:VALUE pairs. {} is not a valid KEY:VALUE pair.", json) + panic!( + "--cfg accepts KEY:VALUE pairs. {} is not a valid KEY:VALUE pair.", + json + ) } } tracing::debug!("Config: {:?}", &config); From 664915af4b9ed0058f82fc1e7e6f56edf50bebdb Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 26 Jul 2024 10:35:54 +0200 Subject: [PATCH 19/29] Fix clippy warnings --- commons/zenoh-keyexpr/src/key_expr/borrowed.rs | 1 + commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs | 2 +- commons/zenoh-keyexpr/src/lib.rs | 2 +- commons/zenoh-protocol/src/network/request.rs | 2 ++ 4 files changed, 5 insertions(+), 2 deletions(-) diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 4291883492..010f57e6bb 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -176,6 +176,7 @@ impl keyexpr { /// For instance, if `self` is `"a/**/c/*" and `prefix` is `a/b/c` then: /// - the `prefix` matches `"a/**/c"` leading to a result of `"*"` when stripped from `self` /// - the `prefix` matches `"a/**"` leading to a result of `"**/c/*"` when stripped from `self` + /// /// So the result is `["*", "**/c/*"]`. /// If `prefix` cannot match the beginning of `self`, an empty list is reuturned. /// diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs index 5d7991289e..db18f6ab67 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs @@ -36,7 +36,7 @@ //! 
KeTrees come in two flavours: //! - [`KeBoxTree`] is the easier flavour. Much like a HashMap, it uniquely owns all of its nodes and data. //! - [`KeArcTree`] allows the shared ownership of nodes, allowing you to store subsections of the tree elsewhere -//! without worrying about lifetimes. +//! without worrying about lifetimes. //! //! # Usage //! KeTrees were designed to maximize code reuse. As such, their core properties are reflected through the [`IKeyExprTree`] and [`IKeyExprTreeMut`] traits. diff --git a/commons/zenoh-keyexpr/src/lib.rs b/commons/zenoh-keyexpr/src/lib.rs index f80a9c177c..c5e444b29e 100644 --- a/commons/zenoh-keyexpr/src/lib.rs +++ b/commons/zenoh-keyexpr/src/lib.rs @@ -24,7 +24,7 @@ //! - [`keyexpr`] is the equivalent of a [`str`], //! - [`OwnedKeyExpr`] works like an [`Arc`], //! - [`KeyExpr`](https://docs.rs/zenoh/latest/zenoh/key_expr/struct.KeyExpr.html) works like a [`Cow`], but also stores some additional context internal to Zenoh to optimize -//! routing and network usage. +//! routing and network usage. //! //! All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, //! or even if a [`keyexpr::includes`] another. diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs index 9e0137ea3a..ccd64ae5cd 100644 --- a/commons/zenoh-protocol/src/network/request.rs +++ b/commons/zenoh-protocol/src/network/request.rs @@ -81,6 +81,7 @@ pub mod ext { pub type NodeIdType = crate::network::ext::NodeIdType<{ NodeId::ID }>; pub type Target = zextz64!(0x4, true); + /// ```text /// - Target (0x03) /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ @@ -88,6 +89,7 @@ pub mod ext { /// +---------------+ /// /// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`. 
+ /// ``` pub type TargetType = QueryTarget; impl TargetType { From 021f7c689337923241788e7c753404ce2b9feb84 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 26 Jul 2024 10:39:35 +0200 Subject: [PATCH 20/29] Code format --- commons/zenoh-keyexpr/src/key_expr/borrowed.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 010f57e6bb..53d30d625e 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -176,7 +176,7 @@ impl keyexpr { /// For instance, if `self` is `"a/**/c/*" and `prefix` is `a/b/c` then: /// - the `prefix` matches `"a/**/c"` leading to a result of `"*"` when stripped from `self` /// - the `prefix` matches `"a/**"` leading to a result of `"**/c/*"` when stripped from `self` - /// + /// /// So the result is `["*", "**/c/*"]`. /// If `prefix` cannot match the beginning of `self`, an empty list is reuturned. 
/// From e587aa9ce43af6cb78bbe418f478804218f7b19f Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Fri, 26 Jul 2024 11:16:28 +0200 Subject: [PATCH 21/29] Fix Clippy errors from Rust 1.80 (#1273) * Allow unexpected `doc_auto_cfg` flag * Keep never-constructed logger interceptor * Ignore interior mutability of `Resource` * Fix typo * Resolve `clippy::doc-lazy-continuation` errors * Upgrade `time@0.3.28` to `time@0.3.36` See https://github.com/time-rs/time/issues/693 --- Cargo.lock | 42 ++++++++++++++------ clippy.toml | 6 +++ zenoh/Cargo.toml | 3 ++ zenoh/src/key_expr.rs | 2 +- zenoh/src/net/routing/dispatcher/resource.rs | 4 ++ zenoh/src/net/routing/interceptor/mod.rs | 5 +++ zenoh/src/plugins/sealed.rs | 9 +++-- zenoh/src/session.rs | 4 +- zenohd/src/main.rs | 5 ++- 9 files changed, 59 insertions(+), 21 deletions(-) create mode 100644 clippy.toml diff --git a/Cargo.lock b/Cargo.lock index 8aee8b7638..7282479a20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1022,9 +1022,12 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] [[package]] name = "derive-new" @@ -1896,7 +1899,7 @@ dependencies = [ "regex", "serde", "serde_json", - "time 0.3.28", + "time 0.3.36", "url", "uuid", ] @@ -2264,6 +2267,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" version = "0.1.45" @@ -2676,6 +2685,12 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -2887,7 +2902,7 @@ checksum = "4954fbc00dcd4d8282c987710e50ba513d351400dbdd00e803a05172a90d8976" dependencies = [ "pem", "ring 0.16.20", - "time 0.3.28", + "time 0.3.36", "yasna", ] @@ -3926,21 +3941,23 @@ dependencies = [ [[package]] name = "time" -version = "0.3.28" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", + "num-conv", + "powerfmt", "serde", "time-core", - "time-macros 0.2.14", + "time-macros 0.2.18", ] [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" @@ -3954,10 +3971,11 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.14" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] @@ -4870,7 +4888,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time 0.3.28", + "time 0.3.36", ] [[package]] diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000000..49436d7ba9 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,6 @@ +# NOTE: Resources are hashed using their `.suffix` field without using any interior mutable fields. 
+# See https://github.com/eclipse-zenoh/zenoh/blob/b55c781220d7ea9f7f117570990f6e4e063e58fe/zenoh/src/net/routing/dispatcher/resource.rs#L193 +# A corresponding comment is present in the `Hash` implementation of `Resource` as a reminder that this configuration is set. +ignore-interior-mutability = [ + "zenoh::net::routing::dispatcher::resource::Resource", +] diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index a8bf041c11..78b90ad9f1 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -130,3 +130,6 @@ license-file = ["../LICENSE", "0"] depends = "zenohd (=0.11.0-dev-1), zenoh-plugin-rest (=0.11.0-dev-1), zenoh-plugin-storage-manager (=0.11.0-dev-1)" maintainer-scripts = ".deb" assets = [["../README.md", "README.md", "644"]] + +[lints.rust] +unexpected_cfgs = { level = "allow", check-cfg = ['cfg(doc_auto_cfg)'] } diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index c3117561cb..99f5aa6187 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -24,7 +24,7 @@ //! - [`keyexpr`] is the equivalent of a [`str`], //! - [`OwnedKeyExpr`] works like an [`std::sync::Arc`], //! - [`KeyExpr`] works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize -//! routing and network usage. +//! routing and network usage. //! //! All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, //! or even if a [`keyexpr::includes`] another. diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 34f1229137..edfcf26925 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -185,6 +185,10 @@ impl PartialEq for Resource { } impl Eq for Resource {} +// NOTE: The `clippy::mutable_key_type` lint takes issue with the fact that `Resource` contains +// interior mutable data. 
A configuration option is used to assert that the accessed fields are +// not interior mutable in clippy.toml. Thus care should be taken to ensure soundness of this impl +// as Clippy will not warn about its usage in sets/maps. impl Hash for Resource { fn hash(&self, state: &mut H) { self.expr().hash(state); diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index ef8e6e0fb1..d7a5fc63f3 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -158,6 +158,8 @@ impl InterceptorTrait for ComputeOnMiss { } } +#[allow(dead_code)] + pub(crate) struct IngressMsgLogger {} impl InterceptorTrait for IngressMsgLogger { @@ -185,6 +187,8 @@ impl InterceptorTrait for IngressMsgLogger { Some(ctx) } } + +#[allow(dead_code)] pub(crate) struct EgressMsgLogger {} impl InterceptorTrait for EgressMsgLogger { @@ -212,6 +216,7 @@ impl InterceptorTrait for EgressMsgLogger { } } +#[allow(dead_code)] pub(crate) struct LoggerInterceptor {} impl InterceptorFactoryTrait for LoggerInterceptor { diff --git a/zenoh/src/plugins/sealed.rs b/zenoh/src/plugins/sealed.rs index 8bfc1f1dab..1c6d752abf 100644 --- a/zenoh/src/plugins/sealed.rs +++ b/zenoh/src/plugins/sealed.rs @@ -93,13 +93,14 @@ pub trait RunningPluginTrait: Send + Sync + PluginControl { /// Thus the plugin can reply its contribution to the global admin space of this zenohd. /// Parameters: /// * `selector`: the full selector of the query (usually only key_expr part is used). This selector is - /// exactly the same as it was requested by user, for example "@/router/ROUTER_ID/plugins/PLUGIN_NAME/some/plugin/info" or "@/router/*/plugins/*/foo/bar". - /// But the plugin's [RunningPluginTrait::adminspace_getter] is called only if the selector matches the `plugin_status_key` + /// exactly the same as it was requested by user, for example "@/router/ROUTER_ID/plugins/PLUGIN_NAME/some/plugin/info" or "@/router/*/plugins/*/foo/bar". 
+ /// But the plugin's [RunningPluginTrait::adminspace_getter] is called only if the selector matches the `plugin_status_key` /// * `plugin_status_key`: the actual path to plugin's status in the admin space. For example "@/router/ROUTER_ID/plugins/PLUGIN_NAME" + /// /// Returns value: /// * `Ok(Vec)`: the list of responses to the query. For example if plugins can return information on subleys "foo", "bar", "foo/buzz" and "bar/buzz" - /// and it's requested with the query "@/router/ROUTER_ID/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz" - /// as they doesn't match the query. + /// and it's requested with the query "@/router/ROUTER_ID/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz" + /// as they doesn't match the query. /// * `Err(ZError)`: Problem occurred when processing the query. /// /// If plugin implements subplugins (as the storage plugin), then it should also reply with information about its subplugins with the same rules. 
diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 0763018c75..95366c9216 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2003,7 +2003,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](Queryable) will reply to + /// [`Queryable`](Queryable) will reply to /// /// # Examples /// ```no_run @@ -2602,7 +2602,7 @@ pub trait SessionDeclarations<'s, 'a> { /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](crate::queryable::Queryable) will reply to + /// [`Queryable`](crate::queryable::Queryable) will reply to /// /// # Examples /// ```no_run diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index c32c7d15ca..bcd57f3735 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -81,9 +81,10 @@ struct Args { /// Allows arbitrary configuration changes as column-separated KEY:VALUE pairs, where: /// - KEY must be a valid config path. /// - VALUE must be a valid JSON5 string that can be deserialized to the expected type for the KEY field. + /// /// Examples: - /// --cfg='startup/subscribe:["demo/**"]' - /// --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}' + /// - `--cfg='startup/subscribe:["demo/**"]'` + /// - `--cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` #[arg(long)] cfg: Vec, /// Configure the read and/or write permissions on the admin space. Default is read only. 
From 2d88c7bdcc46c58a7fbdb67397841452ac8f60ec Mon Sep 17 00:00:00 2001 From: kydos Date: Mon, 29 Jul 2024 14:20:27 +0200 Subject: [PATCH 22/29] Update Cargo.toml (#1277) Updated description to be aligned with what we use everywhere else --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 174f3efb04..80d1990dfd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,7 +70,7 @@ authors = [ edition = "2021" license = "EPL-2.0 OR Apache-2.0" categories = ["network-programming"] -description = "Zenoh: Zero Overhead Pub/sub, Store/Query and Compute." +description = "Zenoh: The Zero Overhead Pub/Sub/Query Protocol." # DEFAULT-FEATURES NOTE: Be careful with default-features and additivity! # (https://github.com/rust-lang/cargo/issues/11329) From f47354cf0ab8f76e8a7079553873e716e7d880b8 Mon Sep 17 00:00:00 2001 From: brianPA <80439594+brian049@users.noreply.github.com> Date: Mon, 5 Aug 2024 22:59:54 +0800 Subject: [PATCH 23/29] fix: typos (#1297) --- DEFAULT_CONFIG.json5 | 2 +- commons/zenoh-config/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index b33dbeb8cf..129b2d8a35 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -307,7 +307,7 @@ /// Configure the zenoh RX parameters of a link rx: { /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// The default the rx_buffer_size value is the same as the default batch size: 65535. /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. 
diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 7732a5bbc0..a6aedaa59b 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -411,7 +411,7 @@ validated_struct::validator! { }, pub rx: LinkRxConf { /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// The default the rx_buffer_size value is the same as the default batch size: 65535. /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. From fef38dce9c33336dcd9d1d0ae8baec57aa3c1aee Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 9 Aug 2024 16:29:31 +0200 Subject: [PATCH 24/29] Replace trees computation tasks with a worker (#1303) * Replace trees computation tasks with a worker * Address review comments * Remove review comments --- .../src/net/routing/hat/linkstate_peer/mod.rs | 62 ++++++------ zenoh/src/net/routing/hat/router/mod.rs | 94 ++++++++++--------- 2 files changed, 86 insertions(+), 70 deletions(-) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index b1eeca261f..41e1b26e72 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -108,11 +108,42 @@ macro_rules! 
face_hat_mut { } use face_hat_mut; +struct TreesComputationWorker { + _task: TerminatableTask, + tx: flume::Sender>, +} + +impl TreesComputationWorker { + fn new() -> Self { + let (tx, rx) = flume::bounded::>(1); + let task = TerminatableTask::spawn_abortable(zenoh_runtime::ZRuntime::Net, async move { + loop { + tokio::time::sleep(std::time::Duration::from_millis( + *TREES_COMPUTATION_DELAY_MS, + )) + .await; + if let Ok(tables_ref) = rx.recv_async().await { + let mut tables = zwrite!(tables_ref.tables); + + tracing::trace!("Compute trees"); + let new_children = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); + + tracing::trace!("Compute routes"); + pubsub::pubsub_tree_change(&mut tables, &new_children); + queries::queries_tree_change(&mut tables, &new_children); + drop(tables); + } + } + }); + Self { _task: task, tx } + } +} + struct HatTables { peer_subs: HashSet>, peer_qabls: HashSet>, peers_net: Option, - peers_trees_task: Option, + peers_trees_worker: TreesComputationWorker, } impl HatTables { @@ -121,36 +152,13 @@ impl HatTables { peer_subs: HashSet::new(), peer_qabls: HashSet::new(), peers_net: None, - peers_trees_task: None, + peers_trees_worker: TreesComputationWorker::new(), } } fn schedule_compute_trees(&mut self, tables_ref: Arc) { - tracing::trace!("Schedule computations"); - if self.peers_trees_task.is_none() { - let task = TerminatableTask::spawn( - zenoh_runtime::ZRuntime::Net, - async move { - tokio::time::sleep(std::time::Duration::from_millis( - *TREES_COMPUTATION_DELAY_MS, - )) - .await; - let mut tables = zwrite!(tables_ref.tables); - - tracing::trace!("Compute trees"); - let new_children = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); - - tracing::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_children); - queries::queries_tree_change(&mut tables, &new_children); - - tracing::trace!("Computations completed"); - hat_mut!(tables).peers_trees_task = None; - }, - 
TerminatableTask::create_cancellation_token(), - ); - self.peers_trees_task = Some(task); - } + tracing::trace!("Schedule trees computation"); + let _ = self.peers_trees_worker.tx.try_send(tables_ref); } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index b4b88d66e9..407562425e 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -113,6 +113,44 @@ macro_rules! face_hat_mut { } use face_hat_mut; +struct TreesComputationWorker { + _task: TerminatableTask, + tx: flume::Sender>, +} + +impl TreesComputationWorker { + fn new(net_type: WhatAmI) -> Self { + let (tx, rx) = flume::bounded::>(1); + let task = TerminatableTask::spawn_abortable(zenoh_runtime::ZRuntime::Net, async move { + loop { + tokio::time::sleep(std::time::Duration::from_millis( + *TREES_COMPUTATION_DELAY_MS, + )) + .await; + if let Ok(tables_ref) = rx.recv_async().await { + let mut tables = zwrite!(tables_ref.tables); + + tracing::trace!("Compute trees"); + let new_children = match net_type { + WhatAmI::Router => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .compute_trees(), + _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), + }; + + tracing::trace!("Compute routes"); + pubsub::pubsub_tree_change(&mut tables, &new_children, net_type); + queries::queries_tree_change(&mut tables, &new_children, net_type); + drop(tables); + } + } + }); + Self { _task: task, tx } + } +} + struct HatTables { router_subs: HashSet>, peer_subs: HashSet>, @@ -121,8 +159,8 @@ struct HatTables { routers_net: Option, peers_net: Option, shared_nodes: Vec, - routers_trees_task: Option, - peers_trees_task: Option, + routers_trees_worker: TreesComputationWorker, + peers_trees_worker: TreesComputationWorker, router_peers_failover_brokering: bool, } @@ -136,8 +174,8 @@ impl HatTables { routers_net: None, peers_net: None, shared_nodes: vec![], - routers_trees_task: None, - peers_trees_task: None, + 
routers_trees_worker: TreesComputationWorker::new(WhatAmI::Router), + peers_trees_worker: TreesComputationWorker::new(WhatAmI::Peer), router_peers_failover_brokering, } } @@ -240,45 +278,15 @@ impl HatTables { } fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { - tracing::trace!("Schedule computations"); - if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) - || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) - { - let task = TerminatableTask::spawn( - zenoh_runtime::ZRuntime::Net, - async move { - tokio::time::sleep(std::time::Duration::from_millis( - *TREES_COMPUTATION_DELAY_MS, - )) - .await; - let mut tables = zwrite!(tables_ref.tables); - - tracing::trace!("Compute trees"); - let new_children = match net_type { - WhatAmI::Router => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .compute_trees(), - _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), - }; - - tracing::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_children, net_type); - queries::queries_tree_change(&mut tables, &new_children, net_type); - - tracing::trace!("Computations completed"); - match net_type { - WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, - _ => hat_mut!(tables).peers_trees_task = None, - }; - }, - TerminatableTask::create_cancellation_token(), - ); - match net_type { - WhatAmI::Router => self.routers_trees_task = Some(task), - _ => self.peers_trees_task = Some(task), - }; + tracing::trace!("Schedule trees computation"); + match net_type { + WhatAmI::Router => { + let _ = self.routers_trees_worker.tx.try_send(tables_ref); + } + WhatAmI::Peer => { + let _ = self.peers_trees_worker.tx.try_send(tables_ref); + } + _ => (), } } } From e66745ebdf70f01bf01ba60f69bc902b917b2d24 Mon Sep 17 00:00:00 2001 From: Tiago Neves <32251249+anhaabaete@users.noreply.github.com> Date: Mon, 12 Aug 2024 11:15:05 -0300 Subject: [PATCH 25/29] zenohd-default config error #1292 (#1298) * 
Zenohd panic when tring load file When zenohd trying load file, if it have a problem it crash cause another treat was "unwrap", and it return to a type config. So, it crash and cause painic. * zenohd default config error #1292 When tring load config file defined by -c option. With haver any problema "unwrap" has been to Config type. I treat it return a Default Config whe it happen * If file fail when try load configs If file fail when try load configs * Update main.rs * Resolve typos at comment Resolve typos at comment --- zenohd/src/main.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index bcd57f3735..ddac046770 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -123,7 +123,11 @@ fn config_from_args(args: &Args) -> Config { .config .as_ref() .map_or_else(Config::default, |conf_file| { - Config::from_file(conf_file).unwrap() + Config::from_file(conf_file).unwrap_or_else(|e| { + // if file load fail, wanning it, and load default config + tracing::warn!("Warn: File {} not found! {}", conf_file, e.to_string()); + Config::default() + }) }); if config.mode().is_none() { From c61a0beca73fac39031fd494e8b003680bed317b Mon Sep 17 00:00:00 2001 From: brianPA <80439594+brian049@users.noreply.github.com> Date: Mon, 5 Aug 2024 22:59:54 +0800 Subject: [PATCH 26/29] fix: typos (#1297) --- DEFAULT_CONFIG.json5 | 2 +- commons/zenoh-config/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 27af64ef93..cb61b35a31 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -388,7 +388,7 @@ /// Configure the zenoh RX parameters of a link rx: { /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// The default the rx_buffer_size value is the same as the default batch size: 65535. 
/// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index b7b63e1602..743bde178a 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -441,7 +441,7 @@ validated_struct::validator! { }, pub rx: LinkRxConf { /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// The default the rx_buffer_size value is the same as the default batch size: 65535. /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. From c817e65561308d0225f1264ab951e45c51f8fe8b Mon Sep 17 00:00:00 2001 From: Tiago Neves <32251249+anhaabaete@users.noreply.github.com> Date: Mon, 12 Aug 2024 11:15:05 -0300 Subject: [PATCH 27/29] zenohd-default config error #1292 (#1298) * Zenohd panic when tring load file When zenohd trying load file, if it have a problem it crash cause another treat was "unwrap", and it return to a type config. So, it crash and cause painic. * zenohd default config error #1292 When tring load config file defined by -c option. With haver any problema "unwrap" has been to Config type. 
I treat it return a Default Config whe it happen * If file fail when try load configs If file fail when try load configs * Update main.rs * Resolve typos at comment Resolve typos at comment --- zenohd/src/main.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 9ce0a64333..18abb72354 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -120,7 +120,11 @@ fn config_from_args(args: &Args) -> Config { .config .as_ref() .map_or_else(Config::default, |conf_file| { - Config::from_file(conf_file).unwrap() + Config::from_file(conf_file).unwrap_or_else(|e| { + // if file load fail, wanning it, and load default config + tracing::warn!("Warn: File {} not found! {}", conf_file, e.to_string()); + Config::default() + }) }); if config.mode().is_none() { From 11811f973f1e60d43c4d9714d0c3b8f6d6d3bf24 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 9 Aug 2024 16:29:31 +0200 Subject: [PATCH 28/29] Replace trees computation tasks with a worker (#1303) * Replace trees computation tasks with a worker * Address review comments * Remove review comments From 78019083c8aa29afa5bda84888f4d5e58bf625b1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 27 Aug 2024 16:54:14 +0200 Subject: [PATCH 29/29] revering fix https://github.com/eclipse-zenoh/zenoh/pull/1298 --- zenohd/src/main.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 18abb72354..9ce0a64333 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -120,11 +120,7 @@ fn config_from_args(args: &Args) -> Config { .config .as_ref() .map_or_else(Config::default, |conf_file| { - Config::from_file(conf_file).unwrap_or_else(|e| { - // if file load fail, wanning it, and load default config - tracing::warn!("Warn: File {} not found! {}", conf_file, e.to_string()); - Config::default() - }) + Config::from_file(conf_file).unwrap() }); if config.mode().is_none() {