From 11562c765443990e2003653fd74be85c5f730d22 Mon Sep 17 00:00:00 2001 From: Albert Bou Date: Mon, 13 Nov 2023 09:55:47 +0100 Subject: [PATCH 01/21] [BugFix] Minor fix in the logging of PPO and A2C examples (#1693) --- examples/a2c/a2c_atari.py | 4 ++-- examples/a2c/a2c_mujoco.py | 4 ++-- examples/ppo/ppo_atari.py | 4 ++-- examples/ppo/ppo_mujoco.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/examples/a2c/a2c_atari.py b/examples/a2c/a2c_atari.py index 44a37cb3ce6..4598c11844b 100644 --- a/examples/a2c/a2c_atari.py +++ b/examples/a2c/a2c_atari.py @@ -117,9 +117,9 @@ def main(cfg: "DictConfig"): # noqa: F821 pbar.update(data.numel()) # Get training rewards and lengths - episode_rewards = data["next", "episode_reward"][data["next", "done"]] + episode_rewards = data["next", "episode_reward"][data["next", "terminated"]] if len(episode_rewards) > 0: - episode_length = data["next", "step_count"][data["next", "done"]] + episode_length = data["next", "step_count"][data["next", "terminated"]] log_info.update( { "train/reward": episode_rewards.mean().item(), diff --git a/examples/a2c/a2c_mujoco.py b/examples/a2c/a2c_mujoco.py index 7f9e588bbf6..48844dee6b6 100644 --- a/examples/a2c/a2c_mujoco.py +++ b/examples/a2c/a2c_mujoco.py @@ -101,9 +101,9 @@ def main(cfg: "DictConfig"): # noqa: F821 pbar.update(data.numel()) # Get training rewards and lengths - episode_rewards = data["next", "episode_reward"][data["next", "done"]] + episode_rewards = data["next", "episode_reward"][data["next", "terminated"]] if len(episode_rewards) > 0: - episode_length = data["next", "step_count"][data["next", "done"]] + episode_length = data["next", "step_count"][data["next", "terminated"]] log_info.update( { "train/reward": episode_rewards.mean().item(), diff --git a/examples/ppo/ppo_atari.py b/examples/ppo/ppo_atari.py index eb2ce15ec5a..1bfbccdeba4 100644 --- a/examples/ppo/ppo_atari.py +++ b/examples/ppo/ppo_atari.py @@ -134,9 +134,9 @@ def main(cfg: "DictConfig"): # noqa: F821 pbar.update(data.numel()) # Get training rewards and episode lengths - episode_rewards = data["next", "episode_reward"][data["next", "done"]] + episode_rewards = data["next", "episode_reward"][data["next", "terminated"]] if len(episode_rewards) > 0: - episode_length = data["next", "step_count"][data["next", "stop"]] + episode_length = data["next", "step_count"][data["next", "terminated"]] log_info.update( { "train/reward": episode_rewards.mean().item(), diff --git a/examples/ppo/ppo_mujoco.py b/examples/ppo/ppo_mujoco.py index ff6aeda51d2..988bc5300bf 100644 --- a/examples/ppo/ppo_mujoco.py +++ b/examples/ppo/ppo_mujoco.py @@ -120,9 +120,9 @@ def main(cfg: "DictConfig"): # noqa: F821 pbar.update(data.numel()) # Get training rewards and episode lengths - episode_rewards = data["next", "episode_reward"][data["next", "done"]] + episode_rewards = data["next", "episode_reward"][data["next", "terminated"]] if len(episode_rewards) > 0: - episode_length = data["next", "step_count"][data["next", "done"]] + episode_length = data["next", "step_count"][data["next", "terminated"]] log_info.update( { "train/reward": episode_rewards.mean().item(), From 6fde4eab5817fab5c5a961a5369eff295aa7a5c4 Mon Sep 17 00:00:00 2001 From: Danylo Baibak Date: Tue, 14 Nov 2023 16:18:57 +0100 Subject: [PATCH 02/21] [CI] Enable retry mechanism (#1681) Co-authored-by: Vincent Moens --- .github/pytorch-probot.yml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .github/pytorch-probot.yml diff --git a/.github/pytorch-probot.yml 
b/.github/pytorch-probot.yml new file mode 100644 index 00000000000..98af413285f --- /dev/null +++ b/.github/pytorch-probot.yml @@ -0,0 +1,5 @@ +# List of workflows that will be re-run in case of failures +# https://github.com/pytorch/test-infra/blob/main/torchci/lib/bot/retryBot.ts +retryable_workflows: +- Build M1 +- Wheels From 02ff00d3c07a548893a7588be1907a2cd9c68340 Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Tue, 14 Nov 2023 16:19:57 +0000 Subject: [PATCH 03/21] [Refactor] Minor changes in prep of https://github.com/pytorch/tensordict/pull/541 (#1696) --- test/test_shared.py | 19 +------------------ torchrl/data/replay_buffers/storages.py | 10 +++++++++- 2 files changed, 10 insertions(+), 19 deletions(-) diff --git a/test/test_shared.py b/test/test_shared.py index c4790597359..186c8ae9525 100644 --- a/test/test_shared.py +++ b/test/test_shared.py @@ -144,24 +144,7 @@ def test_shared(self, shared): ) -# @pytest.mark.skipif( -# sys.platform == "win32", -# reason="RuntimeError from Torch serialization.py when creating td_saved on Windows", -# ) -@pytest.mark.parametrize( - "idx", - [ - torch.tensor( - [ - 3, - 5, - 7, - 8, - ] - ), - slice(200), - ], -) +@pytest.mark.parametrize("idx", [0, slice(200)]) @pytest.mark.parametrize("dtype", [torch.float, torch.bool]) def test_memmap(idx, dtype, large_scale=False): N = 5000 if large_scale else 10 diff --git a/torchrl/data/replay_buffers/storages.py b/torchrl/data/replay_buffers/storages.py index ef790b6f9f6..bacb5713492 100644 --- a/torchrl/data/replay_buffers/storages.py +++ b/torchrl/data/replay_buffers/storages.py @@ -638,7 +638,8 @@ def _init(self, data: Union[TensorDictBase, torch.Tensor]) -> None: self.device = data.device if self.device.type != "cpu": warnings.warn( - "Support for Memmap device other than CPU will be deprecated in v0.4.0.", + "Support for Memmap device other than CPU will be deprecated in v0.4.0. 
" + "Using a 'cuda' device may be suboptimal.", category=DeprecationWarning, ) if is_tensor_collection(data): @@ -668,6 +669,13 @@ def _init(self, data: Union[TensorDictBase, torch.Tensor]) -> None: self._storage = out self.initialized = True + def get(self, index: Union[int, Sequence[int], slice]) -> Any: + result = super().get(index) + # to be deprecated in v0.4 + if result.device != self.device: + return result.to(self.device, non_blocking=True) + return result + # Utils def _mem_map_tensor_as_tensor(mem_map_tensor: MemmapTensor) -> torch.Tensor: From e1eb69dc5056240bab24de1384cc0416704b940b Mon Sep 17 00:00:00 2001 From: Honglong Tian <50365897+FrankTianTT@users.noreply.github.com> Date: Wed, 15 Nov 2023 20:04:44 +0800 Subject: [PATCH 04/21] [BugFix] fix dreamer actor (#1697) Co-authored-by: vmoens --- torchrl/trainers/helpers/models.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/torchrl/trainers/helpers/models.py b/torchrl/trainers/helpers/models.py index ee343aa438e..3782de64fa2 100644 --- a/torchrl/trainers/helpers/models.py +++ b/torchrl/trainers/helpers/models.py @@ -657,6 +657,7 @@ def _dreamer_make_actor_sim(action_key, proof_environment, actor_module): out_keys=[action_key], default_interaction_type=InteractionType.RANDOM, distribution_class=TanhNormal, + distribution_kwargs={"tanh_loc": True}, spec=CompositeSpec(**{action_key: proof_environment.action_spec}), ), ) @@ -703,8 +704,9 @@ def _dreamer_make_actor_real( SafeProbabilisticModule( in_keys=["loc", "scale"], out_keys=[action_key], - default_interaction_type=InteractionType.RANDOM, + default_interaction_type=InteractionType.MODE, distribution_class=TanhNormal, + distribution_kwargs={"tanh_loc": True}, spec=CompositeSpec( **{action_key: proof_environment.action_spec.to("cpu")} ), From 0badd6e52a84273ab932dd8eda94be67783433c0 Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Wed, 15 Nov 2023 13:53:46 +0000 Subject: [PATCH 05/21] [Refactor] Deprecate direct usage of memmap tensors (#1684) --- test/test_libs.py | 6 +-- test/test_rb_distributed.py | 5 ++- test/test_rlhf.py | 11 ++++-- torchrl/data/replay_buffers/storages.py | 50 +++++++++++++++---------- torchrl/data/rlhf/dataset.py | 8 ++-- torchrl/data/rlhf/prompt.py | 8 ++-- torchrl/data/rlhf/reward.py | 16 ++++---- 7 files changed, 61 insertions(+), 43 deletions(-) diff --git a/test/test_libs.py b/test/test_libs.py index f1715a550f4..1e4d2a7d871 100644 --- a/test/test_libs.py +++ b/test/test_libs.py @@ -400,7 +400,7 @@ def test_vecenvs_wrapper(self, envname): ["HalfCheetah-v4", "CartPole-v1", "ALE/Pong-v5"] + (["FetchReach-v2"] if _has_gym_robotics else []), ) - @pytest.mark.flaky(reruns=3, reruns_delay=1) + @pytest.mark.flaky(reruns=8, reruns_delay=1) def test_vecenvs_env(self, envname): from _utils_internal import rollout_consistency_assertion @@ -1897,10 +1897,10 @@ def test_direct_download(self, task): assert len(keys) assert_allclose_td( data_direct._storage._storage.select(*keys).apply( - lambda t: t.as_tensor().float() + lambda t: t.float() ), data_d4rl._storage._storage.select(*keys).apply( - lambda t: t.as_tensor().float() + lambda t: t.float() ), ) diff --git a/test/test_rb_distributed.py b/test/test_rb_distributed.py index 8a46b1a006d..548f04dc41d 100644 --- a/test/test_rb_distributed.py +++ b/test/test_rb_distributed.py @@ -4,6 +4,7 @@ # LICENSE file in the root directory of this source tree. 
import argparse import os + import sys import time @@ -22,10 +23,10 @@ class ReplayBufferNode(RemoteTensorDictReplayBuffer): - def __init__(self, capacity: int): + def __init__(self, capacity: int, scratch_dir=None): super().__init__( storage=LazyMemmapStorage( - max_size=capacity, scratch_dir="/tmp/", device=torch.device("cpu") + max_size=capacity, scratch_dir=scratch_dir, device=torch.device("cpu") ), sampler=RandomSampler(), writer=RoundRobinWriter(), diff --git a/test/test_rlhf.py b/test/test_rlhf.py index 2abb9a6d386..31ef96681df 100644 --- a/test/test_rlhf.py +++ b/test/test_rlhf.py @@ -14,7 +14,12 @@ import torch.nn.functional as F from _utils_internal import get_default_devices -from tensordict import is_tensor_collection, MemmapTensor, TensorDict, TensorDictBase +from tensordict import ( + is_tensor_collection, + MemoryMappedTensor, + TensorDict, + TensorDictBase, +) from tensordict.nn import TensorDictModule from torchrl.data.rlhf import TensorDictTokenizer from torchrl.data.rlhf.dataset import ( @@ -188,8 +193,8 @@ def test_dataset_to_tensordict(tmpdir, suffix): else: assert ("c", "d", "a") in td.keys(True) assert ("c", "d", "b") in td.keys(True) - assert isinstance(td.get((suffix, "a")), MemmapTensor) - assert isinstance(td.get((suffix, "b")), MemmapTensor) + assert isinstance(td.get((suffix, "a")), MemoryMappedTensor) + assert isinstance(td.get((suffix, "b")), MemoryMappedTensor) @pytest.mark.skipif( diff --git a/torchrl/data/replay_buffers/storages.py b/torchrl/data/replay_buffers/storages.py index bacb5713492..9c8417b9c97 100644 --- a/torchrl/data/replay_buffers/storages.py +++ b/torchrl/data/replay_buffers/storages.py @@ -12,7 +12,7 @@ import torch from tensordict import is_tensorclass -from tensordict.memmap import MemmapTensor +from tensordict.memmap import MemmapTensor, MemoryMappedTensor from tensordict.tensordict import is_tensor_collection, TensorDict, TensorDictBase from tensordict.utils import expand_right @@ -482,7 +482,7 @@ def _init(self, data: Union[TensorDictBase, torch.Tensor]) -> None: if self.device == "auto": self.device = data.device if isinstance(data, torch.Tensor): - # if Tensor, we just create a MemmapTensor of the desired shape, device and dtype + # if Tensor, we just create a MemoryMappedTensor of the desired shape, device and dtype out = torch.empty( self.max_size, *data.shape, @@ -531,12 +531,12 @@ class LazyMemmapStorage(LazyTensorStorage): >>> storage.get(0) TensorDict( fields={ - some data: MemmapTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), + some data: MemoryMappedTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), some: TensorDict( fields={ nested: TensorDict( fields={ - data: MemmapTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False)}, + data: MemoryMappedTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False)}, batch_size=torch.Size([11]), device=cpu, is_shared=False)}, @@ -560,8 +560,8 @@ class LazyMemmapStorage(LazyTensorStorage): >>> storage.set(range(10), data) >>> storage.get(0) MyClass( - bar=MemmapTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False), - foo=MemmapTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), + bar=MemoryMappedTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False), + foo=MemoryMappedTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), batch_size=torch.Size([11]), 
device=cpu, is_shared=False) @@ -603,7 +603,12 @@ def load_state_dict(self, state_dict): if isinstance(self._storage, torch.Tensor): _mem_map_tensor_as_tensor(self._storage).copy_(_storage) elif self._storage is None: - self._storage = MemmapTensor(_storage) + self._storage = _make_memmap( + _storage, + path=self.scratch_dir + "/tensor.memmap" + if self.scratch_dir is not None + else None, + ) else: raise RuntimeError( f"Cannot copy a storage of type {type(_storage)} onto another of type {type(self._storage)}" @@ -657,9 +662,13 @@ def _init(self, data: Union[TensorDictBase, torch.Tensor]) -> None: ) else: # If not a tensorclass/tensordict, it must be a tensor(-like) - # if Tensor, we just create a MemmapTensor of the desired shape, device and dtype - out = MemmapTensor( - self.max_size, *data.shape, device=self.device, dtype=data.dtype + # if Tensor, we just create a MemoryMappedTensor of the desired shape, device and dtype + out = _make_empty_memmap( + (self.max_size, *data.shape), + dtype=data.dtype, + path=self.scratch_dir + "/tensor.memmap" + if self.scratch_dir is not None + else None, ) if VERBOSE: filesize = os.path.getsize(out.filename) / 1024 / 1024 @@ -685,6 +694,7 @@ def _mem_map_tensor_as_tensor(mem_map_tensor: MemmapTensor) -> torch.Tensor: f"Supported backends are {_CKPT_BACKEND.backends}" ) if isinstance(mem_map_tensor, torch.Tensor): + # This will account for MemoryMappedTensors return mem_map_tensor if _CKPT_BACKEND == "torchsnapshot": # TorchSnapshot doesn't know how to stream MemmapTensor, so we view MemmapTensor @@ -745,25 +755,27 @@ def _collate_list_tensordict(x): return out -def _collate_contiguous(x): +def _collate_id(x): return x -def _collate_as_tensor(x): - return x.as_tensor() - - def _get_default_collate(storage, _is_tensordict=False): if isinstance(storage, ListStorage): if _is_tensordict: return _collate_list_tensordict else: return torch.utils.data._utils.collate.default_collate - elif isinstance(storage, LazyMemmapStorage): - return _collate_as_tensor - elif isinstance(storage, (TensorStorage,)): - return _collate_contiguous + elif isinstance(storage, TensorStorage): + return _collate_id else: raise NotImplementedError( f"Could not find a default collate_fn for storage {type(storage)}." 
) + + +def _make_memmap(tensor, path): + return MemoryMappedTensor.from_tensor(tensor, filename=path) + + +def _make_empty_memmap(shape, dtype, path): + return MemoryMappedTensor.empty(shape=shape, dtype=dtype, filename=path) diff --git a/torchrl/data/rlhf/dataset.py b/torchrl/data/rlhf/dataset.py index db2b6a418d6..adc2ddcf0d7 100644 --- a/torchrl/data/rlhf/dataset.py +++ b/torchrl/data/rlhf/dataset.py @@ -77,8 +77,8 @@ class TokenizedDatasetLoader: >>> print(dataset) TensorDict( fields={ - attention_mask: MemmapTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids: MemmapTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False)}, + attention_mask: MemoryMappedTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids: MemoryMappedTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False)}, batch_size=torch.Size([185068]), device=None, is_shared=False) @@ -270,8 +270,8 @@ def dataset_to_tensordict( fields={ prefix: TensorDict( fields={ - labels: MemmapTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.float32, is_shared=False), - tokens: MemmapTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.int64, is_shared=False)}, + labels: MemoryMappedTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.float32, is_shared=False), + tokens: MemoryMappedTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.int64, is_shared=False)}, batch_size=torch.Size([10]), device=None, is_shared=False)}, diff --git a/torchrl/data/rlhf/prompt.py b/torchrl/data/rlhf/prompt.py index d534a95379e..d50653c9967 100644 --- a/torchrl/data/rlhf/prompt.py +++ b/torchrl/data/rlhf/prompt.py @@ -74,10 +74,10 @@ def from_dataset( >>> data = PromptData.from_dataset("train") >>> print(data) PromptDataTLDR( - attention_mask=MemmapTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemmapTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), - prompt_rindex=MemmapTensor(shape=torch.Size([116722]), device=cpu, dtype=torch.int64, is_shared=False), - labels=MemmapTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemoryMappedTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemoryMappedTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), + prompt_rindex=MemoryMappedTensor(shape=torch.Size([116722]), device=cpu, dtype=torch.int64, is_shared=False), + labels=MemoryMappedTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), logits=None, loss=None, batch_size=torch.Size([116722]), diff --git a/torchrl/data/rlhf/reward.py b/torchrl/data/rlhf/reward.py index e7843e02f46..20f379ef659 100644 --- a/torchrl/data/rlhf/reward.py +++ b/torchrl/data/rlhf/reward.py @@ -41,16 +41,16 @@ class PairwiseDataset: >>> print(data) PairwiseDataset( chosen_data=RewardData( - attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, 
end_scores=None, batch_size=torch.Size([92534]), device=None, is_shared=False), rejected_data=RewardData( - attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, end_scores=None, batch_size=torch.Size([92534]), @@ -97,16 +97,16 @@ def from_dataset( >>> print(data) PairwiseDataset( chosen_data=RewardData( - attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, end_scores=None, batch_size=torch.Size([92534]), device=None, is_shared=False), rejected_data=RewardData( - attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, end_scores=None, batch_size=torch.Size([92534]), From d82aa452446afb3a8832ade3a2e037c1c470058c Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Wed, 15 Nov 2023 13:54:34 +0000 Subject: [PATCH 06/21] Revert "[Refactor] Deprecate direct usage of memmap tensors" (#1698) --- test/test_libs.py | 6 +-- test/test_rb_distributed.py | 5 +-- test/test_rlhf.py | 11 ++---- torchrl/data/replay_buffers/storages.py | 50 ++++++++++--------------- torchrl/data/rlhf/dataset.py | 8 ++-- torchrl/data/rlhf/prompt.py | 8 ++-- torchrl/data/rlhf/reward.py | 16 ++++---- 7 files changed, 43 insertions(+), 61 deletions(-) diff --git a/test/test_libs.py b/test/test_libs.py index 1e4d2a7d871..f1715a550f4 100644 --- a/test/test_libs.py +++ b/test/test_libs.py @@ -400,7 +400,7 @@ def test_vecenvs_wrapper(self, envname): ["HalfCheetah-v4", "CartPole-v1", "ALE/Pong-v5"] + (["FetchReach-v2"] if _has_gym_robotics else []), ) - @pytest.mark.flaky(reruns=8, reruns_delay=1) + @pytest.mark.flaky(reruns=3, reruns_delay=1) def test_vecenvs_env(self, envname): from _utils_internal import rollout_consistency_assertion @@ -1897,10 +1897,10 @@ def test_direct_download(self, task): assert len(keys) assert_allclose_td( data_direct._storage._storage.select(*keys).apply( - lambda t: t.float() + lambda t: t.as_tensor().float() ), data_d4rl._storage._storage.select(*keys).apply( - lambda t: t.float() + lambda t: t.as_tensor().float() ), ) diff --git a/test/test_rb_distributed.py b/test/test_rb_distributed.py index 548f04dc41d..8a46b1a006d 100644 --- a/test/test_rb_distributed.py +++ b/test/test_rb_distributed.py @@ -4,7 +4,6 @@ # LICENSE file in the root directory of this source tree. 
import argparse import os - import sys import time @@ -23,10 +22,10 @@ class ReplayBufferNode(RemoteTensorDictReplayBuffer): - def __init__(self, capacity: int, scratch_dir=None): + def __init__(self, capacity: int): super().__init__( storage=LazyMemmapStorage( - max_size=capacity, scratch_dir=scratch_dir, device=torch.device("cpu") + max_size=capacity, scratch_dir="/tmp/", device=torch.device("cpu") ), sampler=RandomSampler(), writer=RoundRobinWriter(), diff --git a/test/test_rlhf.py b/test/test_rlhf.py index 31ef96681df..2abb9a6d386 100644 --- a/test/test_rlhf.py +++ b/test/test_rlhf.py @@ -14,12 +14,7 @@ import torch.nn.functional as F from _utils_internal import get_default_devices -from tensordict import ( - is_tensor_collection, - MemoryMappedTensor, - TensorDict, - TensorDictBase, -) +from tensordict import is_tensor_collection, MemmapTensor, TensorDict, TensorDictBase from tensordict.nn import TensorDictModule from torchrl.data.rlhf import TensorDictTokenizer from torchrl.data.rlhf.dataset import ( @@ -193,8 +188,8 @@ def test_dataset_to_tensordict(tmpdir, suffix): else: assert ("c", "d", "a") in td.keys(True) assert ("c", "d", "b") in td.keys(True) - assert isinstance(td.get((suffix, "a")), MemoryMappedTensor) - assert isinstance(td.get((suffix, "b")), MemoryMappedTensor) + assert isinstance(td.get((suffix, "a")), MemmapTensor) + assert isinstance(td.get((suffix, "b")), MemmapTensor) @pytest.mark.skipif( diff --git a/torchrl/data/replay_buffers/storages.py b/torchrl/data/replay_buffers/storages.py index 9c8417b9c97..bacb5713492 100644 --- a/torchrl/data/replay_buffers/storages.py +++ b/torchrl/data/replay_buffers/storages.py @@ -12,7 +12,7 @@ import torch from tensordict import is_tensorclass -from tensordict.memmap import MemmapTensor, MemoryMappedTensor +from tensordict.memmap import MemmapTensor from tensordict.tensordict import is_tensor_collection, TensorDict, TensorDictBase from tensordict.utils import expand_right @@ -482,7 +482,7 @@ def _init(self, data: Union[TensorDictBase, torch.Tensor]) -> None: if self.device == "auto": self.device = data.device if isinstance(data, torch.Tensor): - # if Tensor, we just create a MemoryMappedTensor of the desired shape, device and dtype + # if Tensor, we just create a MemmapTensor of the desired shape, device and dtype out = torch.empty( self.max_size, *data.shape, @@ -531,12 +531,12 @@ class LazyMemmapStorage(LazyTensorStorage): >>> storage.get(0) TensorDict( fields={ - some data: MemoryMappedTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), + some data: MemmapTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), some: TensorDict( fields={ nested: TensorDict( fields={ - data: MemoryMappedTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False)}, + data: MemmapTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False)}, batch_size=torch.Size([11]), device=cpu, is_shared=False)}, @@ -560,8 +560,8 @@ class LazyMemmapStorage(LazyTensorStorage): >>> storage.set(range(10), data) >>> storage.get(0) MyClass( - bar=MemoryMappedTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False), - foo=MemoryMappedTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), + bar=MemmapTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False), + foo=MemmapTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), batch_size=torch.Size([11]), 
device=cpu, is_shared=False) @@ -603,12 +603,7 @@ def load_state_dict(self, state_dict): if isinstance(self._storage, torch.Tensor): _mem_map_tensor_as_tensor(self._storage).copy_(_storage) elif self._storage is None: - self._storage = _make_memmap( - _storage, - path=self.scratch_dir + "/tensor.memmap" - if self.scratch_dir is not None - else None, - ) + self._storage = MemmapTensor(_storage) else: raise RuntimeError( f"Cannot copy a storage of type {type(_storage)} onto another of type {type(self._storage)}" @@ -662,13 +657,9 @@ def _init(self, data: Union[TensorDictBase, torch.Tensor]) -> None: ) else: # If not a tensorclass/tensordict, it must be a tensor(-like) - # if Tensor, we just create a MemoryMappedTensor of the desired shape, device and dtype - out = _make_empty_memmap( - (self.max_size, *data.shape), - dtype=data.dtype, - path=self.scratch_dir + "/tensor.memmap" - if self.scratch_dir is not None - else None, + # if Tensor, we just create a MemmapTensor of the desired shape, device and dtype + out = MemmapTensor( + self.max_size, *data.shape, device=self.device, dtype=data.dtype ) if VERBOSE: filesize = os.path.getsize(out.filename) / 1024 / 1024 @@ -694,7 +685,6 @@ def _mem_map_tensor_as_tensor(mem_map_tensor: MemmapTensor) -> torch.Tensor: f"Supported backends are {_CKPT_BACKEND.backends}" ) if isinstance(mem_map_tensor, torch.Tensor): - # This will account for MemoryMappedTensors return mem_map_tensor if _CKPT_BACKEND == "torchsnapshot": # TorchSnapshot doesn't know how to stream MemmapTensor, so we view MemmapTensor @@ -755,27 +745,25 @@ def _collate_list_tensordict(x): return out -def _collate_id(x): +def _collate_contiguous(x): return x +def _collate_as_tensor(x): + return x.as_tensor() + + def _get_default_collate(storage, _is_tensordict=False): if isinstance(storage, ListStorage): if _is_tensordict: return _collate_list_tensordict else: return torch.utils.data._utils.collate.default_collate - elif isinstance(storage, TensorStorage): - return _collate_id + elif isinstance(storage, LazyMemmapStorage): + return _collate_as_tensor + elif isinstance(storage, (TensorStorage,)): + return _collate_contiguous else: raise NotImplementedError( f"Could not find a default collate_fn for storage {type(storage)}." 
) - - -def _make_memmap(tensor, path): - return MemoryMappedTensor.from_tensor(tensor, filename=path) - - -def _make_empty_memmap(shape, dtype, path): - return MemoryMappedTensor.empty(shape=shape, dtype=dtype, filename=path) diff --git a/torchrl/data/rlhf/dataset.py b/torchrl/data/rlhf/dataset.py index adc2ddcf0d7..db2b6a418d6 100644 --- a/torchrl/data/rlhf/dataset.py +++ b/torchrl/data/rlhf/dataset.py @@ -77,8 +77,8 @@ class TokenizedDatasetLoader: >>> print(dataset) TensorDict( fields={ - attention_mask: MemoryMappedTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids: MemoryMappedTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False)}, + attention_mask: MemmapTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids: MemmapTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False)}, batch_size=torch.Size([185068]), device=None, is_shared=False) @@ -270,8 +270,8 @@ def dataset_to_tensordict( fields={ prefix: TensorDict( fields={ - labels: MemoryMappedTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.float32, is_shared=False), - tokens: MemoryMappedTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.int64, is_shared=False)}, + labels: MemmapTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.float32, is_shared=False), + tokens: MemmapTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.int64, is_shared=False)}, batch_size=torch.Size([10]), device=None, is_shared=False)}, diff --git a/torchrl/data/rlhf/prompt.py b/torchrl/data/rlhf/prompt.py index d50653c9967..d534a95379e 100644 --- a/torchrl/data/rlhf/prompt.py +++ b/torchrl/data/rlhf/prompt.py @@ -74,10 +74,10 @@ def from_dataset( >>> data = PromptData.from_dataset("train") >>> print(data) PromptDataTLDR( - attention_mask=MemoryMappedTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemoryMappedTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), - prompt_rindex=MemoryMappedTensor(shape=torch.Size([116722]), device=cpu, dtype=torch.int64, is_shared=False), - labels=MemoryMappedTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemmapTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemmapTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), + prompt_rindex=MemmapTensor(shape=torch.Size([116722]), device=cpu, dtype=torch.int64, is_shared=False), + labels=MemmapTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), logits=None, loss=None, batch_size=torch.Size([116722]), diff --git a/torchrl/data/rlhf/reward.py b/torchrl/data/rlhf/reward.py index 20f379ef659..e7843e02f46 100644 --- a/torchrl/data/rlhf/reward.py +++ b/torchrl/data/rlhf/reward.py @@ -41,16 +41,16 @@ class PairwiseDataset: >>> print(data) PairwiseDataset( chosen_data=RewardData( - attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, 
end_scores=None, batch_size=torch.Size([92534]), device=None, is_shared=False), rejected_data=RewardData( - attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, end_scores=None, batch_size=torch.Size([92534]), @@ -97,16 +97,16 @@ def from_dataset( >>> print(data) PairwiseDataset( chosen_data=RewardData( - attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, end_scores=None, batch_size=torch.Size([92534]), device=None, is_shared=False), rejected_data=RewardData( - attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, end_scores=None, batch_size=torch.Size([92534]), From 0a38cbcd53451b1d97a62da3c5473574549e1720 Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Wed, 15 Nov 2023 13:55:33 +0000 Subject: [PATCH 07/21] [Refactor] Deprecate direct usage of memmap tensors (#1699) --- test/test_libs.py | 10 ++--- test/test_rb_distributed.py | 5 ++- test/test_rlhf.py | 11 ++++-- torchrl/data/replay_buffers/storages.py | 50 +++++++++++++++---------- torchrl/data/rlhf/dataset.py | 8 ++-- torchrl/data/rlhf/prompt.py | 8 ++-- torchrl/data/rlhf/reward.py | 16 ++++---- 7 files changed, 61 insertions(+), 47 deletions(-) diff --git a/test/test_libs.py b/test/test_libs.py index f1715a550f4..c3379021510 100644 --- a/test/test_libs.py +++ b/test/test_libs.py @@ -400,7 +400,7 @@ def test_vecenvs_wrapper(self, envname): ["HalfCheetah-v4", "CartPole-v1", "ALE/Pong-v5"] + (["FetchReach-v2"] if _has_gym_robotics else []), ) - @pytest.mark.flaky(reruns=3, reruns_delay=1) + @pytest.mark.flaky(reruns=8, reruns_delay=1) def test_vecenvs_env(self, envname): from _utils_internal import rollout_consistency_assertion @@ -1896,12 +1896,8 @@ def test_direct_download(self, task): keys = keys.intersection(data_d4rl._storage._storage.keys(True, True)) assert len(keys) assert_allclose_td( - data_direct._storage._storage.select(*keys).apply( - lambda t: t.as_tensor().float() - ), - data_d4rl._storage._storage.select(*keys).apply( - lambda t: t.as_tensor().float() - ), + data_direct._storage._storage.select(*keys).apply(lambda t: t.float()), + data_d4rl._storage._storage.select(*keys).apply(lambda t: t.float()), ) @pytest.mark.parametrize( diff --git a/test/test_rb_distributed.py b/test/test_rb_distributed.py index 8a46b1a006d..548f04dc41d 100644 --- a/test/test_rb_distributed.py +++ b/test/test_rb_distributed.py @@ -4,6 +4,7 @@ # LICENSE 
file in the root directory of this source tree. import argparse import os + import sys import time @@ -22,10 +23,10 @@ class ReplayBufferNode(RemoteTensorDictReplayBuffer): - def __init__(self, capacity: int): + def __init__(self, capacity: int, scratch_dir=None): super().__init__( storage=LazyMemmapStorage( - max_size=capacity, scratch_dir="/tmp/", device=torch.device("cpu") + max_size=capacity, scratch_dir=scratch_dir, device=torch.device("cpu") ), sampler=RandomSampler(), writer=RoundRobinWriter(), diff --git a/test/test_rlhf.py b/test/test_rlhf.py index 2abb9a6d386..31ef96681df 100644 --- a/test/test_rlhf.py +++ b/test/test_rlhf.py @@ -14,7 +14,12 @@ import torch.nn.functional as F from _utils_internal import get_default_devices -from tensordict import is_tensor_collection, MemmapTensor, TensorDict, TensorDictBase +from tensordict import ( + is_tensor_collection, + MemoryMappedTensor, + TensorDict, + TensorDictBase, +) from tensordict.nn import TensorDictModule from torchrl.data.rlhf import TensorDictTokenizer from torchrl.data.rlhf.dataset import ( @@ -188,8 +193,8 @@ def test_dataset_to_tensordict(tmpdir, suffix): else: assert ("c", "d", "a") in td.keys(True) assert ("c", "d", "b") in td.keys(True) - assert isinstance(td.get((suffix, "a")), MemmapTensor) - assert isinstance(td.get((suffix, "b")), MemmapTensor) + assert isinstance(td.get((suffix, "a")), MemoryMappedTensor) + assert isinstance(td.get((suffix, "b")), MemoryMappedTensor) @pytest.mark.skipif( diff --git a/torchrl/data/replay_buffers/storages.py b/torchrl/data/replay_buffers/storages.py index bacb5713492..9c8417b9c97 100644 --- a/torchrl/data/replay_buffers/storages.py +++ b/torchrl/data/replay_buffers/storages.py @@ -12,7 +12,7 @@ import torch from tensordict import is_tensorclass -from tensordict.memmap import MemmapTensor +from tensordict.memmap import MemmapTensor, MemoryMappedTensor from tensordict.tensordict import is_tensor_collection, TensorDict, TensorDictBase from tensordict.utils import expand_right @@ -482,7 +482,7 @@ def _init(self, data: Union[TensorDictBase, torch.Tensor]) -> None: if self.device == "auto": self.device = data.device if isinstance(data, torch.Tensor): - # if Tensor, we just create a MemmapTensor of the desired shape, device and dtype + # if Tensor, we just create a MemoryMappedTensor of the desired shape, device and dtype out = torch.empty( self.max_size, *data.shape, @@ -531,12 +531,12 @@ class LazyMemmapStorage(LazyTensorStorage): >>> storage.get(0) TensorDict( fields={ - some data: MemmapTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), + some data: MemoryMappedTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), some: TensorDict( fields={ nested: TensorDict( fields={ - data: MemmapTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False)}, + data: MemoryMappedTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False)}, batch_size=torch.Size([11]), device=cpu, is_shared=False)}, @@ -560,8 +560,8 @@ class LazyMemmapStorage(LazyTensorStorage): >>> storage.set(range(10), data) >>> storage.get(0) MyClass( - bar=MemmapTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False), - foo=MemmapTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, is_shared=False), + bar=MemoryMappedTensor(shape=torch.Size([11, 12]), device=cpu, dtype=torch.float32, is_shared=False), + foo=MemoryMappedTensor(shape=torch.Size([11]), device=cpu, dtype=torch.float32, 
is_shared=False), batch_size=torch.Size([11]), device=cpu, is_shared=False) @@ -603,7 +603,12 @@ def load_state_dict(self, state_dict): if isinstance(self._storage, torch.Tensor): _mem_map_tensor_as_tensor(self._storage).copy_(_storage) elif self._storage is None: - self._storage = MemmapTensor(_storage) + self._storage = _make_memmap( + _storage, + path=self.scratch_dir + "/tensor.memmap" + if self.scratch_dir is not None + else None, + ) else: raise RuntimeError( f"Cannot copy a storage of type {type(_storage)} onto another of type {type(self._storage)}" @@ -657,9 +662,13 @@ def _init(self, data: Union[TensorDictBase, torch.Tensor]) -> None: ) else: # If not a tensorclass/tensordict, it must be a tensor(-like) - # if Tensor, we just create a MemmapTensor of the desired shape, device and dtype - out = MemmapTensor( - self.max_size, *data.shape, device=self.device, dtype=data.dtype + # if Tensor, we just create a MemoryMappedTensor of the desired shape, device and dtype + out = _make_empty_memmap( + (self.max_size, *data.shape), + dtype=data.dtype, + path=self.scratch_dir + "/tensor.memmap" + if self.scratch_dir is not None + else None, ) if VERBOSE: filesize = os.path.getsize(out.filename) / 1024 / 1024 @@ -685,6 +694,7 @@ def _mem_map_tensor_as_tensor(mem_map_tensor: MemmapTensor) -> torch.Tensor: f"Supported backends are {_CKPT_BACKEND.backends}" ) if isinstance(mem_map_tensor, torch.Tensor): + # This will account for MemoryMappedTensors return mem_map_tensor if _CKPT_BACKEND == "torchsnapshot": # TorchSnapshot doesn't know how to stream MemmapTensor, so we view MemmapTensor @@ -745,25 +755,27 @@ def _collate_list_tensordict(x): return out -def _collate_contiguous(x): +def _collate_id(x): return x -def _collate_as_tensor(x): - return x.as_tensor() - - def _get_default_collate(storage, _is_tensordict=False): if isinstance(storage, ListStorage): if _is_tensordict: return _collate_list_tensordict else: return torch.utils.data._utils.collate.default_collate - elif isinstance(storage, LazyMemmapStorage): - return _collate_as_tensor - elif isinstance(storage, (TensorStorage,)): - return _collate_contiguous + elif isinstance(storage, TensorStorage): + return _collate_id else: raise NotImplementedError( f"Could not find a default collate_fn for storage {type(storage)}." 
) + + +def _make_memmap(tensor, path): + return MemoryMappedTensor.from_tensor(tensor, filename=path) + + +def _make_empty_memmap(shape, dtype, path): + return MemoryMappedTensor.empty(shape=shape, dtype=dtype, filename=path) diff --git a/torchrl/data/rlhf/dataset.py b/torchrl/data/rlhf/dataset.py index db2b6a418d6..adc2ddcf0d7 100644 --- a/torchrl/data/rlhf/dataset.py +++ b/torchrl/data/rlhf/dataset.py @@ -77,8 +77,8 @@ class TokenizedDatasetLoader: >>> print(dataset) TensorDict( fields={ - attention_mask: MemmapTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids: MemmapTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False)}, + attention_mask: MemoryMappedTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids: MemoryMappedTensor(shape=torch.Size([185068, 550]), device=cpu, dtype=torch.int64, is_shared=False)}, batch_size=torch.Size([185068]), device=None, is_shared=False) @@ -270,8 +270,8 @@ def dataset_to_tensordict( fields={ prefix: TensorDict( fields={ - labels: MemmapTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.float32, is_shared=False), - tokens: MemmapTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.int64, is_shared=False)}, + labels: MemoryMappedTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.float32, is_shared=False), + tokens: MemoryMappedTensor(shape=torch.Size([10, 11]), device=cpu, dtype=torch.int64, is_shared=False)}, batch_size=torch.Size([10]), device=None, is_shared=False)}, diff --git a/torchrl/data/rlhf/prompt.py b/torchrl/data/rlhf/prompt.py index d534a95379e..d50653c9967 100644 --- a/torchrl/data/rlhf/prompt.py +++ b/torchrl/data/rlhf/prompt.py @@ -74,10 +74,10 @@ def from_dataset( >>> data = PromptData.from_dataset("train") >>> print(data) PromptDataTLDR( - attention_mask=MemmapTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemmapTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), - prompt_rindex=MemmapTensor(shape=torch.Size([116722]), device=cpu, dtype=torch.int64, is_shared=False), - labels=MemmapTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemoryMappedTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemoryMappedTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), + prompt_rindex=MemoryMappedTensor(shape=torch.Size([116722]), device=cpu, dtype=torch.int64, is_shared=False), + labels=MemoryMappedTensor(shape=torch.Size([116722, 550]), device=cpu, dtype=torch.int64, is_shared=False), logits=None, loss=None, batch_size=torch.Size([116722]), diff --git a/torchrl/data/rlhf/reward.py b/torchrl/data/rlhf/reward.py index e7843e02f46..20f379ef659 100644 --- a/torchrl/data/rlhf/reward.py +++ b/torchrl/data/rlhf/reward.py @@ -41,16 +41,16 @@ class PairwiseDataset: >>> print(data) PairwiseDataset( chosen_data=RewardData( - attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, 
end_scores=None, batch_size=torch.Size([92534]), device=None, is_shared=False), rejected_data=RewardData( - attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, end_scores=None, batch_size=torch.Size([92534]), @@ -97,16 +97,16 @@ def from_dataset( >>> print(data) PairwiseDataset( chosen_data=RewardData( - attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, end_scores=None, batch_size=torch.Size([92534]), device=None, is_shared=False), rejected_data=RewardData( - attention_mask=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), - input_ids=MemmapTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + attention_mask=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), + input_ids=MemoryMappedTensor(shape=torch.Size([92534, 550]), device=cpu, dtype=torch.int64, is_shared=False), rewards=None, end_scores=None, batch_size=torch.Size([92534]), From 9b9860fca4d0e78929f8a7ee12847eea542cbcfb Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Wed, 15 Nov 2023 21:27:30 +0000 Subject: [PATCH 08/21] [Doc] Fix discord link (#1701) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 05c2e9843c2..5a21b3701d4 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ pypi nightly version [![Downloads](https://static.pepy.tech/personalized-badge/torchrl?period=total&units=international_system&left_color=blue&right_color=orange&left_text=Downloads)](https://pepy.tech/project/torchrl) [![Downloads](https://static.pepy.tech/personalized-badge/torchrl-nightly?period=total&units=international_system&left_color=blue&right_color=orange&left_text=Downloads%20(nightly))](https://pepy.tech/project/torchrl-nightly) -[![Discord Shield](https://dcbadge.vercel.app/api/server/xSURYdvu)](https://discord.gg/xSURYdvu) +[![Discord Shield](https://dcbadge.vercel.app/api/server/2XJdEenU)](https://discord.gg/2XJdEenU) # TorchRL From 44bd026ed06ac69eab63d3b61dc124d479949712 Mon Sep 17 00:00:00 2001 From: Honglong Tian <50365897+FrankTianTT@users.noreply.github.com> Date: Thu, 16 Nov 2023 05:30:24 +0800 Subject: [PATCH 09/21] [BugFix] make sure the params of exploration-wrapper is float (#1700) --- test/test_exploration.py | 4 ++-- torchrl/modules/tensordict_module/exploration.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/test_exploration.py b/test/test_exploration.py index 8a374cd9009..24bb8c246d0 100644 --- a/test/test_exploration.py +++ b/test/test_exploration.py @@ -51,7 +51,7 @@ class TestEGreedy: - @pytest.mark.parametrize("eps_init", [0.0, 0.5, 1.0]) + @pytest.mark.parametrize("eps_init", [0.0, 0.5, 1]) 
@pytest.mark.parametrize("module", [True, False]) def test_egreedy(self, eps_init, module): torch.manual_seed(0) @@ -78,7 +78,7 @@ def test_egreedy(self, eps_init, module): assert (action == 0).any() assert ((action == 1) | (action == 0)).all() - @pytest.mark.parametrize("eps_init", [0.0, 0.5, 1.0]) + @pytest.mark.parametrize("eps_init", [0.0, 0.5, 1]) @pytest.mark.parametrize("module", [True, False]) @pytest.mark.parametrize("spec_class", ["discrete", "one_hot"]) def test_egreedy_masked(self, module, eps_init, spec_class): diff --git a/torchrl/modules/tensordict_module/exploration.py b/torchrl/modules/tensordict_module/exploration.py index 46f71e2b3d6..5c8ae799061 100644 --- a/torchrl/modules/tensordict_module/exploration.py +++ b/torchrl/modules/tensordict_module/exploration.py @@ -110,7 +110,7 @@ def __init__( self.register_buffer("eps_init", torch.tensor([eps_init])) self.register_buffer("eps_end", torch.tensor([eps_end])) self.annealing_num_steps = annealing_num_steps - self.register_buffer("eps", torch.tensor([eps_init])) + self.register_buffer("eps", torch.tensor([eps_init], dtype=torch.float32)) if spec is not None: if not isinstance(spec, CompositeSpec) and len(self.out_keys) >= 1: @@ -259,7 +259,7 @@ def __init__( if self.eps_end > self.eps_init: raise RuntimeError("eps should decrease over time or be constant") self.annealing_num_steps = annealing_num_steps - self.register_buffer("eps", torch.tensor([eps_init])) + self.register_buffer("eps", torch.tensor([eps_init], dtype=torch.float32)) self.action_key = action_key self.action_mask_key = action_mask_key if spec is not None: @@ -405,7 +405,7 @@ def __init__( self.annealing_num_steps = annealing_num_steps self.register_buffer("mean", torch.tensor([mean])) self.register_buffer("std", torch.tensor([std])) - self.register_buffer("sigma", torch.tensor([sigma_init])) + self.register_buffer("sigma", torch.tensor([sigma_init], dtype=torch.float32)) self.action_key = action_key self.out_keys = list(self.td_module.out_keys) if action_key not in self.out_keys: @@ -613,7 +613,7 @@ def __init__( f"got eps_init={eps_init} and eps_end={eps_end}" ) self.annealing_num_steps = annealing_num_steps - self.register_buffer("eps", torch.tensor([eps_init])) + self.register_buffer("eps", torch.tensor([eps_init], dtype=torch.float32)) self.out_keys = list(self.td_module.out_keys) + self.ou.out_keys self.is_init_key = is_init_key noise_key = self.ou.noise_key From 5cac16a0bc1e1265a1ff6b5e923f859f2fc3929e Mon Sep 17 00:00:00 2001 From: Albert Bou Date: Sun, 19 Nov 2023 20:24:54 +0100 Subject: [PATCH 10/21] [Fix] EndOfLifeTransform fix in end of life detection (#1705) --- torchrl/envs/transforms/gym_transforms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchrl/envs/transforms/gym_transforms.py b/torchrl/envs/transforms/gym_transforms.py index f3a9f2aa469..5645785117d 100644 --- a/torchrl/envs/transforms/gym_transforms.py +++ b/torchrl/envs/transforms/gym_transforms.py @@ -148,7 +148,7 @@ def _step(self, tensordict, next_tensordict): lives = self._get_lives() end_of_life = torch.tensor( - tensordict.get(self.lives_key) < lives, device=self.parent.device + tensordict.get(self.lives_key) > lives, device=self.parent.device ) try: done = next_tensordict.get(self.done_key) From c2edf357d10ae93f18eaf06df7f19212949ee30e Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Mon, 20 Nov 2023 16:54:15 +0000 Subject: [PATCH 11/21] [CI] Fix benchmark on gpu (#1706) Co-authored-by: DanilBaibak --- .github/workflows/benchmarks.yml | 116 
+++++++++--------- .github/workflows/benchmarks_pr.yml | 142 ++++++++++++----------- benchmarks/test_collectors_benchmark.py | 15 ++- benchmarks/test_objectives_benchmarks.py | 2 +- 4 files changed, 143 insertions(+), 132 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 1a2384a1df1..01d880708f4 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -34,7 +34,8 @@ jobs: python -m pip install git+https://github.com/pytorch/tensordict python setup.py develop python -m pip install pytest pytest-benchmark - python -m pip install dm_control + python3 -m pip install "gym[accept-rom-license,atari]" + python3 -m pip install dm_control - name: Run benchmarks run: | cd benchmarks/ @@ -57,62 +58,65 @@ jobs: benchmark_gpu: name: GPU Pytest benchmark - runs-on: ubuntu-20.04 - strategy: - matrix: - include: - - os: linux.4xlarge.nvidia.gpu - python-version: 3.8 + runs-on: linux.g5.4xlarge.nvidia.gpu defaults: run: shell: bash -l {0} - container: nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 + container: + image: nvidia/cuda:12.3.0-base-ubuntu22.04 + options: --gpus all steps: - - name: Install deps - run: | - export TZ=Europe/London - export DEBIAN_FRONTEND=noninteractive # tzdata bug - apt-get update -y - apt-get install software-properties-common -y - add-apt-repository ppa:git-core/candidate -y - apt-get update -y - apt-get upgrade -y - apt-get -y install libglu1-mesa libgl1-mesa-glx libosmesa6 gcc curl g++ unzip wget libglfw3-dev libgles2-mesa-dev libglew-dev sudo git cmake libz-dev - - name: Check ldd --version - run: ldd --version - - name: Checkout - uses: actions/checkout@v3 - - name: Update pip - run: | - apt-get install python3.8 python3-pip -y - pip3 install --upgrade pip - - name: Setup git - run: git config --global --add safe.directory /__w/rl/rl - - name: setup Path - run: | - echo /usr/local/bin >> $GITHUB_PATH - - name: Setup Environment - run: | - python3 -m pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu118 - python3 -m pip install git+https://github.com/pytorch/tensordict - python3 setup.py develop - python3 -m pip install pytest pytest-benchmark - python3 -m pip install dm_control - - name: Run benchmarks - run: | - cd benchmarks/ - python3 -m pytest --benchmark-json output.json - - name: Store benchmark results - uses: benchmark-action/github-action-benchmark@v1 - if: ${{ github.ref == 'refs/heads/main' || github.event_name == 'workflow_dispatch' }} - with: - name: GPU Benchmark Results - tool: 'pytest' - output-file-path: benchmarks/output.json - fail-on-alert: true - alert-threshold: '200%' - alert-comment-cc-users: '@vmoens' - comment-on-alert: true - github-token: ${{ secrets.GITHUB_TOKEN }} - gh-pages-branch: gh-pages - auto-push: true + - name: Install deps + run: | + export TZ=Europe/London + export DEBIAN_FRONTEND=noninteractive # tzdata bug + apt-get update -y + apt-get install software-properties-common -y + add-apt-repository ppa:git-core/candidate -y + apt-get update -y + apt-get upgrade -y + apt-get -y install libglu1-mesa libgl1-mesa-glx libosmesa6 gcc curl g++ unzip wget libglfw3-dev libgles2-mesa-dev libglew-dev sudo git cmake libz-dev + - name: Check ldd --version + run: ldd --version + - name: Checkout + uses: actions/checkout@v3 + - name: Python Setup + uses: actions/setup-python@v4 + with: + python-version: 3.8 + - name: Setup git + run: git config --global --add safe.directory /__w/rl/rl + - name: setup Path + run: | + echo /usr/local/bin >> 
$GITHUB_PATH + - name: Setup Environment + run: | + python3 -m pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 + python3 -m pip install git+https://github.com/pytorch/tensordict + python3 setup.py develop + python3 -m pip install pytest pytest-benchmark + python3 -m pip install "gym[accept-rom-license,atari]" + python3 -m pip install dm_control + - name: check GPU presence + run: | + python -c """import torch + assert torch.cuda.device_count() + """ + - name: Run benchmarks + run: | + cd benchmarks/ + python3 -m pytest --benchmark-json output.json + - name: Store benchmark results + uses: benchmark-action/github-action-benchmark@v1 + if: ${{ github.ref == 'refs/heads/main' || github.event_name == 'workflow_dispatch' }} + with: + name: GPU Benchmark Results + tool: 'pytest' + output-file-path: benchmarks/output.json + fail-on-alert: true + alert-threshold: '200%' + alert-comment-cc-users: '@vmoens' + comment-on-alert: true + github-token: ${{ secrets.GITHUB_TOKEN }} + gh-pages-branch: gh-pages + auto-push: true diff --git a/.github/workflows/benchmarks_pr.yml b/.github/workflows/benchmarks_pr.yml index e44c683a6d6..0f0ad3e5723 100644 --- a/.github/workflows/benchmarks_pr.yml +++ b/.github/workflows/benchmarks_pr.yml @@ -33,7 +33,8 @@ jobs: python -m pip install git+https://github.com/pytorch/tensordict python setup.py develop python -m pip install pytest pytest-benchmark - python -m pip install dm_control + python3 -m pip install "gym[accept-rom-license,atari]" + python3 -m pip install dm_control - name: Setup benchmarks run: | echo "BASE_SHA=$(echo ${{ github.event.pull_request.base.sha }} | cut -c1-8)" >> $GITHUB_ENV @@ -63,75 +64,78 @@ jobs: benchmark_gpu: name: GPU Pytest benchmark - runs-on: ubuntu-20.04 - strategy: - matrix: - include: - - os: linux.4xlarge.nvidia.gpu - python-version: 3.8 + runs-on: linux.g5.4xlarge.nvidia.gpu defaults: run: shell: bash -l {0} - container: nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 + container: + image: nvidia/cuda:12.3.0-base-ubuntu22.04 + options: --gpus all steps: - - name: Who triggered this? 
- run: | - echo "Action triggered by ${{ github.event.pull_request.html_url }}" - - name: Install deps - run: | - export TZ=Europe/London - export DEBIAN_FRONTEND=noninteractive # tzdata bug - apt-get update -y - apt-get install software-properties-common -y - add-apt-repository ppa:git-core/candidate -y - apt-get update -y - apt-get upgrade -y - apt-get -y install libglu1-mesa libgl1-mesa-glx libosmesa6 gcc curl g++ unzip wget libglfw3-dev libgles2-mesa-dev libglew-dev sudo git cmake libz-dev - - name: Check ldd --version - run: ldd --version - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 50 # this is to make sure we obtain the target base commit - - name: Update pip - run: | - apt-get install python3.8 python3-pip -y - pip3 install --upgrade pip - - name: Setup git - run: git config --global --add safe.directory /__w/rl/rl - - name: setup Path - run: | - echo /usr/local/bin >> $GITHUB_PATH - - name: Setup Environment - run: | - python3 -m pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu118 - python3 -m pip install git+https://github.com/pytorch/tensordict - python3 setup.py develop - python3 -m pip install pytest pytest-benchmark - python3 -m pip install dm_control - - name: Setup benchmarks - run: | - echo "BASE_SHA=$(echo ${{ github.event.pull_request.base.sha }} | cut -c1-8)" >> $GITHUB_ENV - echo "HEAD_SHA=$(echo ${{ github.event.pull_request.head.sha }} | cut -c1-8)" >> $GITHUB_ENV - echo "BASELINE_JSON=$(mktemp)" >> $GITHUB_ENV - echo "CONTENDER_JSON=$(mktemp)" >> $GITHUB_ENV - echo "PR_COMMENT=$(mktemp)" >> $GITHUB_ENV - - name: Run benchmarks - run: | - cd benchmarks/ - RUN_BENCHMARK="pytest --rank 0 --benchmark-json " - git checkout ${{ github.event.pull_request.base.sha }} - $RUN_BENCHMARK ${{ env.BASELINE_JSON }} - git checkout ${{ github.event.pull_request.head.sha }} - $RUN_BENCHMARK ${{ env.CONTENDER_JSON }} - - name: Publish results - uses: apbard/pytest-benchmark-commenter@v3 - with: - token: ${{ secrets.GITHUB_TOKEN }} - benchmark-file: ${{ env.CONTENDER_JSON }} - comparison-benchmark-file: ${{ env.BASELINE_JSON }} - benchmark-metrics: 'name,max,mean,ops' - comparison-benchmark-metric: 'ops' - comparison-higher-is-better: true - comparison-threshold: 5 - benchmark-title: 'Result of GPU Benchmark Tests' + - name: Who triggered this? 
+ run: | + echo "Action triggered by ${{ github.event.pull_request.html_url }}" + - name: Install deps + run: | + export TZ=Europe/London + export DEBIAN_FRONTEND=noninteractive # tzdata bug + apt-get update -y + apt-get install software-properties-common -y + add-apt-repository ppa:git-core/candidate -y + apt-get update -y + apt-get upgrade -y + apt-get -y install libglu1-mesa libgl1-mesa-glx libosmesa6 gcc curl g++ unzip wget libglfw3-dev libgles2-mesa-dev libglew-dev sudo git cmake libz-dev + - name: Check ldd --version + run: ldd --version + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 50 # this is to make sure we obtain the target base commit + - name: Python Setup + uses: actions/setup-python@v4 + with: + python-version: 3.8 + - name: Setup git + run: git config --global --add safe.directory /__w/rl/rl + - name: setup Path + run: | + echo /usr/local/bin >> $GITHUB_PATH + - name: Setup Environment + run: | + python3 -m pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 + python3 -m pip install git+https://github.com/pytorch/tensordict + python3 setup.py develop + python3 -m pip install pytest pytest-benchmark + python3 -m pip install "gym[accept-rom-license,atari]" + python3 -m pip install dm_control + - name: check GPU presence + run: | + python -c """import torch + assert torch.cuda.device_count() + """ + - name: Setup benchmarks + run: | + echo "BASE_SHA=$(echo ${{ github.event.pull_request.base.sha }} | cut -c1-8)" >> $GITHUB_ENV + echo "HEAD_SHA=$(echo ${{ github.event.pull_request.head.sha }} | cut -c1-8)" >> $GITHUB_ENV + echo "BASELINE_JSON=$(mktemp)" >> $GITHUB_ENV + echo "CONTENDER_JSON=$(mktemp)" >> $GITHUB_ENV + echo "PR_COMMENT=$(mktemp)" >> $GITHUB_ENV + - name: Run benchmarks + run: | + cd benchmarks/ + RUN_BENCHMARK="pytest --rank 0 --benchmark-json " + git checkout ${{ github.event.pull_request.base.sha }} + $RUN_BENCHMARK ${{ env.BASELINE_JSON }} + git checkout ${{ github.event.pull_request.head.sha }} + $RUN_BENCHMARK ${{ env.CONTENDER_JSON }} + - name: Publish results + uses: apbard/pytest-benchmark-commenter@v3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + benchmark-file: ${{ env.CONTENDER_JSON }} + comparison-benchmark-file: ${{ env.BASELINE_JSON }} + benchmark-metrics: 'name,max,mean,ops' + comparison-benchmark-metric: 'ops' + comparison-higher-is-better: true + comparison-threshold: 5 + benchmark-title: 'Result of GPU Benchmark Tests' diff --git a/benchmarks/test_collectors_benchmark.py b/benchmarks/test_collectors_benchmark.py index 9f6c4599587..1e9634f643f 100644 --- a/benchmarks/test_collectors_benchmark.py +++ b/benchmarks/test_collectors_benchmark.py @@ -13,7 +13,7 @@ MultiSyncDataCollector, RandomPolicy, ) -from torchrl.envs import EnvCreator, StepCounter, TransformedEnv +from torchrl.envs import EnvCreator, GymEnv, StepCounter, TransformedEnv from torchrl.envs.libs.dm_control import DMControlEnv @@ -78,9 +78,10 @@ def async_collector_setup(): def single_collector_setup_pixels(): device = "cuda:0" if torch.cuda.device_count() else "cpu" - env = TransformedEnv( - DMControlEnv("cheetah", "run", device=device, from_pixels=True), StepCounter(50) - ) + # env = TransformedEnv( + # DMControlEnv("cheetah", "run", device=device, from_pixels=True), StepCounter(50) + # ) + env = TransformedEnv(GymEnv("ALE/Pong-v5"), StepCounter(50)) c = SyncDataCollector( env, RandomPolicy(env.action_spec), @@ -99,7 +100,8 @@ def sync_collector_setup_pixels(): device = "cuda:0" if 
torch.cuda.device_count() else "cpu" env = EnvCreator( lambda: TransformedEnv( - DMControlEnv("cheetah", "run", device=device, from_pixels=True), + # DMControlEnv("cheetah", "run", device=device, from_pixels=True), + GymEnv("ALE/Pong-v5"), StepCounter(50), ) ) @@ -121,7 +123,8 @@ def async_collector_setup_pixels(): device = "cuda:0" if torch.cuda.device_count() else "cpu" env = EnvCreator( lambda: TransformedEnv( - DMControlEnv("cheetah", "run", device=device, from_pixels=True), + # DMControlEnv("cheetah", "run", device=device, from_pixels=True), + GymEnv("ALE/Pong-v5"), StepCounter(50), ) ) diff --git a/benchmarks/test_objectives_benchmarks.py b/benchmarks/test_objectives_benchmarks.py index ca5b7eb82ed..d07e8f5da90 100644 --- a/benchmarks/test_objectives_benchmarks.py +++ b/benchmarks/test_objectives_benchmarks.py @@ -123,7 +123,7 @@ def test_gae_speed(benchmark, gae_fn, gamma_tensor, batches, timesteps): gamma = 0.99 if gamma_tensor: - gamma = torch.full(size, gamma) + gamma = torch.full(size, gamma, device=device) lmbda = 0.95 benchmark( From b38d4b793508c2cb16a062d10d6e6c1029638398 Mon Sep 17 00:00:00 2001 From: Albert Bou Date: Thu, 23 Nov 2023 21:41:28 +0100 Subject: [PATCH 12/21] [Algorithm] IMPALA and VTrace module (#1506) Co-authored-by: Vincent Moens --- .../linux_examples/scripts/run_test.sh | 6 + .../collectors/multi_nodes/ray_train.py | 2 +- examples/impala/README.md | 33 + examples/impala/config_multi_node_ray.yaml | 65 ++ .../impala/config_multi_node_submitit.yaml | 46 ++ examples/impala/config_single_node.yaml | 38 ++ examples/impala/impala_multi_node_ray.py | 278 ++++++++ examples/impala/impala_multi_node_submitit.py | 270 ++++++++ examples/impala/impala_single_node.py | 248 +++++++ examples/impala/utils.py | 182 +++++ test/test_cost.py | 620 ++++++++++++++---- torchrl/objectives/a2c.py | 26 +- torchrl/objectives/common.py | 6 +- torchrl/objectives/ppo.py | 19 +- torchrl/objectives/reinforce.py | 25 +- torchrl/objectives/utils.py | 3 + torchrl/objectives/value/__init__.py | 1 + torchrl/objectives/value/advantages.py | 313 ++++++++- torchrl/objectives/value/functional.py | 88 +++ torchrl/objectives/value/vtrace.py | 58 -- 20 files changed, 2140 insertions(+), 187 deletions(-) create mode 100644 examples/impala/README.md create mode 100644 examples/impala/config_multi_node_ray.yaml create mode 100644 examples/impala/config_multi_node_submitit.yaml create mode 100644 examples/impala/config_single_node.yaml create mode 100644 examples/impala/impala_multi_node_ray.py create mode 100644 examples/impala/impala_multi_node_submitit.py create mode 100644 examples/impala/impala_single_node.py create mode 100644 examples/impala/utils.py delete mode 100644 torchrl/objectives/value/vtrace.py diff --git a/.github/unittest/linux_examples/scripts/run_test.sh b/.github/unittest/linux_examples/scripts/run_test.sh index 1abf951c44b..5b57815c444 100755 --- a/.github/unittest/linux_examples/scripts/run_test.sh +++ b/.github/unittest/linux_examples/scripts/run_test.sh @@ -52,6 +52,12 @@ python .github/unittest/helpers/coverage_run_parallel.py examples/decision_trans # ==================================================================================== # # ================================ Gymnasium ========================================= # +python .github/unittest/helpers/coverage_run_parallel.py examples/impala/impala_single_node.py \ + collector.total_frames=80 \ + collector.frames_per_batch=20 \ + collector.num_workers=1 \ + logger.backend= \ + logger.test_interval=10 python 
.github/unittest/helpers/coverage_run_parallel.py examples/ppo/ppo_mujoco.py \ env.env_name=HalfCheetah-v4 \ collector.total_frames=40 \ diff --git a/examples/distributed/collectors/multi_nodes/ray_train.py b/examples/distributed/collectors/multi_nodes/ray_train.py index a5265f442b7..360c6daac28 100644 --- a/examples/distributed/collectors/multi_nodes/ray_train.py +++ b/examples/distributed/collectors/multi_nodes/ray_train.py @@ -117,7 +117,7 @@ "object_store_memory": 1024**3, } collector = RayCollector( - env_makers=[env] * num_collectors, + create_env_fn=[env] * num_collectors, policy=policy_module, collector_class=SyncDataCollector, collector_kwargs={ diff --git a/examples/impala/README.md b/examples/impala/README.md new file mode 100644 index 00000000000..00e0d010b82 --- /dev/null +++ b/examples/impala/README.md @@ -0,0 +1,33 @@ +## Reproducing Importance Weighted Actor-Learner Architecture (IMPALA) Algorithm Results + +This repository contains scripts that enable training agents using the IMPALA Algorithm on Atari environments. We follow the original paper [IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner Architectures](https://arxiv.org/abs/1802.01561) by Espeholt et al. 2018. + +## Examples Structure + +Please note that we provide one example for single-node training and two examples for distributed training (one based on Ray and one based on submitit). All examples rely on the same utils file, but besides that are independent. Each example contains the following files: + +1. **Main Script:** The definition of the algorithm components and the training loop can be found in the main script (e.g. impala_single_node.py). + +2. **Utils File:** A utility file is provided to contain various helper functions, generally to create the environment and the models (e.g. utils.py). + +3. **Configuration File:** This file includes the default hyperparameters specified in the original paper. For the multi-node examples, it also includes the Ray cluster or SLURM configuration. Users can modify these hyperparameters to customize their experiments (e.g. config_single_node.yaml), as shown in the example below.
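+ +Since the examples are configured with Hydra, these hyperparameters can also be overridden directly from the command line instead of editing the YAML files. A minimal illustration (the keys below come from config_single_node.yaml and the values shown are arbitrary; an empty logger.backend disables logging): + +```bash +python impala_single_node.py collector.num_workers=4 logger.backend= +```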
+ + +## Running the Examples + +You can execute the single-node IMPALA algorithm on Atari environments by running the following command: + +```bash +python impala_single_node.py +``` + +You can execute the multi-node IMPALA algorithm on Atari environments by running one of the following commands: + +```bash +python impala_multi_node_ray.py +``` +or + +```bash +python impala_multi_node_submitit.py +``` diff --git a/examples/impala/config_multi_node_ray.yaml b/examples/impala/config_multi_node_ray.yaml new file mode 100644 index 00000000000..e312b336651 --- /dev/null +++ b/examples/impala/config_multi_node_ray.yaml @@ -0,0 +1,65 @@ +# Environment +env: + env_name: PongNoFrameskip-v4 + +# Ray init kwargs - https://docs.ray.io/en/latest/ray-core/api/doc/ray.init.html +ray_init_config: + address: null + num_cpus: null + num_gpus: null + resources: null + object_store_memory: null + local_mode: False + ignore_reinit_error: False + include_dashboard: null + dashboard_host: 127.0.0.1 + dashboard_port: null + job_config: null + configure_logging: True + logging_level: info + logging_format: null + log_to_driver: True + namespace: null + runtime_env: null + storage: null + +# Device for the forward and backward passes +local_device: "cuda:0" + +# Resources assigned to each IMPALA rollout collection worker +remote_worker_resources: + num_cpus: 1 + num_gpus: 0.25 + memory: 1073741824 # 1 * 1024**3 bytes (1 GB) + +# collector +collector: + frames_per_batch: 80 + total_frames: 200_000_000 + num_workers: 12 + +# logger +logger: + backend: wandb + exp_name: Atari_IMPALA + test_interval: 200_000_000 + num_test_episodes: 3 + +# Optim +optim: + lr: 0.0006 + eps: 1e-8 + weight_decay: 0.0 + momentum: 0.0 + alpha: 0.99 + max_grad_norm: 40.0 + anneal_lr: True + +# loss +loss: + gamma: 0.99 + batch_size: 32 + sgd_updates: 1 + critic_coef: 0.5 + entropy_coef: 0.01 + loss_critic_type: l2 diff --git a/examples/impala/config_multi_node_submitit.yaml b/examples/impala/config_multi_node_submitit.yaml new file mode 100644 index 00000000000..f632ba15dc2 --- /dev/null +++ b/examples/impala/config_multi_node_submitit.yaml @@ -0,0 +1,46 @@ +# Environment +env: + env_name: PongNoFrameskip-v4 + +# Device for the forward and backward passes +local_device: "cuda:0" + +# SLURM config +slurm_config: + timeout_min: 10 + slurm_partition: train + slurm_cpus_per_task: 1 + slurm_gpus_per_node: 1 + +# collector +collector: + backend: gloo + frames_per_batch: 80 + total_frames: 200_000_000 + num_workers: 1 + +# logger +logger: + backend: wandb + exp_name: Atari_IMPALA + test_interval: 200_000_000 + num_test_episodes: 3 + +# Optim +optim: + lr: 0.0006 + eps: 1e-8 + weight_decay: 0.0 + momentum: 0.0 + alpha: 0.99 + max_grad_norm: 40.0 + anneal_lr: True + +# loss +loss: + gamma: 0.99 + batch_size: 32 + sgd_updates: 1 + critic_coef: 0.5 + entropy_coef: 0.01 + loss_critic_type: l2 diff --git a/examples/impala/config_single_node.yaml b/examples/impala/config_single_node.yaml new file mode 100644 index 00000000000..d39407c1a69 --- /dev/null +++ b/examples/impala/config_single_node.yaml @@ -0,0 +1,38 @@ +# Environment +env: + env_name: PongNoFrameskip-v4 + +# Device for the forward and backward passes +device: "cuda:0" + +# collector +collector: + frames_per_batch: 80 + total_frames: 200_000_000 + num_workers: 12 + +# logger +logger: + backend: wandb + exp_name: Atari_IMPALA + test_interval: 200_000_000 + num_test_episodes: 3 + +# Optim +optim: + lr: 0.0006 + eps: 1e-8 + weight_decay: 0.0 + momentum: 0.0 + alpha: 0.99 + max_grad_norm: 40.0 + anneal_lr: True +
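+# Illustrative note (based on the training loop in the example scripts): when anneal_lr is True, +# the learning rate is decayed linearly over roughly +# (total_frames // (frames_per_batch * batch_size)) * sgd_updates optimizer steps. +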
+# loss +loss: + gamma: 0.99 + batch_size: 32 + sgd_updates: 1 + critic_coef: 0.5 + entropy_coef: 0.01 + loss_critic_type: l2 diff --git a/examples/impala/impala_multi_node_ray.py b/examples/impala/impala_multi_node_ray.py new file mode 100644 index 00000000000..a0d2d88c5a2 --- /dev/null +++ b/examples/impala/impala_multi_node_ray.py @@ -0,0 +1,278 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +This script reproduces the IMPALA Algorithm +results from Espeholt et al. 2018 for the on Atari Environments. +""" +import hydra + + +@hydra.main(config_path=".", config_name="config_multi_node_ray", version_base="1.1") +def main(cfg: "DictConfig"): # noqa: F821 + + import time + + import torch.optim + import tqdm + + from tensordict import TensorDict + from torchrl.collectors import SyncDataCollector + from torchrl.collectors.distributed import RayCollector + from torchrl.data import LazyMemmapStorage, TensorDictReplayBuffer + from torchrl.data.replay_buffers.samplers import SamplerWithoutReplacement + from torchrl.envs import ExplorationType, set_exploration_type + from torchrl.objectives import A2CLoss + from torchrl.objectives.value import VTrace + from torchrl.record.loggers import generate_exp_name, get_logger + from utils import eval_model, make_env, make_ppo_models + + device = torch.device(cfg.local_device) + + # Correct for frame_skip + frame_skip = 4 + total_frames = cfg.collector.total_frames // frame_skip + frames_per_batch = cfg.collector.frames_per_batch // frame_skip + test_interval = cfg.logger.test_interval // frame_skip + + # Extract other config parameters + batch_size = cfg.loss.batch_size # Number of rollouts per batch + num_workers = ( + cfg.collector.num_workers + ) # Number of parallel workers collecting rollouts + lr = cfg.optim.lr + anneal_lr = cfg.optim.anneal_lr + sgd_updates = cfg.loss.sgd_updates + max_grad_norm = cfg.optim.max_grad_norm + num_test_episodes = cfg.logger.num_test_episodes + total_network_updates = ( + total_frames // (frames_per_batch * batch_size) + ) * cfg.loss.sgd_updates + + # Create models (check utils.py) + actor, critic = make_ppo_models(cfg.env.env_name) + actor, critic = actor.to(device), critic.to(device) + + # Create collector + ray_init_config = { + "address": cfg.ray_init_config.address, + "num_cpus": cfg.ray_init_config.num_cpus, + "num_gpus": cfg.ray_init_config.num_gpus, + "resources": cfg.ray_init_config.resources, + "object_store_memory": cfg.ray_init_config.object_store_memory, + "local_mode": cfg.ray_init_config.local_mode, + "ignore_reinit_error": cfg.ray_init_config.ignore_reinit_error, + "include_dashboard": cfg.ray_init_config.include_dashboard, + "dashboard_host": cfg.ray_init_config.dashboard_host, + "dashboard_port": cfg.ray_init_config.dashboard_port, + "job_config": cfg.ray_init_config.job_config, + "configure_logging": cfg.ray_init_config.configure_logging, + "logging_level": cfg.ray_init_config.logging_level, + "logging_format": cfg.ray_init_config.logging_format, + "log_to_driver": cfg.ray_init_config.log_to_driver, + "namespace": cfg.ray_init_config.namespace, + "runtime_env": cfg.ray_init_config.runtime_env, + "storage": cfg.ray_init_config.storage, + } + remote_config = { + "num_cpus": cfg.remote_worker_resources.num_cpus, + "num_gpus": cfg.remote_worker_resources.num_gpus + if torch.cuda.device_count() + else 0, + "memory": cfg.remote_worker_resources.memory, + } + 
collector = RayCollector( + create_env_fn=[make_env(cfg.env.env_name, device)] * num_workers, + policy=actor, + collector_class=SyncDataCollector, + frames_per_batch=frames_per_batch, + total_frames=total_frames, + max_frames_per_traj=-1, + ray_init_config=ray_init_config, + remote_configs=remote_config, + sync=False, + update_after_each_batch=True, + ) + + # Create data buffer + sampler = SamplerWithoutReplacement() + data_buffer = TensorDictReplayBuffer( + storage=LazyMemmapStorage(frames_per_batch * batch_size), + sampler=sampler, + batch_size=frames_per_batch * batch_size, + ) + + # Create loss and adv modules + adv_module = VTrace( + gamma=cfg.loss.gamma, + value_network=critic, + actor_network=actor, + average_adv=False, + ) + loss_module = A2CLoss( + actor=actor, + critic=critic, + loss_critic_type=cfg.loss.loss_critic_type, + entropy_coef=cfg.loss.entropy_coef, + critic_coef=cfg.loss.critic_coef, + ) + loss_module.set_keys(done="eol", terminated="eol") + + # Create optimizer + optim = torch.optim.RMSprop( + loss_module.parameters(), + lr=cfg.optim.lr, + weight_decay=cfg.optim.weight_decay, + eps=cfg.optim.eps, + alpha=cfg.optim.alpha, + ) + + # Create logger + logger = None + if cfg.logger.backend: + exp_name = generate_exp_name( + "IMPALA", f"{cfg.logger.exp_name}_{cfg.env.env_name}" + ) + logger = get_logger( + cfg.logger.backend, + logger_name="impala", + experiment_name=exp_name, + project="impala", + ) + + # Create test environment + test_env = make_env(cfg.env.env_name, device, is_test=True) + test_env.eval() + + # Main loop + collected_frames = 0 + num_network_updates = 0 + pbar = tqdm.tqdm(total=total_frames) + accumulator = [] + start_time = sampling_start = time.time() + for i, data in enumerate(collector): + + log_info = {} + sampling_time = time.time() - sampling_start + frames_in_batch = data.numel() + collected_frames += frames_in_batch * frame_skip + pbar.update(data.numel()) + + # Get training rewards and episode lengths + episode_rewards = data["next", "episode_reward"][data["next", "terminated"]] + if len(episode_rewards) > 0: + episode_length = data["next", "step_count"][data["next", "terminated"]] + log_info.update( + { + "train/reward": episode_rewards.mean().item(), + "train/episode_length": episode_length.sum().item() + / len(episode_length), + } + ) + + if len(accumulator) < batch_size: + accumulator.append(data) + if logger: + for key, value in log_info.items(): + logger.log_scalar(key, value, collected_frames) + continue + + losses = TensorDict({}, batch_size=[sgd_updates]) + training_start = time.time() + for j in range(sgd_updates): + + # Create a single batch of trajectories + stacked_data = torch.stack(accumulator, dim=0).contiguous() + stacked_data = stacked_data.to(device, non_blocking=True) + + # Compute advantage + with torch.no_grad(): + stacked_data = adv_module(stacked_data) + + # Add to replay buffer + for stacked_d in stacked_data: + stacked_data_reshape = stacked_d.reshape(-1) + data_buffer.extend(stacked_data_reshape) + + for batch in data_buffer: + + # Linearly decrease the learning rate and clip epsilon + alpha = 1.0 + if anneal_lr: + alpha = 1 - (num_network_updates / total_network_updates) + for group in optim.param_groups: + group["lr"] = lr * alpha + num_network_updates += 1 + + # Get a data batch + batch = batch.to(device, non_blocking=True) + + # Forward pass loss + loss = loss_module(batch) + losses[j] = loss.select( + "loss_critic", "loss_entropy", "loss_objective" + ).detach() + loss_sum = ( + loss["loss_critic"] + 
loss["loss_objective"] + loss["loss_entropy"] + ) + + # Backward pass + loss_sum.backward() + torch.nn.utils.clip_grad_norm_( + list(loss_module.parameters()), max_norm=max_grad_norm + ) + + # Update the networks + optim.step() + optim.zero_grad() + + # Get training losses and times + training_time = time.time() - training_start + losses = losses.apply(lambda x: x.float().mean(), batch_size=[]) + for key, value in losses.items(): + log_info.update({f"train/{key}": value.item()}) + log_info.update( + { + "train/lr": alpha * lr, + "train/sampling_time": sampling_time, + "train/training_time": training_time, + } + ) + + # Get test rewards + with torch.no_grad(), set_exploration_type(ExplorationType.MODE): + if ((i - 1) * frames_in_batch * frame_skip) // test_interval < ( + i * frames_in_batch * frame_skip + ) // test_interval: + actor.eval() + eval_start = time.time() + test_reward = eval_model( + actor, test_env, num_episodes=num_test_episodes + ) + eval_time = time.time() - eval_start + log_info.update( + { + "eval/reward": test_reward, + "eval/time": eval_time, + } + ) + actor.train() + + if logger: + for key, value in log_info.items(): + logger.log_scalar(key, value, collected_frames) + + collector.update_policy_weights_() + sampling_start = time.time() + accumulator = [] + + collector.shutdown() + end_time = time.time() + execution_time = end_time - start_time + print(f"Training took {execution_time:.2f} seconds to finish") + + +if __name__ == "__main__": + main() diff --git a/examples/impala/impala_multi_node_submitit.py b/examples/impala/impala_multi_node_submitit.py new file mode 100644 index 00000000000..3355febbfaf --- /dev/null +++ b/examples/impala/impala_multi_node_submitit.py @@ -0,0 +1,270 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +This script reproduces the IMPALA Algorithm +results from Espeholt et al. 2018 for the on Atari Environments. 
+""" +import hydra + + +@hydra.main( + config_path=".", config_name="config_multi_node_submitit", version_base="1.1" +) +def main(cfg: "DictConfig"): # noqa: F821 + + import time + + import torch.optim + import tqdm + + from tensordict import TensorDict + from torchrl.collectors import SyncDataCollector + from torchrl.collectors.distributed import DistributedDataCollector + from torchrl.data import LazyMemmapStorage, TensorDictReplayBuffer + from torchrl.data.replay_buffers.samplers import SamplerWithoutReplacement + from torchrl.envs import ExplorationType, set_exploration_type + from torchrl.objectives import A2CLoss + from torchrl.objectives.value import VTrace + from torchrl.record.loggers import generate_exp_name, get_logger + from utils import eval_model, make_env, make_ppo_models + + device = torch.device(cfg.local_device) + + # Correct for frame_skip + frame_skip = 4 + total_frames = cfg.collector.total_frames // frame_skip + frames_per_batch = cfg.collector.frames_per_batch // frame_skip + test_interval = cfg.logger.test_interval // frame_skip + + # Extract other config parameters + batch_size = cfg.loss.batch_size # Number of rollouts per batch + num_workers = ( + cfg.collector.num_workers + ) # Number of parallel workers collecting rollouts + lr = cfg.optim.lr + anneal_lr = cfg.optim.anneal_lr + sgd_updates = cfg.loss.sgd_updates + max_grad_norm = cfg.optim.max_grad_norm + num_test_episodes = cfg.logger.num_test_episodes + total_network_updates = ( + total_frames // (frames_per_batch * batch_size) + ) * cfg.loss.sgd_updates + + # Create models (check utils.py) + actor, critic = make_ppo_models(cfg.env.env_name) + actor, critic = actor.to(device), critic.to(device) + + slurm_kwargs = { + "timeout_min": cfg.slurm_config.timeout_min, + "slurm_partition": cfg.slurm_config.slurm_partition, + "slurm_cpus_per_task": cfg.slurm_config.slurm_cpus_per_task, + "slurm_gpus_per_node": cfg.slurm_config.slurm_gpus_per_node, + } + # Create collector + device_str = "device" if num_workers <= 1 else "devices" + if cfg.collector.backend == "nccl": + collector_kwargs = {device_str: "cuda:0", f"storing_{device_str}": "cuda:0"} + elif cfg.collector.backend == "gloo": + collector_kwargs = {device_str: "cpu", f"storing_{device_str}": "cpu"} + else: + raise NotImplementedError( + f"device assignment not implemented for backend {cfg.collector.backend}" + ) + collector = DistributedDataCollector( + create_env_fn=[make_env(cfg.env.env_name, device)] * num_workers, + policy=actor, + num_workers_per_collector=1, + frames_per_batch=frames_per_batch, + total_frames=total_frames, + collector_class=SyncDataCollector, + collector_kwargs=collector_kwargs, + slurm_kwargs=slurm_kwargs, + storing_device="cuda:0" if cfg.collector.backend == "nccl" else "cpu", + launcher="submitit", + # update_after_each_batch=True, + backend=cfg.collector.backend, + ) + + # Create data buffer + sampler = SamplerWithoutReplacement() + data_buffer = TensorDictReplayBuffer( + storage=LazyMemmapStorage(frames_per_batch * batch_size), + sampler=sampler, + batch_size=frames_per_batch * batch_size, + ) + + # Create loss and adv modules + adv_module = VTrace( + gamma=cfg.loss.gamma, + value_network=critic, + actor_network=actor, + average_adv=False, + ) + loss_module = A2CLoss( + actor=actor, + critic=critic, + loss_critic_type=cfg.loss.loss_critic_type, + entropy_coef=cfg.loss.entropy_coef, + critic_coef=cfg.loss.critic_coef, + ) + loss_module.set_keys(done="eol", terminated="eol") + + # Create optimizer + optim = torch.optim.RMSprop( + 
loss_module.parameters(), + lr=cfg.optim.lr, + weight_decay=cfg.optim.weight_decay, + eps=cfg.optim.eps, + alpha=cfg.optim.alpha, + ) + + # Create logger + logger = None + if cfg.logger.backend: + exp_name = generate_exp_name( + "IMPALA", f"{cfg.logger.exp_name}_{cfg.env.env_name}" + ) + logger = get_logger( + cfg.logger.backend, + logger_name="impala", + experiment_name=exp_name, + project="impala", + ) + + # Create test environment + test_env = make_env(cfg.env.env_name, device, is_test=True) + test_env.eval() + + # Main loop + collected_frames = 0 + num_network_updates = 0 + pbar = tqdm.tqdm(total=total_frames) + accumulator = [] + start_time = sampling_start = time.time() + for i, data in enumerate(collector): + + log_info = {} + sampling_time = time.time() - sampling_start + frames_in_batch = data.numel() + collected_frames += frames_in_batch * frame_skip + pbar.update(data.numel()) + + # Get training rewards and episode lengths + episode_rewards = data["next", "episode_reward"][data["next", "done"]] + if len(episode_rewards) > 0: + episode_length = data["next", "step_count"][data["next", "done"]] + log_info.update( + { + "train/reward": episode_rewards.mean().item(), + "train/episode_length": episode_length.sum().item() + / len(episode_length), + } + ) + + if len(accumulator) < batch_size: + accumulator.append(data) + if logger: + for key, value in log_info.items(): + logger.log_scalar(key, value, collected_frames) + continue + + losses = TensorDict({}, batch_size=[sgd_updates]) + training_start = time.time() + for j in range(sgd_updates): + + # Create a single batch of trajectories + stacked_data = torch.stack(accumulator, dim=0).contiguous() + stacked_data = stacked_data.to(device, non_blocking=True) + + # Compute advantage + with torch.no_grad(): + stacked_data = adv_module(stacked_data) + + # Add to replay buffer + for stacked_d in stacked_data: + stacked_data_reshape = stacked_d.reshape(-1) + data_buffer.extend(stacked_data_reshape) + + for batch in data_buffer: + + # Linearly decrease the learning rate and clip epsilon + alpha = 1.0 + if anneal_lr: + alpha = 1 - (num_network_updates / total_network_updates) + for group in optim.param_groups: + group["lr"] = lr * alpha + num_network_updates += 1 + + # Get a data batch + batch = batch.to(device) + + # Forward pass loss + loss = loss_module(batch) + losses[j] = loss.select( + "loss_critic", "loss_entropy", "loss_objective" + ).detach() + loss_sum = ( + loss["loss_critic"] + loss["loss_objective"] + loss["loss_entropy"] + ) + + # Backward pass + loss_sum.backward() + torch.nn.utils.clip_grad_norm_( + list(loss_module.parameters()), max_norm=max_grad_norm + ) + + # Update the networks + optim.step() + optim.zero_grad() + + # Get training losses and times + training_time = time.time() - training_start + losses = losses.apply(lambda x: x.float().mean(), batch_size=[]) + for key, value in losses.items(): + log_info.update({f"train/{key}": value.item()}) + log_info.update( + { + "train/lr": alpha * lr, + "train/sampling_time": sampling_time, + "train/training_time": training_time, + } + ) + + # Get test rewards + with torch.no_grad(), set_exploration_type(ExplorationType.MODE): + if ((i - 1) * frames_in_batch * frame_skip) // test_interval < ( + i * frames_in_batch * frame_skip + ) // test_interval: + actor.eval() + eval_start = time.time() + test_reward = eval_model( + actor, test_env, num_episodes=num_test_episodes + ) + eval_time = time.time() - eval_start + log_info.update( + { + "eval/reward": test_reward, + "eval/time": eval_time, 
+ } + ) + actor.train() + + if logger: + for key, value in log_info.items(): + logger.log_scalar(key, value, collected_frames) + + collector.update_policy_weights_() + sampling_start = time.time() + accumulator = [] + + collector.shutdown() + end_time = time.time() + execution_time = end_time - start_time + print(f"Training took {execution_time:.2f} seconds to finish") + + +if __name__ == "__main__": + main() diff --git a/examples/impala/impala_single_node.py b/examples/impala/impala_single_node.py new file mode 100644 index 00000000000..cd270f4c9e9 --- /dev/null +++ b/examples/impala/impala_single_node.py @@ -0,0 +1,248 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +This script reproduces the IMPALA Algorithm +results from Espeholt et al. 2018 for the on Atari Environments. +""" +import hydra + + +@hydra.main(config_path=".", config_name="config_single_node", version_base="1.1") +def main(cfg: "DictConfig"): # noqa: F821 + + import time + + import torch.optim + import tqdm + + from tensordict import TensorDict + from torchrl.collectors import MultiaSyncDataCollector + from torchrl.data import LazyMemmapStorage, TensorDictReplayBuffer + from torchrl.data.replay_buffers.samplers import SamplerWithoutReplacement + from torchrl.envs import ExplorationType, set_exploration_type + from torchrl.objectives import A2CLoss + from torchrl.objectives.value import VTrace + from torchrl.record.loggers import generate_exp_name, get_logger + from utils import eval_model, make_env, make_ppo_models + + device = torch.device(cfg.device) + + # Correct for frame_skip + frame_skip = 4 + total_frames = cfg.collector.total_frames // frame_skip + frames_per_batch = cfg.collector.frames_per_batch // frame_skip + test_interval = cfg.logger.test_interval // frame_skip + + # Extract other config parameters + batch_size = cfg.loss.batch_size # Number of rollouts per batch + num_workers = ( + cfg.collector.num_workers + ) # Number of parallel workers collecting rollouts + lr = cfg.optim.lr + anneal_lr = cfg.optim.anneal_lr + sgd_updates = cfg.loss.sgd_updates + max_grad_norm = cfg.optim.max_grad_norm + num_test_episodes = cfg.logger.num_test_episodes + total_network_updates = ( + total_frames // (frames_per_batch * batch_size) + ) * cfg.loss.sgd_updates + + # Create models (check utils.py) + actor, critic = make_ppo_models(cfg.env.env_name) + actor, critic = actor.to(device), critic.to(device) + + # Create collector + collector = MultiaSyncDataCollector( + create_env_fn=[make_env(cfg.env.env_name, device)] * num_workers, + policy=actor, + frames_per_batch=frames_per_batch, + total_frames=total_frames, + device=device, + storing_device=device, + max_frames_per_traj=-1, + update_at_each_batch=True, + ) + + # Create data buffer + sampler = SamplerWithoutReplacement() + data_buffer = TensorDictReplayBuffer( + storage=LazyMemmapStorage(frames_per_batch * batch_size), + sampler=sampler, + batch_size=frames_per_batch * batch_size, + ) + + # Create loss and adv modules + adv_module = VTrace( + gamma=cfg.loss.gamma, + value_network=critic, + actor_network=actor, + average_adv=False, + ) + loss_module = A2CLoss( + actor=actor, + critic=critic, + loss_critic_type=cfg.loss.loss_critic_type, + entropy_coef=cfg.loss.entropy_coef, + critic_coef=cfg.loss.critic_coef, + ) + loss_module.set_keys(done="eol", terminated="eol") + + # Create optimizer + optim = torch.optim.RMSprop( + 
loss_module.parameters(), + lr=cfg.optim.lr, + weight_decay=cfg.optim.weight_decay, + eps=cfg.optim.eps, + alpha=cfg.optim.alpha, + ) + + # Create logger + logger = None + if cfg.logger.backend: + exp_name = generate_exp_name( + "IMPALA", f"{cfg.logger.exp_name}_{cfg.env.env_name}" + ) + logger = get_logger( + cfg.logger.backend, + logger_name="impala", + experiment_name=exp_name, + project="impala", + ) + + # Create test environment + test_env = make_env(cfg.env.env_name, device, is_test=True) + test_env.eval() + + # Main loop + collected_frames = 0 + num_network_updates = 0 + pbar = tqdm.tqdm(total=total_frames) + accumulator = [] + start_time = sampling_start = time.time() + for i, data in enumerate(collector): + + log_info = {} + sampling_time = time.time() - sampling_start + frames_in_batch = data.numel() + collected_frames += frames_in_batch * frame_skip + pbar.update(data.numel()) + + # Get training rewards and episode lengths + episode_rewards = data["next", "episode_reward"][data["next", "terminated"]] + if len(episode_rewards) > 0: + episode_length = data["next", "step_count"][data["next", "terminated"]] + log_info.update( + { + "train/reward": episode_rewards.mean().item(), + "train/episode_length": episode_length.sum().item() + / len(episode_length), + } + ) + + if len(accumulator) < batch_size: + accumulator.append(data) + if logger: + for key, value in log_info.items(): + logger.log_scalar(key, value, collected_frames) + continue + + losses = TensorDict({}, batch_size=[sgd_updates]) + training_start = time.time() + for j in range(sgd_updates): + + # Create a single batch of trajectories + stacked_data = torch.stack(accumulator, dim=0).contiguous() + stacked_data = stacked_data.to(device, non_blocking=True) + + # Compute advantage + with torch.no_grad(): + stacked_data = adv_module(stacked_data) + + # Add to replay buffer + for stacked_d in stacked_data: + stacked_data_reshape = stacked_d.reshape(-1) + data_buffer.extend(stacked_data_reshape) + + for batch in data_buffer: + + # Linearly decrease the learning rate and clip epsilon + alpha = 1.0 + if anneal_lr: + alpha = 1 - (num_network_updates / total_network_updates) + for group in optim.param_groups: + group["lr"] = lr * alpha + num_network_updates += 1 + + # Get a data batch + batch = batch.to(device, non_blocking=True) + + # Forward pass loss + loss = loss_module(batch) + losses[j] = loss.select( + "loss_critic", "loss_entropy", "loss_objective" + ).detach() + loss_sum = ( + loss["loss_critic"] + loss["loss_objective"] + loss["loss_entropy"] + ) + + # Backward pass + loss_sum.backward() + torch.nn.utils.clip_grad_norm_( + list(loss_module.parameters()), max_norm=max_grad_norm + ) + + # Update the networks + optim.step() + optim.zero_grad() + + # Get training losses and times + training_time = time.time() - training_start + losses = losses.apply(lambda x: x.float().mean(), batch_size=[]) + for key, value in losses.items(): + log_info.update({f"train/{key}": value.item()}) + log_info.update( + { + "train/lr": alpha * lr, + "train/sampling_time": sampling_time, + "train/training_time": training_time, + } + ) + + # Get test rewards + with torch.no_grad(), set_exploration_type(ExplorationType.MODE): + if ((i - 1) * frames_in_batch * frame_skip) // test_interval < ( + i * frames_in_batch * frame_skip + ) // test_interval: + actor.eval() + eval_start = time.time() + test_reward = eval_model( + actor, test_env, num_episodes=num_test_episodes + ) + eval_time = time.time() - eval_start + log_info.update( + { + "eval/reward": 
test_reward, + "eval/time": eval_time, + } + ) + actor.train() + + if logger: + for key, value in log_info.items(): + logger.log_scalar(key, value, collected_frames) + + collector.update_policy_weights_() + sampling_start = time.time() + accumulator = [] + + collector.shutdown() + end_time = time.time() + execution_time = end_time - start_time + print(f"Training took {execution_time:.2f} seconds to finish") + + +if __name__ == "__main__": + main() diff --git a/examples/impala/utils.py b/examples/impala/utils.py new file mode 100644 index 00000000000..2983f8a0193 --- /dev/null +++ b/examples/impala/utils.py @@ -0,0 +1,182 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import torch.nn +import torch.optim +from tensordict.nn import TensorDictModule +from torchrl.data import CompositeSpec +from torchrl.envs import ( + CatFrames, + DoubleToFloat, + EndOfLifeTransform, + ExplorationType, + GrayScale, + GymEnv, + NoopResetEnv, + Resize, + RewardClipping, + RewardSum, + StepCounter, + ToTensorImage, + TransformedEnv, + VecNorm, +) +from torchrl.modules import ( + ActorValueOperator, + ConvNet, + MLP, + OneHotCategorical, + ProbabilisticActor, + ValueOperator, +) + + +# ==================================================================== +# Environment utils +# -------------------------------------------------------------------- + + +def make_env(env_name, device, is_test=False): + env = GymEnv( + env_name, frame_skip=4, from_pixels=True, pixels_only=False, device=device + ) + env = TransformedEnv(env) + env.append_transform(NoopResetEnv(noops=30, random=True)) + if not is_test: + env.append_transform(EndOfLifeTransform()) + env.append_transform(RewardClipping(-1, 1)) + env.append_transform(ToTensorImage(from_int=False)) + env.append_transform(GrayScale()) + env.append_transform(Resize(84, 84)) + env.append_transform(CatFrames(N=4, dim=-3)) + env.append_transform(RewardSum()) + env.append_transform(StepCounter(max_steps=4500)) + env.append_transform(DoubleToFloat()) + env.append_transform(VecNorm(in_keys=["pixels"])) + return env + + +# ==================================================================== +# Model utils +# -------------------------------------------------------------------- + + +def make_ppo_modules_pixels(proof_environment): + + # Define input shape + input_shape = proof_environment.observation_spec["pixels"].shape + + # Define distribution class and kwargs + num_outputs = proof_environment.action_spec.space.n + distribution_class = OneHotCategorical + distribution_kwargs = {} + + # Define input keys + in_keys = ["pixels"] + + # Define a shared Module and TensorDictModule (CNN + MLP) + common_cnn = ConvNet( + activation_class=torch.nn.ReLU, + num_cells=[32, 64, 64], + kernel_sizes=[8, 4, 3], + strides=[4, 2, 1], + ) + common_cnn_output = common_cnn(torch.ones(input_shape)) + common_mlp = MLP( + in_features=common_cnn_output.shape[-1], + activation_class=torch.nn.ReLU, + activate_last_layer=True, + out_features=512, + num_cells=[], + ) + common_mlp_output = common_mlp(common_cnn_output) + + # Define shared net as TensorDictModule + common_module = TensorDictModule( + module=torch.nn.Sequential(common_cnn, common_mlp), + in_keys=in_keys, + out_keys=["common_features"], + ) + + # Define on head for the policy + policy_net = MLP( + in_features=common_mlp_output.shape[-1], + out_features=num_outputs, + activation_class=torch.nn.ReLU, + 
num_cells=[], + ) + policy_module = TensorDictModule( + module=policy_net, + in_keys=["common_features"], + out_keys=["logits"], + ) + + # Add probabilistic sampling of the actions + policy_module = ProbabilisticActor( + policy_module, + in_keys=["logits"], + spec=CompositeSpec(action=proof_environment.action_spec), + distribution_class=distribution_class, + distribution_kwargs=distribution_kwargs, + return_log_prob=True, + default_interaction_type=ExplorationType.RANDOM, + ) + + # Define another head for the value + value_net = MLP( + activation_class=torch.nn.ReLU, + in_features=common_mlp_output.shape[-1], + out_features=1, + num_cells=[], + ) + value_module = ValueOperator( + value_net, + in_keys=["common_features"], + ) + + return common_module, policy_module, value_module + + +def make_ppo_models(env_name): + + proof_environment = make_env(env_name, device="cpu") + common_module, policy_module, value_module = make_ppo_modules_pixels( + proof_environment + ) + + # Wrap modules in a single ActorCritic operator + actor_critic = ActorValueOperator( + common_operator=common_module, + policy_operator=policy_module, + value_operator=value_module, + ) + + actor = actor_critic.get_policy_operator() + critic = actor_critic.get_value_operator() + + del proof_environment + + return actor, critic + + +# ==================================================================== +# Evaluation utils +# -------------------------------------------------------------------- + + +def eval_model(actor, test_env, num_episodes=3): + test_rewards = torch.zeros(num_episodes, dtype=torch.float32) + for i in range(num_episodes): + td_test = test_env.rollout( + policy=actor, + auto_reset=True, + auto_cast_to_device=True, + break_when_any_done=True, + max_steps=10_000_000, + ) + reward = td_test["next", "episode_reward"][td_test["next", "done"]] + test_rewards[i] = reward.sum() + del td_test + return test_rewards.mean() diff --git a/test/test_cost.py b/test/test_cost.py index eddf1dfc3bf..35297c3a1e6 100644 --- a/test/test_cost.py +++ b/test/test_cost.py @@ -130,6 +130,7 @@ GAE, TD1Estimator, TDLambdaEstimator, + VTrace, ) from torchrl.objectives.value.functional import ( _transpose_time, @@ -140,6 +141,7 @@ vec_generalized_advantage_estimate, vec_td1_advantage_estimate, vec_td_lambda_advantage_estimate, + vtrace_advantage_estimate, ) from torchrl.objectives.value.utils import ( _custom_conv1d, @@ -437,7 +439,7 @@ def test_dqn(self, delay_value, device, action_spec_type, td_est): action_spec_type=action_spec_type, device=device ) loss_fn = DQNLoss(actor, loss_function="l2", delay_value=delay_value) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -915,7 +917,7 @@ def test_qmixer(self, delay_value, device, action_spec_type, td_est): action_spec_type=action_spec_type, device=device ) loss_fn = QMixerLoss(actor, mixer, loss_function="l2", delay_value=delay_value) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -1400,7 +1402,7 @@ def test_ddpg(self, delay_actor, delay_value, device, td_est): delay_actor=delay_actor, delay_value=delay_value, ) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -2009,7 +2011,7 @@ def 
test_td3( delay_actor=delay_actor, delay_qvalue=delay_qvalue, ) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -2696,7 +2698,7 @@ def test_sac( **kwargs, ) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -3481,7 +3483,7 @@ def test_discrete_sac( loss_function="l2", **kwargs, ) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -4091,7 +4093,7 @@ def test_redq(self, delay_qvalue, num_qvalue, device, td_est): loss_function="l2", delay_qvalue=delay_qvalue, ) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -4458,7 +4460,7 @@ def test_redq_batched(self, delay_qvalue, num_qvalue, device, td_est): loss_function="l2", delay_qvalue=delay_qvalue, ) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -4475,7 +4477,7 @@ def test_redq_batched(self, delay_qvalue, num_qvalue, device, td_est): loss_function="l2", delay_qvalue=delay_qvalue, ) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn_deprec.make_value_estimator(td_est) return @@ -4895,7 +4897,7 @@ def test_cql( **kwargs, ) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -5305,7 +5307,7 @@ def test_dcql(self, delay_value, device, action_spec_type, td_est): action_spec_type=action_spec_type, device=device ) loss_fn = DiscreteCQLLoss(actor, loss_function="l2", delay_value=delay_value) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -5536,6 +5538,7 @@ def _create_mock_actor( action_dim=4, device="cpu", observation_key="observation", + sample_log_prob_key="sample_log_prob", ): # Actor action_spec = BoundedTensorSpec( @@ -5550,6 +5553,8 @@ def _create_mock_actor( distribution_class=TanhNormal, in_keys=["loc", "scale"], spec=action_spec, + return_log_prob=True, + log_prob_key=sample_log_prob_key, ) return actor.to(device) @@ -5587,6 +5592,7 @@ def _create_mock_actor_value(self, batch=2, obs_dim=3, action_dim=4, device="cpu distribution_class=TanhNormal, in_keys=["loc", "scale"], spec=action_spec, + return_log_prob=True, ) module = nn.Sequential(base_layer, nn.Linear(5, 1)) value = ValueOperator( @@ -5613,6 +5619,7 @@ def _create_mock_actor_value_shared( distribution_class=TanhNormal, in_keys=["loc", "scale"], spec=action_spec, + return_log_prob=True, ) module = nn.Linear(5, 1) value_head = ValueOperator( @@ -5720,7 +5727,7 @@ def _create_seq_mock_data_ppo( @pytest.mark.parametrize("loss_class", (PPOLoss, ClipPPOLoss, KLPENPPOLoss)) @pytest.mark.parametrize("gradient_mode", (True, False)) - @pytest.mark.parametrize("advantage", ("gae", "td", "td_lambda", None)) + @pytest.mark.parametrize("advantage", ("gae", "vtrace", 
"td", "td_lambda", None)) @pytest.mark.parametrize("device", get_default_devices()) @pytest.mark.parametrize("td_est", list(ValueEstimators) + [None]) def test_ppo(self, loss_class, device, gradient_mode, advantage, td_est): @@ -5733,6 +5740,13 @@ def test_ppo(self, loss_class, device, gradient_mode, advantage, td_est): advantage = GAE( gamma=0.9, lmbda=0.9, value_network=value, differentiable=gradient_mode ) + elif advantage == "vtrace": + advantage = VTrace( + gamma=0.9, + value_network=value, + actor_network=actor, + differentiable=gradient_mode, + ) elif advantage == "td": advantage = TD1Estimator( gamma=0.9, value_network=value, differentiable=gradient_mode @@ -5799,7 +5813,7 @@ def test_ppo_state_dict(self, loss_class, device, gradient_mode): loss_fn2.load_state_dict(sd) @pytest.mark.parametrize("loss_class", (PPOLoss, ClipPPOLoss, KLPENPPOLoss)) - @pytest.mark.parametrize("advantage", ("gae", "td", "td_lambda", None)) + @pytest.mark.parametrize("advantage", ("gae", "vtrace", "td", "td_lambda", None)) @pytest.mark.parametrize("device", get_default_devices()) def test_ppo_shared(self, loss_class, device, advantage): torch.manual_seed(self.seed) @@ -5812,6 +5826,12 @@ def test_ppo_shared(self, loss_class, device, advantage): lmbda=0.9, value_network=value, ) + elif advantage == "vtrace": + advantage = VTrace( + gamma=0.9, + value_network=value, + actor_network=actor, + ) elif advantage == "td": advantage = TD1Estimator( gamma=0.9, @@ -5873,6 +5893,7 @@ def test_ppo_shared(self, loss_class, device, advantage): "advantage", ( "gae", + "vtrace", "td", "td_lambda", ), @@ -5892,6 +5913,12 @@ def test_ppo_shared_seq(self, loss_class, device, advantage, separate_losses): lmbda=0.9, value_network=value, ) + elif advantage == "vtrace": + advantage = VTrace( + gamma=0.9, + value_network=value, + actor_network=actor, + ) elif advantage == "td": advantage = TD1Estimator( gamma=0.9, @@ -5943,7 +5970,7 @@ def test_ppo_shared_seq(self, loss_class, device, advantage, separate_losses): ) @pytest.mark.parametrize("loss_class", (PPOLoss, ClipPPOLoss, KLPENPPOLoss)) @pytest.mark.parametrize("gradient_mode", (True, False)) - @pytest.mark.parametrize("advantage", ("gae", "td", "td_lambda", None)) + @pytest.mark.parametrize("advantage", ("gae", "vtrace", "td", "td_lambda", None)) @pytest.mark.parametrize("device", get_default_devices()) def test_ppo_diff(self, loss_class, device, gradient_mode, advantage): if pack_version.parse(torch.__version__) > pack_version.parse("1.14"): @@ -5957,6 +5984,13 @@ def test_ppo_diff(self, loss_class, device, gradient_mode, advantage): advantage = GAE( gamma=0.9, lmbda=0.9, value_network=value, differentiable=gradient_mode ) + elif advantage == "vtrace": + advantage = VTrace( + gamma=0.9, + value_network=value, + actor_network=actor, + differentiable=gradient_mode, + ) elif advantage == "td": advantage = TD1Estimator( gamma=0.9, value_network=value, differentiable=gradient_mode @@ -6019,6 +6053,7 @@ def test_ppo_diff(self, loss_class, device, gradient_mode, advantage): ValueEstimators.TD1, ValueEstimators.TD0, ValueEstimators.GAE, + ValueEstimators.VTrace, ValueEstimators.TDLambda, ], ) @@ -6060,7 +6095,7 @@ def test_ppo_tensordict_keys(self, loss_class, td_est): self.set_advantage_keys_through_loss_test(loss_fn, td_est, key_mapping) @pytest.mark.parametrize("loss_class", (PPOLoss, ClipPPOLoss, KLPENPPOLoss)) - @pytest.mark.parametrize("advantage", ("gae", "td", "td_lambda", None)) + @pytest.mark.parametrize("advantage", ("gae", "vtrace", "td", "td_lambda", None)) 
@pytest.mark.parametrize("td_est", list(ValueEstimators) + [None]) def test_ppo_tensordict_keys_run(self, loss_class, advantage, td_est): """Test PPO loss module with non-default tensordict keys.""" @@ -6078,7 +6113,9 @@ def test_ppo_tensordict_keys_run(self, loss_class, advantage, td_est): sample_log_prob_key=tensor_keys["sample_log_prob"], action_key=tensor_keys["action"], ) - actor = self._create_mock_actor() + actor = self._create_mock_actor( + sample_log_prob_key=tensor_keys["sample_log_prob"] + ) value = self._create_mock_value(out_keys=[tensor_keys["value"]]) if advantage == "gae": @@ -6088,6 +6125,13 @@ def test_ppo_tensordict_keys_run(self, loss_class, advantage, td_est): value_network=value, differentiable=gradient_mode, ) + elif advantage == "vtrace": + advantage = VTrace( + gamma=0.9, + value_network=value, + actor_network=actor, + differentiable=gradient_mode, + ) elif advantage == "td": advantage = TD1Estimator( gamma=0.9, @@ -6181,7 +6225,9 @@ def test_ppo_notensordict( terminated_key=terminated_key, ) - actor = self._create_mock_actor(observation_key=observation_key) + actor = self._create_mock_actor( + observation_key=observation_key, sample_log_prob_key=sample_log_prob_key + ) value = self._create_mock_value(observation_key=observation_key) loss = loss_class(actor=actor, critic=value) @@ -6240,6 +6286,7 @@ def _create_mock_actor( action_dim=4, device="cpu", observation_key="observation", + sample_log_prob_key="sample_log_prob", ): # Actor action_spec = BoundedTensorSpec( @@ -6254,6 +6301,8 @@ def _create_mock_actor( in_keys=["loc", "scale"], spec=action_spec, distribution_class=TanhNormal, + return_log_prob=True, + log_prob_key=sample_log_prob_key, ) return actor.to(device) @@ -6344,6 +6393,7 @@ def _create_seq_mock_data_a2c( reward_key="reward", done_key="done", terminated_key="terminated", + sample_log_prob_key="sample_log_prob", ): # create a tensordict total_obs = torch.randn(batch, T + 1, obs_dim, device=device) @@ -6373,7 +6423,7 @@ def _create_seq_mock_data_a2c( }, "collector": {"mask": mask}, action_key: action.masked_fill_(~mask.unsqueeze(-1), 0.0), - "sample_log_prob": torch.randn_like(action[..., 1]).masked_fill_( + sample_log_prob_key: torch.randn_like(action[..., 1]).masked_fill_( ~mask, 0.0 ) / 10, @@ -6386,7 +6436,7 @@ def _create_seq_mock_data_a2c( return td @pytest.mark.parametrize("gradient_mode", (True, False)) - @pytest.mark.parametrize("advantage", ("gae", "td", "td_lambda", None)) + @pytest.mark.parametrize("advantage", ("gae", "vtrace", "td", "td_lambda", None)) @pytest.mark.parametrize("device", get_default_devices()) @pytest.mark.parametrize("td_est", list(ValueEstimators) + [None]) def test_a2c(self, device, gradient_mode, advantage, td_est): @@ -6399,6 +6449,13 @@ def test_a2c(self, device, gradient_mode, advantage, td_est): advantage = GAE( gamma=0.9, lmbda=0.9, value_network=value, differentiable=gradient_mode ) + elif advantage == "vtrace": + advantage = VTrace( + gamma=0.9, + value_network=value, + actor_network=actor, + differentiable=gradient_mode, + ) elif advantage == "td": advantage = TD1Estimator( gamma=0.9, value_network=value, differentiable=gradient_mode @@ -6523,7 +6580,7 @@ def test_a2c_separate_losses(self, separate_losses): not _has_functorch, reason=f"functorch not found, {FUNCTORCH_ERR}" ) @pytest.mark.parametrize("gradient_mode", (True, False)) - @pytest.mark.parametrize("advantage", ("gae", "td", "td_lambda", None)) + @pytest.mark.parametrize("advantage", ("gae", "vtrace", "td", "td_lambda", None)) 
@pytest.mark.parametrize("device", get_default_devices()) def test_a2c_diff(self, device, gradient_mode, advantage): if pack_version.parse(torch.__version__) > pack_version.parse("1.14"): @@ -6541,6 +6598,13 @@ def test_a2c_diff(self, device, gradient_mode, advantage): advantage = TD1Estimator( gamma=0.9, value_network=value, differentiable=gradient_mode ) + elif advantage == "vtrace": + advantage = VTrace( + gamma=0.9, + value_network=value, + actor_network=actor, + differentiable=gradient_mode, + ) elif advantage == "td_lambda": advantage = TDLambdaEstimator( gamma=0.9, lmbda=0.9, value_network=value, differentiable=gradient_mode @@ -6590,6 +6654,7 @@ def test_a2c_diff(self, device, gradient_mode, advantage): ValueEstimators.TD1, ValueEstimators.TD0, ValueEstimators.GAE, + ValueEstimators.VTrace, ValueEstimators.TDLambda, ], ) @@ -6607,6 +6672,7 @@ def test_a2c_tensordict_keys(self, td_est): "reward": "reward", "done": "done", "terminated": "terminated", + "sample_log_prob": "sample_log_prob", } self.tensordict_keys_test( @@ -6629,8 +6695,16 @@ def test_a2c_tensordict_keys(self, td_est): } self.set_advantage_keys_through_loss_test(loss_fn, td_est, key_mapping) + @pytest.mark.parametrize( + "td_est", + [ + ValueEstimators.GAE, + ValueEstimators.VTrace, + ], + ) + @pytest.mark.parametrize("advantage", ("gae", "vtrace", None)) @pytest.mark.parametrize("device", get_default_devices()) - def test_a2c_tensordict_keys_run(self, device): + def test_a2c_tensordict_keys_run(self, device, advantage, td_est): """Test A2C loss module with non-default tensordict keys.""" torch.manual_seed(self.seed) gradient_mode = True @@ -6639,6 +6713,7 @@ def test_a2c_tensordict_keys_run(self, device): value_key = "state_value_test" action_key = "action_test" reward_key = "reward_test" + sample_log_prob_key = "sample_log_prob_test" done_key = ("done", "test") terminated_key = ("terminated", "test") @@ -6648,24 +6723,29 @@ def test_a2c_tensordict_keys_run(self, device): reward_key=reward_key, done_key=done_key, terminated_key=terminated_key, + sample_log_prob_key=sample_log_prob_key, ) - actor = self._create_mock_actor(device=device) - value = self._create_mock_value(device=device, out_keys=[value_key]) - advantage = GAE( - gamma=0.9, - lmbda=0.9, - value_network=value, - differentiable=gradient_mode, - ) - advantage.set_keys( - advantage=advantage_key, - value_target=value_target_key, - value=value_key, - reward=reward_key, - done=done_key, - terminated=terminated_key, + actor = self._create_mock_actor( + device=device, sample_log_prob_key=sample_log_prob_key ) + value = self._create_mock_value(device=device, out_keys=[value_key]) + if advantage == "gae": + advantage = GAE( + gamma=0.9, lmbda=0.9, value_network=value, differentiable=gradient_mode + ) + elif advantage == "vtrace": + advantage = VTrace( + gamma=0.9, + value_network=value, + actor_network=actor, + differentiable=gradient_mode, + ) + elif advantage is None: + pass + else: + raise NotImplementedError + loss_fn = A2CLoss(actor, value, loss_critic_type="l2") loss_fn.set_keys( advantage=advantage_key, @@ -6675,9 +6755,23 @@ def test_a2c_tensordict_keys_run(self, device): reward=reward_key, done=done_key, terminated=done_key, + sample_log_prob=sample_log_prob_key, ) - advantage(td) + if advantage is not None: + advantage.set_keys( + advantage=advantage_key, + value_target=value_target_key, + value=value_key, + reward=reward_key, + done=done_key, + terminated=terminated_key, + sample_log_prob=sample_log_prob_key, + ) + advantage(td) + else: + if td_est is 
not None: + loss_fn.make_value_estimator(td_est) loss = loss_fn(td) loss_critic = loss["loss_critic"] @@ -6775,7 +6869,16 @@ class TestReinforce(LossModuleTestBase): @pytest.mark.parametrize("delay_value", [True, False]) @pytest.mark.parametrize("gradient_mode", [True, False]) @pytest.mark.parametrize("advantage", ["gae", "td", "td_lambda", None]) - @pytest.mark.parametrize("td_est", list(ValueEstimators) + [None]) + @pytest.mark.parametrize( + "td_est", + [ + ValueEstimators.TD1, + ValueEstimators.TD0, + ValueEstimators.GAE, + ValueEstimators.TDLambda, + None, + ], + ) def test_reinforce_value_net(self, advantage, gradient_mode, delay_value, td_est): n_obs = 3 n_act = 5 @@ -7493,7 +7596,7 @@ def test_dreamer_actor(self, device, imagination_horizon, discount_loss, td_est) imagination_horizon=imagination_horizon, discount_loss=discount_loss, ) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_module.make_value_estimator(td_est) return @@ -8235,7 +8338,7 @@ def test_iql( expectile=expectile, loss_function="l2", ) - if td_est is ValueEstimators.GAE: + if td_est in (ValueEstimators.GAE, ValueEstimators.VTrace): with pytest.raises(NotImplementedError): loss_fn.make_value_estimator(td_est) return @@ -9596,6 +9699,113 @@ def test_gae_multidim( torch.testing.assert_close(r1, r3, rtol=1e-4, atol=1e-4) torch.testing.assert_close(r1, r2, rtol=1e-4, atol=1e-4) + @pytest.mark.parametrize("device", get_default_devices()) + @pytest.mark.parametrize("gamma", [0.99, 0.5, 0.1]) + @pytest.mark.parametrize("N", [(1,), (3,), (7, 3)]) + @pytest.mark.parametrize("T", [200, 5, 3]) + @pytest.mark.parametrize("dtype", [torch.float, torch.double]) + @pytest.mark.parametrize("has_done", [False, True]) + def test_vtrace(self, device, gamma, N, T, dtype, has_done): + torch.manual_seed(0) + + done = torch.zeros(*N, T, 1, device=device, dtype=torch.bool) + terminated = done.clone() + if has_done: + terminated = terminated.bernoulli_(0.1) + done = done.bernoulli_(0.1) | terminated + reward = torch.randn(*N, T, 1, device=device, dtype=dtype) + state_value = torch.randn(*N, T, 1, device=device, dtype=dtype) + next_state_value = torch.randn(*N, T, 1, device=device, dtype=dtype) + log_pi = torch.log(torch.rand(*N, T, 1, device=device, dtype=dtype)) + log_mu = torch.log(torch.rand(*N, T, 1, device=device, dtype=dtype)) + + _, value_target = vtrace_advantage_estimate( + gamma, + log_pi, + log_mu, + state_value, + next_state_value, + reward, + done=done, + terminated=terminated, + ) + + assert not torch.isnan(value_target).any() + assert not torch.isinf(value_target).any() + + @pytest.mark.parametrize("device", get_default_devices()) + @pytest.mark.parametrize("gamma", [0.99, 0.5, 0.1]) + @pytest.mark.parametrize("N", [(3,), (7, 3)]) + @pytest.mark.parametrize("T", [100, 3]) + @pytest.mark.parametrize("dtype", [torch.float, torch.double]) + @pytest.mark.parametrize("feature_dim", [[5], [2, 5]]) + @pytest.mark.parametrize("has_done", [True, False]) + def test_vtrace_multidim(self, device, gamma, N, T, dtype, has_done, feature_dim): + D = feature_dim + time_dim = -1 - len(D) + + torch.manual_seed(0) + + done = torch.zeros(*N, T, *D, device=device, dtype=torch.bool) + terminated = done.clone() + if has_done: + terminated = terminated.bernoulli_(0.1) + done = done.bernoulli_(0.1) | terminated + reward = torch.randn(*N, T, *D, device=device, dtype=dtype) + state_value = torch.randn(*N, T, *D, device=device, dtype=dtype) + next_state_value = 
torch.randn(*N, T, *D, device=device, dtype=dtype) + log_pi = torch.log(torch.rand(*N, T, *D, device=device, dtype=dtype)) + log_mu = torch.log(torch.rand(*N, T, *D, device=device, dtype=dtype)) + + r1 = vtrace_advantage_estimate( + gamma, + log_pi, + log_mu, + state_value, + next_state_value, + reward, + done=done, + terminated=terminated, + time_dim=time_dim, + ) + if len(D) == 2: + r2 = [ + vtrace_advantage_estimate( + gamma, + log_pi[..., i : i + 1, j], + log_mu[..., i : i + 1, j], + state_value[..., i : i + 1, j], + next_state_value[..., i : i + 1, j], + reward[..., i : i + 1, j], + terminated=terminated[..., i : i + 1, j], + done=done[..., i : i + 1, j], + time_dim=-2, + ) + for i in range(D[0]) + for j in range(D[1]) + ] + else: + r2 = [ + vtrace_advantage_estimate( + gamma, + log_pi[..., i : i + 1], + log_mu[..., i : i + 1], + state_value[..., i : i + 1], + next_state_value[..., i : i + 1], + reward[..., i : i + 1], + done=done[..., i : i + 1], + terminated=terminated[..., i : i + 1], + time_dim=-2, + ) + for i in range(D[0]) + ] + + list2 = list(zip(*r2)) + r2 = [torch.cat(list2[0], -1), torch.cat(list2[1], -1)] + if len(D) == 2: + r2 = [r2[0].unflatten(-1, D), r2[1].unflatten(-1, D)] + torch.testing.assert_close(r1, r2, rtol=1e-4, atol=1e-4) + @pytest.mark.parametrize("device", get_default_devices()) @pytest.mark.parametrize("gamma", [0.5, 0.99, 0.1]) @pytest.mark.parametrize("lmbda", [0.1, 0.5, 0.99]) @@ -10530,6 +10740,7 @@ class TestAdv: [GAE, {"lmbda": 0.95}], [TD1Estimator, {}], [TDLambdaEstimator, {"lmbda": 0.95}], + [VTrace, {}], ], ) def test_dispatch( @@ -10540,18 +10751,46 @@ def test_dispatch( value_net = TensorDictModule( nn.Linear(3, 1), in_keys=["obs"], out_keys=["state_value"] ) - module = adv( - gamma=0.98, - value_network=value_net, - differentiable=False, - **kwargs, - ) - kwargs = { - "obs": torch.randn(1, 10, 3), - "next_reward": torch.randn(1, 10, 1, requires_grad=True), - "next_done": torch.zeros(1, 10, 1, dtype=torch.bool), - "next_obs": torch.randn(1, 10, 3), - } + if adv is VTrace: + actor_net = TensorDictModule( + nn.Linear(3, 4), in_keys=["obs"], out_keys=["logits"] + ) + actor_net = ProbabilisticActor( + module=actor_net, + in_keys=["logits"], + out_keys=["action"], + distribution_class=OneHotCategorical, + return_log_prob=True, + ) + module = adv( + gamma=0.98, + actor_network=actor_net, + value_network=value_net, + differentiable=False, + **kwargs, + ) + kwargs = { + "obs": torch.randn(1, 10, 3), + "sample_log_prob": torch.log(torch.rand(1, 10, 1)), + "next_reward": torch.randn(1, 10, 1, requires_grad=True), + "next_done": torch.zeros(1, 10, 1, dtype=torch.bool), + "next_terminated": torch.zeros(1, 10, 1, dtype=torch.bool), + "next_obs": torch.randn(1, 10, 3), + } + else: + module = adv( + gamma=0.98, + value_network=value_net, + differentiable=False, + **kwargs, + ) + kwargs = { + "obs": torch.randn(1, 10, 3), + "next_reward": torch.randn(1, 10, 1, requires_grad=True), + "next_done": torch.zeros(1, 10, 1, dtype=torch.bool), + "next_terminated": torch.zeros(1, 10, 1, dtype=torch.bool), + "next_obs": torch.randn(1, 10, 3), + } advantage, value_target = module(**kwargs) assert advantage.shape == torch.Size([1, 10, 1]) assert value_target.shape == torch.Size([1, 10, 1]) @@ -10562,6 +10801,7 @@ def test_dispatch( [GAE, {"lmbda": 0.95}], [TD1Estimator, {}], [TDLambdaEstimator, {"lmbda": 0.95}], + [VTrace, {}], ], ) def test_diff_reward( @@ -10572,23 +10812,55 @@ def test_diff_reward( value_net = TensorDictModule( nn.Linear(3, 1), in_keys=["obs"], 
out_keys=["state_value"] ) - module = adv( - gamma=0.98, - value_network=value_net, - differentiable=True, - **kwargs, - ) - td = TensorDict( - { - "obs": torch.randn(1, 10, 3), - "next": { + if adv is VTrace: + actor_net = TensorDictModule( + nn.Linear(3, 4), in_keys=["obs"], out_keys=["logits"] + ) + actor_net = ProbabilisticActor( + module=actor_net, + in_keys=["logits"], + out_keys=["action"], + distribution_class=OneHotCategorical, + return_log_prob=True, + ) + module = adv( + gamma=0.98, + actor_network=actor_net, + value_network=value_net, + differentiable=True, + **kwargs, + ) + td = TensorDict( + { "obs": torch.randn(1, 10, 3), - "reward": torch.randn(1, 10, 1, requires_grad=True), - "done": torch.zeros(1, 10, 1, dtype=torch.bool), + "sample_log_prob": torch.log(torch.rand(1, 10, 1)), + "next": { + "obs": torch.randn(1, 10, 3), + "reward": torch.randn(1, 10, 1, requires_grad=True), + "done": torch.zeros(1, 10, 1, dtype=torch.bool), + "terminated": torch.zeros(1, 10, 1, dtype=torch.bool), + }, }, - }, - [1, 10], - ) + [1, 10], + ) + else: + module = adv( + gamma=0.98, + value_network=value_net, + differentiable=True, + **kwargs, + ) + td = TensorDict( + { + "obs": torch.randn(1, 10, 3), + "next": { + "obs": torch.randn(1, 10, 3), + "reward": torch.randn(1, 10, 1, requires_grad=True), + "done": torch.zeros(1, 10, 1, dtype=torch.bool), + }, + }, + [1, 10], + ) td = module(td.clone(False)) # check that the advantage can't backprop to the value params td["advantage"].sum().backward() @@ -10603,6 +10875,7 @@ def test_diff_reward( [GAE, {"lmbda": 0.95}], [TD1Estimator, {}], [TDLambdaEstimator, {"lmbda": 0.95}], + [VTrace, {}], ], ) @pytest.mark.parametrize("shifted", [True, False]) @@ -10610,25 +10883,60 @@ def test_non_differentiable(self, adv, shifted, kwargs): value_net = TensorDictModule( nn.Linear(3, 1), in_keys=["obs"], out_keys=["state_value"] ) - module = adv( - gamma=0.98, - value_network=value_net, - differentiable=False, - shifted=shifted, - **kwargs, - ) - td = TensorDict( - { - "obs": torch.randn(1, 10, 3), - "next": { + + if adv is VTrace: + actor_net = TensorDictModule( + nn.Linear(3, 4), in_keys=["obs"], out_keys=["logits"] + ) + actor_net = ProbabilisticActor( + module=actor_net, + in_keys=["logits"], + out_keys=["action"], + distribution_class=OneHotCategorical, + return_log_prob=True, + ) + module = adv( + gamma=0.98, + actor_network=actor_net, + value_network=value_net, + differentiable=False, + shifted=shifted, + **kwargs, + ) + td = TensorDict( + { "obs": torch.randn(1, 10, 3), - "reward": torch.randn(1, 10, 1, requires_grad=True), - "done": torch.zeros(1, 10, 1, dtype=torch.bool), + "sample_log_prob": torch.log(torch.rand(1, 10, 1)), + "next": { + "obs": torch.randn(1, 10, 3), + "reward": torch.randn(1, 10, 1, requires_grad=True), + "done": torch.zeros(1, 10, 1, dtype=torch.bool), + "terminated": torch.zeros(1, 10, 1, dtype=torch.bool), + }, }, - }, - [1, 10], - names=[None, "time"], - ) + [1, 10], + names=[None, "time"], + ) + else: + module = adv( + gamma=0.98, + value_network=value_net, + differentiable=False, + shifted=shifted, + **kwargs, + ) + td = TensorDict( + { + "obs": torch.randn(1, 10, 3), + "next": { + "obs": torch.randn(1, 10, 3), + "reward": torch.randn(1, 10, 1, requires_grad=True), + "done": torch.zeros(1, 10, 1, dtype=torch.bool), + }, + }, + [1, 10], + names=[None, "time"], + ) td = module(td.clone(False)) assert td["advantage"].is_leaf @@ -10638,6 +10946,7 @@ def test_non_differentiable(self, adv, shifted, kwargs): [GAE, {"lmbda": 0.95}], 
[TD1Estimator, {}], [TDLambdaEstimator, {"lmbda": 0.95}], + [VTrace, {}], ], ) @pytest.mark.parametrize("has_value_net", [True, False]) @@ -10660,28 +10969,65 @@ def test_skip_existing( else: value_net = None - module = adv( - gamma=0.98, - value_network=value_net, - differentiable=True, - shifted=shifted, - skip_existing=skip_existing, - **kwargs, - ) - td = TensorDict( - { - "obs": torch.randn(1, 10, 3), - "state_value": torch.ones(1, 10, 1), - "next": { + if adv is VTrace: + actor_net = TensorDictModule( + nn.Linear(3, 4), in_keys=["obs"], out_keys=["logits"] + ) + actor_net = ProbabilisticActor( + module=actor_net, + in_keys=["logits"], + out_keys=["action"], + distribution_class=OneHotCategorical, + return_log_prob=True, + ) + module = adv( + gamma=0.98, + actor_network=actor_net, + value_network=value_net, + differentiable=True, + shifted=shifted, + skip_existing=skip_existing, + **kwargs, + ) + td = TensorDict( + { "obs": torch.randn(1, 10, 3), + "sample_log_prob": torch.log(torch.rand(1, 10, 1)), "state_value": torch.ones(1, 10, 1), - "reward": torch.randn(1, 10, 1, requires_grad=True), - "done": torch.zeros(1, 10, 1, dtype=torch.bool), + "next": { + "obs": torch.randn(1, 10, 3), + "state_value": torch.ones(1, 10, 1), + "reward": torch.randn(1, 10, 1, requires_grad=True), + "done": torch.zeros(1, 10, 1, dtype=torch.bool), + "terminated": torch.zeros(1, 10, 1, dtype=torch.bool), + }, }, - }, - [1, 10], - names=[None, "time"], - ) + [1, 10], + names=[None, "time"], + ) + else: + module = adv( + gamma=0.98, + value_network=value_net, + differentiable=True, + shifted=shifted, + skip_existing=skip_existing, + **kwargs, + ) + td = TensorDict( + { + "obs": torch.randn(1, 10, 3), + "state_value": torch.ones(1, 10, 1), + "next": { + "obs": torch.randn(1, 10, 3), + "state_value": torch.ones(1, 10, 1), + "reward": torch.randn(1, 10, 1, requires_grad=True), + "done": torch.zeros(1, 10, 1, dtype=torch.bool), + }, + }, + [1, 10], + names=[None, "time"], + ) td = module(td.clone(False)) if has_value_net and not skip_existing: exp_val = 0 @@ -10699,15 +11045,34 @@ def test_skip_existing( [GAE, {"lmbda": 0.95}], [TD1Estimator, {}], [TDLambdaEstimator, {"lmbda": 0.95}], + [VTrace, {}], ], ) def test_set_keys(self, value, adv, kwargs): value_net = TensorDictModule(nn.Linear(3, 1), in_keys=["obs"], out_keys=[value]) - module = adv( - gamma=0.98, - value_network=value_net, - **kwargs, - ) + if adv is VTrace: + actor_net = TensorDictModule( + nn.Linear(3, 4), in_keys=["obs"], out_keys=["logits"] + ) + actor_net = ProbabilisticActor( + module=actor_net, + in_keys=["logits"], + out_keys=["action"], + distribution_class=OneHotCategorical, + return_log_prob=True, + ) + module = adv( + gamma=0.98, + actor_network=actor_net, + value_network=value_net, + **kwargs, + ) + else: + module = adv( + gamma=0.98, + value_network=value_net, + **kwargs, + ) module.set_keys(value=value) assert module.tensor_keys.value == value @@ -10721,6 +11086,7 @@ def test_set_keys(self, value, adv, kwargs): [GAE, {"lmbda": 0.95}], [TD1Estimator, {}], [TDLambdaEstimator, {"lmbda": 0.95}], + [VTrace, {}], ], ) def test_set_deprecated_keys(self, adv, kwargs): @@ -10729,14 +11095,36 @@ def test_set_deprecated_keys(self, adv, kwargs): ) with pytest.warns(DeprecationWarning): - module = adv( - gamma=0.98, - value_network=value_net, - value_key="test_value", - advantage_key="advantage_test", - value_target_key="value_target_test", - **kwargs, - ) + + if adv is VTrace: + actor_net = TensorDictModule( + nn.Linear(3, 4), in_keys=["obs"], 
out_keys=["logits"] + ) + actor_net = ProbabilisticActor( + module=actor_net, + in_keys=["logits"], + out_keys=["action"], + distribution_class=OneHotCategorical, + return_log_prob=True, + ) + module = adv( + gamma=0.98, + actor_network=actor_net, + value_network=value_net, + value_key="test_value", + advantage_key="advantage_test", + value_target_key="value_target_test", + **kwargs, + ) + else: + module = adv( + gamma=0.98, + value_network=value_net, + value_key="test_value", + advantage_key="advantage_test", + value_target_key="value_target_test", + **kwargs, + ) assert module.tensor_keys.value == "test_value" assert module.tensor_keys.advantage == "advantage_test" assert module.tensor_keys.value_target == "value_target_test" diff --git a/torchrl/objectives/a2c.py b/torchrl/objectives/a2c.py index bb7b9014f0d..92955d4cab3 100644 --- a/torchrl/objectives/a2c.py +++ b/torchrl/objectives/a2c.py @@ -3,11 +3,17 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import warnings +from copy import deepcopy from dataclasses import dataclass from typing import Tuple import torch -from tensordict.nn import dispatch, ProbabilisticTensorDictSequential, TensorDictModule +from tensordict.nn import ( + dispatch, + ProbabilisticTensorDictSequential, + repopulate_module, + TensorDictModule, +) from tensordict.tensordict import TensorDict, TensorDictBase from tensordict.utils import NestedKey from torch import distributions as d @@ -20,7 +26,13 @@ distance_loss, ValueEstimators, ) -from torchrl.objectives.value import GAE, TD0Estimator, TD1Estimator, TDLambdaEstimator +from torchrl.objectives.value import ( + GAE, + TD0Estimator, + TD1Estimator, + TDLambdaEstimator, + VTrace, +) class A2CLoss(LossModule): @@ -202,6 +214,7 @@ class _AcceptedKeys: reward: NestedKey = "reward" done: NestedKey = "done" terminated: NestedKey = "terminated" + sample_log_prob: NestedKey = "sample_log_prob" default_keys = _AcceptedKeys() default_value_estimator: ValueEstimators = ValueEstimators.GAE @@ -389,6 +402,14 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams self._value_estimator = GAE(value_network=self.critic, **hp) elif value_type == ValueEstimators.TDLambda: self._value_estimator = TDLambdaEstimator(value_network=self.critic, **hp) + elif value_type == ValueEstimators.VTrace: + # VTrace currently does not support functional call on the actor + actor_with_params = repopulate_module( + deepcopy(self.actor), self.actor_params + ) + self._value_estimator = VTrace( + value_network=self.critic, actor_network=actor_with_params, **hp + ) else: raise NotImplementedError(f"Unknown value type {value_type}") @@ -399,5 +420,6 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams "reward": self.tensor_keys.reward, "done": self.tensor_keys.done, "terminated": self.tensor_keys.terminated, + "sample_log_prob": self.tensor_keys.sample_log_prob, } self._value_estimator.set_keys(**tensor_keys) diff --git a/torchrl/objectives/common.py b/torchrl/objectives/common.py index bdccbda3808..37c5e820d23 100644 --- a/torchrl/objectives/common.py +++ b/torchrl/objectives/common.py @@ -138,7 +138,7 @@ def set_keys(self, **kwargs) -> None: """ for key, value in kwargs.items(): if key not in self._AcceptedKeys.__dict__: - raise ValueError(f"{key} it not an accepted tensordict key") + raise ValueError(f"{key} is not an accepted tensordict key") if value is not None: setattr(self.tensor_keys, key, value) else: @@ 
-447,6 +447,10 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams raise NotImplementedError( f"Value type {value_type} it not implemented for loss {type(self)}." ) + elif value_type == ValueEstimators.VTrace: + raise NotImplementedError( + f"Value type {value_type} it not implemented for loss {type(self)}." + ) elif value_type == ValueEstimators.TDLambda: raise NotImplementedError( f"Value type {value_type} it not implemented for loss {type(self)}." diff --git a/torchrl/objectives/ppo.py b/torchrl/objectives/ppo.py index e576ca33c1c..2a2cc2fdb6e 100644 --- a/torchrl/objectives/ppo.py +++ b/torchrl/objectives/ppo.py @@ -4,11 +4,17 @@ # LICENSE file in the root directory of this source tree. import math import warnings +from copy import deepcopy from dataclasses import dataclass from typing import Tuple import torch -from tensordict.nn import dispatch, ProbabilisticTensorDictSequential, TensorDictModule +from tensordict.nn import ( + dispatch, + ProbabilisticTensorDictSequential, + repopulate_module, + TensorDictModule, +) from tensordict.tensordict import TensorDict, TensorDictBase from tensordict.utils import NestedKey from torch import distributions as d @@ -22,7 +28,7 @@ ) from .common import LossModule -from .value import GAE, TD0Estimator, TD1Estimator, TDLambdaEstimator +from .value import GAE, TD0Estimator, TD1Estimator, TDLambdaEstimator, VTrace class PPOLoss(LossModule): @@ -469,6 +475,14 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams self._value_estimator = GAE(value_network=self.critic, **hp) elif value_type == ValueEstimators.TDLambda: self._value_estimator = TDLambdaEstimator(value_network=self.critic, **hp) + elif value_type == ValueEstimators.VTrace: + # VTrace currently does not support functional call on the actor + actor_with_params = repopulate_module( + deepcopy(self.actor), self.actor_params + ) + self._value_estimator = VTrace( + value_network=self.critic, actor_network=actor_with_params, **hp + ) else: raise NotImplementedError(f"Unknown value type {value_type}") @@ -479,6 +493,7 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams "reward": self.tensor_keys.reward, "done": self.tensor_keys.done, "terminated": self.tensor_keys.terminated, + "sample_log_prob": self.tensor_keys.sample_log_prob, } self._value_estimator.set_keys(**tensor_keys) diff --git a/torchrl/objectives/reinforce.py b/torchrl/objectives/reinforce.py index 93910f1eebf..1ae9c1e8252 100644 --- a/torchrl/objectives/reinforce.py +++ b/torchrl/objectives/reinforce.py @@ -3,12 +3,18 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
import warnings +from copy import deepcopy from dataclasses import dataclass from typing import Optional import torch -from tensordict.nn import dispatch, ProbabilisticTensorDictSequential, TensorDictModule +from tensordict.nn import ( + dispatch, + ProbabilisticTensorDictSequential, + repopulate_module, + TensorDictModule, +) from tensordict.tensordict import TensorDict, TensorDictBase from tensordict.utils import NestedKey from torchrl.objectives.common import LossModule @@ -18,7 +24,13 @@ distance_loss, ValueEstimators, ) -from torchrl.objectives.value import GAE, TD0Estimator, TD1Estimator, TDLambdaEstimator +from torchrl.objectives.value import ( + GAE, + TD0Estimator, + TD1Estimator, + TDLambdaEstimator, + VTrace, +) class ReinforceLoss(LossModule): @@ -340,6 +352,14 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams self._value_estimator = GAE(value_network=self.critic, **hp) elif value_type == ValueEstimators.TDLambda: self._value_estimator = TDLambdaEstimator(value_network=self.critic, **hp) + elif value_type == ValueEstimators.VTrace: + # VTrace currently does not support functional call on the actor + actor_with_params = repopulate_module( + deepcopy(self.actor), self.actor_params + ) + self._value_estimator = VTrace( + value_network=self.critic, actor_network=actor_with_params, **hp + ) else: raise NotImplementedError(f"Unknown value type {value_type}") @@ -350,5 +370,6 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams "reward": self.tensor_keys.reward, "done": self.tensor_keys.done, "terminated": self.tensor_keys.terminated, + "sample_log_prob": self.tensor_keys.sample_log_prob, } self._value_estimator.set_keys(**tensor_keys) diff --git a/torchrl/objectives/utils.py b/torchrl/objectives/utils.py index bc678ed0154..b8ec5ec7c32 100644 --- a/torchrl/objectives/utils.py +++ b/torchrl/objectives/utils.py @@ -39,6 +39,7 @@ class ValueEstimators(Enum): TD1 = "TD(1) (infinity-step return)" TDLambda = "TD(lambda)" GAE = "Generalized advantage estimate" + VTrace = "V-trace" def default_value_kwargs(value_type: ValueEstimators): @@ -61,6 +62,8 @@ def default_value_kwargs(value_type: ValueEstimators): return {"gamma": 0.99, "lmbda": 0.95, "differentiable": True} elif value_type == ValueEstimators.TDLambda: return {"gamma": 0.99, "lmbda": 0.95, "differentiable": True} + elif value_type == ValueEstimators.VTrace: + return {"gamma": 0.99, "differentiable": True} else: raise NotImplementedError(f"Unknown value type {value_type}.") diff --git a/torchrl/objectives/value/__init__.py b/torchrl/objectives/value/__init__.py index 11ae2e6d9e2..51496986153 100644 --- a/torchrl/objectives/value/__init__.py +++ b/torchrl/objectives/value/__init__.py @@ -12,4 +12,5 @@ TDLambdaEstimate, TDLambdaEstimator, ValueEstimatorBase, + VTrace, ) diff --git a/torchrl/objectives/value/advantages.py b/torchrl/objectives/value/advantages.py index 4d3a25279a1..42ba404c05d 100644 --- a/torchrl/objectives/value/advantages.py +++ b/torchrl/objectives/value/advantages.py @@ -2,6 +2,8 @@ # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
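Usage note (an illustrative sketch, not part of the diffs above): with these changes, V-trace is selected on the PPO/A2C/Reinforce losses through ``make_value_estimator``, like any other estimator. The toy actor and critic below are placeholders invented for brevity, and the snippet assumes the patched torchrl/tensordict versions.

>>> import torch
>>> from torch import nn
>>> from tensordict.nn import TensorDictModule
>>> from torchrl.modules import OneHotCategorical, ProbabilisticActor, ValueOperator
>>> from torchrl.objectives import A2CLoss
>>> from torchrl.objectives.utils import ValueEstimators
>>> n_obs, n_act = 3, 4
>>> actor = ProbabilisticActor(
...     module=TensorDictModule(
...         nn.Linear(n_obs, n_act), in_keys=["observation"], out_keys=["logits"]
...     ),
...     in_keys=["logits"],
...     out_keys=["action"],
...     distribution_class=OneHotCategorical,
...     return_log_prob=True,  # stores the behaviour log-prob under "sample_log_prob"
... )
>>> critic = ValueOperator(nn.Linear(n_obs, 1), in_keys=["observation"])
>>> loss = A2CLoss(actor, critic)
>>> # picks up default_value_kwargs(ValueEstimators.VTrace), i.e. gamma=0.99, differentiable=True
>>> loss.make_value_estimator(ValueEstimators.VTrace)

Because the estimator needs the behaviour log-probabilities, the actor must be built with ``return_log_prob=True`` (or the ``sample_log_prob`` key remapped through ``set_keys``), which is exactly what the test changes above do.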
+ + import abc import functools import warnings @@ -32,8 +34,10 @@ vec_generalized_advantage_estimate, vec_td1_return_estimate, vec_td_lambda_return_estimate, + vtrace_advantage_estimate, ) + try: from torch import vmap except ImportError as err: @@ -147,6 +151,17 @@ def _call_value_nets( return value, value_ +def _call_actor_net( + actor_net: TensorDictModuleBase, + data: TensorDictBase, + params: TensorDictBase, + log_prob_key: NestedKey, +): + # TODO: extend to handle time dimension (and vmap?) + log_pi = actor_net(data.select(actor_net.in_keys)).get(log_prob_key) + return log_pi + + class ValueEstimatorBase(TensorDictModuleBase): """An abstract parent class for value function modules. @@ -179,9 +194,11 @@ class _AcceptedKeys: whether a trajectory is done. Defaults to ``"done"``. terminated (NestedKey): The key in the input TensorDict that indicates whether a trajectory is terminated. Defaults to ``"terminated"``. - steps_to_next_obs_key (NestedKey): The key in the input tensordict + steps_to_next_obs (NestedKey): The key in the input tensordict that indicates the number of steps to the next observation. Defaults to ``"steps_to_next_obs"``. + sample_log_prob (NestedKey): The key in the input tensordict that + indicates the log probability of the sampled action. Defaults to ``"sample_log_prob"``. """ advantage: NestedKey = "advantage" @@ -191,6 +208,7 @@ class _AcceptedKeys: done: NestedKey = "done" terminated: NestedKey = "terminated" steps_to_next_obs: NestedKey = "steps_to_next_obs" + sample_log_prob: NestedKey = "sample_log_prob" default_keys = _AcceptedKeys() value_network: Union[TensorDictModule, Callable] @@ -223,6 +241,10 @@ def terminated_key(self): def steps_to_next_obs_key(self): return self.tensor_keys.steps_to_next_obs + @property + def sample_log_prob_key(self): + return self.tensor_keys.sample_log_prob + @abc.abstractmethod def forward( self, @@ -341,7 +363,7 @@ def set_keys(self, **kwargs) -> None: raise ValueError("tensordict keys cannot be None") if key not in self._AcceptedKeys.__dict__: raise KeyError( - f"{key} it not an accepted tensordict key for advantages" + f"{key} is not an accepted tensordict key for advantages" ) if ( key == "value" @@ -597,7 +619,7 @@ def value_estimate( if self.average_rewards: reward = reward - reward.mean() - reward = reward / reward.std().clamp_min(1e-4) + reward = reward / reward.std().clamp_min(1e-5) tensordict.set( ("next", self.tensor_keys.reward), reward ) # we must update the rewards if they are used later in the code @@ -799,7 +821,7 @@ def value_estimate( if self.average_rewards: reward = reward - reward.mean() - reward = reward / reward.std().clamp_min(1e-4) + reward = reward / reward.std().clamp_min(1e-5) tensordict.set( ("next", self.tensor_keys.reward), reward ) # we must update the rewards if they are used later in the code @@ -1137,7 +1159,7 @@ def __init__( def forward( self, tensordict: TensorDictBase, - *unused_args, + *, params: Optional[List[Tensor]] = None, target_params: Optional[List[Tensor]] = None, ) -> TensorDictBase: @@ -1328,6 +1350,287 @@ def value_estimate( return value_target +class VTrace(ValueEstimatorBase): + """A class wrapper around V-Trace estimate functional. + + Refer to "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner Architectures" + :ref:`here `_ for more context. + + Args: + gamma (scalar): exponential mean discount. + value_network (TensorDictModule): value operator used to retrieve the value estimates. 
+        actor_network (TensorDictModule): actor operator used to retrieve the log prob.
+        rho_thresh (Union[float, Tensor]): rho clipping parameter for importance weights.
+            Defaults to ``1.0``.
+        c_thresh (Union[float, Tensor]): c clipping parameter for importance weights.
+            Defaults to ``1.0``.
+        average_adv (bool): if ``True``, the resulting advantage values will be standardized.
+            Default is ``False``.
+        differentiable (bool, optional): if ``True``, gradients are propagated through
+            the computation of the value function. Default is ``False``.
+
+            .. note::
+              The proper way to make the function call non-differentiable is to
+              decorate it in a `torch.no_grad()` context manager/decorator or
+              pass detached parameters for functional modules.
+        skip_existing (bool, optional): if ``True``, the value network will skip
+            modules which outputs are already present in the tensordict.
+            Defaults to ``None``, i.e. the value of :func:`tensordict.nn.skip_existing()`
+            is not affected.
+        advantage_key (str or tuple of str, optional): [Deprecated] the key of
+            the advantage entry. Defaults to ``"advantage"``.
+        value_target_key (str or tuple of str, optional): [Deprecated] the key
+            of the value target entry. Defaults to ``"value_target"``.
+        value_key (str or tuple of str, optional): [Deprecated] the value key to
+            read from the input tensordict. Defaults to ``"state_value"``.
+        shifted (bool, optional): if ``True``, the value and next value are
+            estimated with a single call to the value network. This is faster
+            but is only valid whenever (1) the ``"next"`` value is shifted by
+            only one time step (which is not the case with multi-step value
+            estimation, for instance) and (2) when the parameters used at time
+            ``t`` and ``t+1`` are identical (which is not the case when target
+            parameters are to be used). Defaults to ``False``.
+
+    VTrace will return an :obj:`"advantage"` entry containing the advantage value. It will also
+    return a :obj:`"value_target"` entry with the V-Trace target value.
+
+    .. note::
+      As other advantage functions do, if the ``value_key`` is already present
+      in the input tensordict, the VTrace module will ignore the calls to the value
+      network (if any) and use the provided value instead.
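For reference, ``rho_thresh`` and ``c_thresh`` are the clipping thresholds written \bar{\rho} and \bar{c} in the IMPALA paper; the quantities computed by the estimator are, in the paper's notation (restated here for convenience, not quoted from the patch):

    v_s = V(x_s) + \sum_{t=s}^{s+n-1} \gamma^{t-s} \Big(\prod_{i=s}^{t-1} c_i\Big) \delta_t V,
    \qquad \delta_t V = \rho_t \big(r_t + \gamma V(x_{t+1}) - V(x_t)\big),

    \rho_t = \min\big(\bar{\rho},\ \pi(a_t \mid x_t) / \mu(a_t \mid x_t)\big),
    \qquad c_i = \min\big(\bar{c},\ \pi(a_i \mid x_i) / \mu(a_i \mid x_i)\big).

The advantage passed to the policy loss is \rho_s \big(r_s + \gamma v_{s+1} - V(x_s)\big); with the default thresholds of 1 and an on-policy batch (\pi = \mu), the targets reduce to the usual n-step bootstrapped returns.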
+ + """ + + def __init__( + self, + *, + gamma: Union[float, torch.Tensor], + actor_network: TensorDictModule, + value_network: TensorDictModule, + rho_thresh: Union[float, torch.Tensor] = 1.0, + c_thresh: Union[float, torch.Tensor] = 1.0, + average_adv: bool = False, + differentiable: bool = False, + skip_existing: Optional[bool] = None, + advantage_key: Optional[NestedKey] = None, + value_target_key: Optional[NestedKey] = None, + value_key: Optional[NestedKey] = None, + shifted: bool = False, + ): + super().__init__( + shifted=shifted, + value_network=value_network, + differentiable=differentiable, + advantage_key=advantage_key, + value_target_key=value_target_key, + value_key=value_key, + skip_existing=skip_existing, + ) + try: + device = next(value_network.parameters()).device + except (AttributeError, StopIteration): + device = torch.device("cpu") + + if not isinstance(gamma, torch.Tensor): + gamma = torch.tensor(gamma, device=device) + if not isinstance(rho_thresh, torch.Tensor): + rho_thresh = torch.tensor(rho_thresh, device=device) + if not isinstance(c_thresh, torch.Tensor): + c_thresh = torch.tensor(c_thresh, device=device) + + self.register_buffer("gamma", gamma) + self.register_buffer("rho_thresh", rho_thresh) + self.register_buffer("c_thresh", c_thresh) + self.average_adv = average_adv + self.actor_network = actor_network + + if isinstance(gamma, torch.Tensor) and gamma.shape != (): + raise NotImplementedError( + "Per-value gamma is not supported yet. Gamma must be a scalar." + ) + + @property + def in_keys(self): + parent_in_keys = super().in_keys + extended_in_keys = parent_in_keys + [self.tensor_keys.sample_log_prob] + return extended_in_keys + + @_self_set_skip_existing + @_self_set_grad_enabled + @dispatch + def forward( + self, + tensordict: TensorDictBase, + *, + params: Optional[List[Tensor]] = None, + target_params: Optional[List[Tensor]] = None, + ) -> TensorDictBase: + """Computes the V-Trace correction given the data in tensordict. + + If a functional module is provided, a nested TensorDict containing the parameters + (and if relevant the target parameters) can be passed to the module. + + Args: + tensordict (TensorDictBase): A TensorDict containing the data + (an observation key, "action", "reward", "done" and "next" tensordict state + as returned by the environment) necessary to compute the value estimates and the GAE. + The data passed to this module should be structured as :obj:`[*B, T, F]` where :obj:`B` are + the batch size, :obj:`T` the time dimension and :obj:`F` the feature dimension(s). + params (TensorDictBase, optional): A nested TensorDict containing the params + to be passed to the functional value network module. + target_params (TensorDictBase, optional): A nested TensorDict containing the + target params to be passed to the functional value network module. + + Returns: + An updated TensorDict with an advantage and a value_error keys as defined in the constructor. + + Examples: + >>> value_net = TensorDictModule(nn.Linear(3, 1), in_keys=["obs"], out_keys=["state_value"]) + >>> actor_net = TensorDictModule(nn.Linear(3, 4), in_keys=["obs"], out_keys=["logits"]) + >>> actor_net = ProbabilisticActor( + ... module=actor_net, + ... in_keys=["logits"], + ... out_keys=["action"], + ... distribution_class=OneHotCategorical, + ... return_log_prob=True, + ... ) + >>> module = VTrace( + ... gamma=0.98, + ... value_network=value_net, + ... actor_network=actor_net, + ... differentiable=False, + ... 
) + >>> obs, next_obs = torch.randn(2, 1, 10, 3) + >>> reward = torch.randn(1, 10, 1) + >>> done = torch.zeros(1, 10, 1, dtype=torch.bool) + >>> terminated = torch.zeros(1, 10, 1, dtype=torch.bool) + >>> sample_log_prob = torch.randn(1, 10, 1) + >>> tensordict = TensorDict({ + ... "obs": obs, + ... "done": done, + ... "terminated": terminated, + ... "sample_log_prob": sample_log_prob, + ... "next": {"obs": next_obs, "reward": reward, "done": done, "terminated": terminated}, + ... }, batch_size=[1, 10]) + >>> _ = module(tensordict) + >>> assert "advantage" in tensordict.keys() + + The module supports non-tensordict (i.e. unpacked tensordict) inputs too: + + Examples: + >>> value_net = TensorDictModule(nn.Linear(3, 1), in_keys=["obs"], out_keys=["state_value"]) + >>> actor_net = TensorDictModule(nn.Linear(3, 4), in_keys=["obs"], out_keys=["logits"]) + >>> actor_net = ProbabilisticActor( + ... module=actor_net, + ... in_keys=["logits"], + ... out_keys=["action"], + ... distribution_class=OneHotCategorical, + ... return_log_prob=True, + ... ) + >>> module = VTrace( + ... gamma=0.98, + ... value_network=value_net, + ... actor_network=actor_net, + ... differentiable=False, + ... ) + >>> obs, next_obs = torch.randn(2, 1, 10, 3) + >>> reward = torch.randn(1, 10, 1) + >>> done = torch.zeros(1, 10, 1, dtype=torch.bool) + >>> terminated = torch.zeros(1, 10, 1, dtype=torch.bool) + >>> sample_log_prob = torch.randn(1, 10, 1) + >>> tensordict = TensorDict({ + ... "obs": obs, + ... "done": done, + ... "terminated": terminated, + ... "sample_log_prob": sample_log_prob, + ... "next": {"obs": next_obs, "reward": reward, "done": done, "terminated": terminated}, + ... }, batch_size=[1, 10]) + >>> advantage, value_target = module( + ... obs=obs, next_reward=reward, next_done=done, next_obs=next_obs, next_terminated=terminated, sample_log_prob=sample_log_prob + ... 
) + + """ + if tensordict.batch_dims < 1: + raise RuntimeError( + "Expected input tensordict to have at least one dimensions, got " + f"tensordict.batch_size = {tensordict.batch_size}" + ) + reward = tensordict.get(("next", self.tensor_keys.reward)) + device = reward.device + gamma = self.gamma.to(device) + steps_to_next_obs = tensordict.get(self.tensor_keys.steps_to_next_obs, None) + if steps_to_next_obs is not None: + gamma = gamma ** steps_to_next_obs.view_as(reward) + + # Make sure we have the value and next value + if self.value_network is not None: + if params is not None: + params = params.detach() + if target_params is None: + target_params = params.clone(False) + with hold_out_net(self.value_network): + # we may still need to pass gradient, but we don't want to assign grads to + # value net params + value, next_value = _call_value_nets( + value_net=self.value_network, + data=tensordict, + params=params, + next_params=target_params, + single_call=self.shifted, + value_key=self.tensor_keys.value, + detach_next=True, + ) + else: + value = tensordict.get(self.tensor_keys.value) + next_value = tensordict.get(("next", self.tensor_keys.value)) + + # Make sure we have the log prob computed at collection time + if self.tensor_keys.sample_log_prob not in tensordict.keys(): + raise ValueError( + f"Expected {self.tensor_keys.sample_log_prob} to be in tensordict" + ) + log_mu = tensordict.get(self.tensor_keys.sample_log_prob).view_as(value) + + # Compute log prob with current policy + with hold_out_net(self.actor_network): + log_pi = _call_actor_net( + actor_net=self.actor_network, + data=tensordict, + params=None, + log_prob_key=self.tensor_keys.sample_log_prob, + ).view_as(value) + + # Compute the V-Trace correction + done = tensordict.get(("next", self.tensor_keys.done)) + terminated = tensordict.get(("next", self.tensor_keys.terminated)) + + adv, value_target = vtrace_advantage_estimate( + gamma, + log_pi, + log_mu, + value, + next_value, + reward, + done, + terminated, + rho_thresh=self.rho_thresh, + c_thresh=self.c_thresh, + time_dim=tensordict.ndim - 1, + ) + + if self.average_adv: + loc = adv.mean() + scale = adv.std().clamp_min(1e-5) + adv = adv - loc + adv = adv / scale + + tensordict.set(self.tensor_keys.advantage, adv) + tensordict.set(self.tensor_keys.value_target, value_target) + + return tensordict + + def _deprecate_class(cls, new_cls): @wraps(cls.__init__) def new_init(self, *args, **kwargs): diff --git a/torchrl/objectives/value/functional.py b/torchrl/objectives/value/functional.py index 7c33895e965..6c43af02aeb 100644 --- a/torchrl/objectives/value/functional.py +++ b/torchrl/objectives/value/functional.py @@ -27,6 +27,7 @@ "vec_td_lambda_return_estimate", "td_lambda_advantage_estimate", "vec_td_lambda_advantage_estimate", + "vtrace_advantage_estimate", ] from torchrl.objectives.value.utils import ( @@ -1212,6 +1213,93 @@ def vec_td_lambda_advantage_estimate( ) +######################################################################## +# V-Trace +# ----- + + +@_transpose_time +def vtrace_advantage_estimate( + gamma: float, + log_pi: torch.Tensor, + log_mu: torch.Tensor, + state_value: torch.Tensor, + next_state_value: torch.Tensor, + reward: torch.Tensor, + done: torch.Tensor, + terminated: torch.Tensor | None = None, + rho_thresh: Union[float, torch.Tensor] = 1.0, + c_thresh: Union[float, torch.Tensor] = 1.0, + time_dim: int = -2, +) -> Tuple[torch.Tensor, torch.Tensor]: + """Computes V-Trace off-policy actor critic targets. 
+ + Refer to "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner Architectures" + https://arxiv.org/abs/1802.01561 for more context. + + Args: + gamma (scalar): exponential mean discount. + log_pi (Tensor): collection actor log probability of taking actions in the environment. + log_mu (Tensor): current actor log probability of taking actions in the environment. + state_value (Tensor): value function result with state input. + next_state_value (Tensor): value function result with next_state input. + reward (Tensor): reward of taking actions in the environment. + done (Tensor): boolean flag for end of episode. + terminated (torch.Tensor): a [B, T] boolean tensor containing the terminated states. + rho_thresh (Union[float, Tensor]): rho clipping parameter for importance weights. + c_thresh (Union[float, Tensor]): c clipping parameter for importance weights. + time_dim (int): dimension where the time is unrolled. Defaults to -2. + + All tensors (values, reward and done) must have shape + ``[*Batch x TimeSteps x *F]``, with ``*F`` feature dimensions. + """ + if not (next_state_value.shape == state_value.shape == reward.shape == done.shape): + raise RuntimeError(SHAPE_ERR) + + device = state_value.device + + if not isinstance(rho_thresh, torch.Tensor): + rho_thresh = torch.tensor(rho_thresh, device=device) + if not isinstance(c_thresh, torch.Tensor): + c_thresh = torch.tensor(c_thresh, device=device) + + c_thresh = c_thresh.to(device) + rho_thresh = rho_thresh.to(device) + + not_done = (~done).int() + not_terminated = not_done if terminated is None else (~terminated).int() + *batch_size, time_steps, lastdim = not_done.shape + done_discounts = gamma * not_done + terminated_discounts = gamma * not_terminated + + rho = (log_pi - log_mu).exp() + clipped_rho = rho.clamp_max(rho_thresh) + deltas = clipped_rho * ( + reward + terminated_discounts * next_state_value - state_value + ) + clipped_c = rho.clamp_max(c_thresh) + + vs_minus_v_xs = [torch.zeros_like(next_state_value[..., -1, :])] + for i in reversed(range(time_steps)): + discount_t, c_t, delta_t = ( + done_discounts[..., i, :], + clipped_c[..., i, :], + deltas[..., i, :], + ) + vs_minus_v_xs.append(delta_t + discount_t * c_t * vs_minus_v_xs[-1]) + vs_minus_v_xs = torch.stack(vs_minus_v_xs[1:], dim=time_dim) + vs_minus_v_xs = torch.flip(vs_minus_v_xs, dims=[time_dim]) + vs = vs_minus_v_xs + state_value + vs_t_plus_1 = torch.cat( + [vs[..., 1:, :], next_state_value[..., -1:, :]], dim=time_dim + ) + advantages = clipped_rho * ( + reward + terminated_discounts * vs_t_plus_1 - state_value + ) + + return advantages, vs + + ######################################################################## # Reward to go # ------------ diff --git a/torchrl/objectives/value/vtrace.py b/torchrl/objectives/value/vtrace.py deleted file mode 100644 index 43f5246502f..00000000000 --- a/torchrl/objectives/value/vtrace.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
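A short, self-contained usage sketch of the functional estimator added above (illustrative only; the shapes follow the ``[*Batch x TimeSteps x *F]`` convention from the docstring, and all inputs are random placeholders):

>>> import torch
>>> from torchrl.objectives.value.functional import vtrace_advantage_estimate
>>> B, T = 2, 5
>>> reward = torch.randn(B, T, 1)
>>> state_value = torch.randn(B, T, 1)
>>> next_state_value = torch.randn(B, T, 1)
>>> log_pi = torch.rand(B, T, 1).log()  # log-probs under the current (target) policy
>>> log_mu = torch.rand(B, T, 1).log()  # log-probs under the behaviour policy
>>> done = torch.zeros(B, T, 1, dtype=torch.bool)
>>> terminated = torch.zeros(B, T, 1, dtype=torch.bool)
>>> advantage, value_target = vtrace_advantage_estimate(
...     0.99, log_pi, log_mu, state_value, next_state_value, reward,
...     done=done, terminated=terminated,
... )
>>> assert advantage.shape == value_target.shape == (B, T, 1)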
- -import math -from typing import Tuple, Union - -import torch - - -def _c_val( - log_pi: torch.Tensor, - log_mu: torch.Tensor, - c: Union[float, torch.Tensor] = 1, -) -> torch.Tensor: - return (log_pi - log_mu).clamp_max(math.log(c)).exp() - - -def _dv_val( - rewards: torch.Tensor, - vals: torch.Tensor, - gamma: Union[float, torch.Tensor], - rho_bar: Union[float, torch.Tensor], - log_pi: torch.Tensor, - log_mu: torch.Tensor, -) -> Tuple[torch.Tensor, torch.Tensor]: - rho = _c_val(log_pi, log_mu, rho_bar) - next_vals = torch.cat([vals[:, 1:], torch.zeros_like(vals[:, :1])], 1) - dv = rho * (rewards + gamma * next_vals - vals) - return dv, rho - - -def _vtrace( - rewards: torch.Tensor, - vals: torch.Tensor, - log_pi: torch.Tensor, - log_mu: torch.Tensor, - gamma: Union[torch.Tensor, float], - rho_bar: Union[float, torch.Tensor] = 1.0, - c_bar: Union[float, torch.Tensor] = 1.0, -) -> Tuple[torch.Tensor, torch.Tensor]: - T = vals.shape[1] - if not isinstance(gamma, torch.Tensor): - gamma = torch.full_like(vals, gamma) - - dv, rho = _dv_val(rewards, vals, gamma, rho_bar, log_pi, log_mu) - c = _c_val(log_pi, log_mu, c_bar) - - v_out = [] - v_out.append(vals[:, -1] + dv[:, -1]) - for t in range(T - 2, -1, -1): - _v_out = ( - vals[:, t] + dv[:, t] + gamma[:, t] * c[:, t] * (v_out[-1] - vals[:, t + 1]) - ) - v_out.append(_v_out) - v_out = torch.stack(list(reversed(v_out)), 1) - return v_out, rho From fa149e43638e00d2914091aa4361368709d28f18 Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Fri, 24 Nov 2023 10:07:00 +0000 Subject: [PATCH 13/21] [Doc] Fix discord link (#1712) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5a21b3701d4..905e8d28a4c 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ pypi nightly version [![Downloads](https://static.pepy.tech/personalized-badge/torchrl?period=total&units=international_system&left_color=blue&right_color=orange&left_text=Downloads)](https://pepy.tech/project/torchrl) [![Downloads](https://static.pepy.tech/personalized-badge/torchrl-nightly?period=total&units=international_system&left_color=blue&right_color=orange&left_text=Downloads%20(nightly))](https://pepy.tech/project/torchrl-nightly) -[![Discord Shield](https://dcbadge.vercel.app/api/server/2XJdEenU)](https://discord.gg/2XJdEenU) +[![Discord Shield](https://dcbadge.vercel.app/api/server/cZs26Qq3Dd)](https://discord.gg/cZs26Qq3Dd) # TorchRL From bc7595fbd0a9da085004685f6bf929b746436de3 Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Fri, 24 Nov 2023 14:44:20 +0000 Subject: [PATCH 14/21] [Refactor] Refactor functional calls in losses (#1707) --- test/assets/generate.py | 10 +- test/test_cost.py | 25 ++- torchrl/data/rlhf/dataset.py | 1 + torchrl/envs/transforms/rlhf.py | 25 +-- torchrl/envs/transforms/utils.py | 9 + torchrl/modules/tensordict_module/actors.py | 21 +-- torchrl/modules/tensordict_module/common.py | 6 +- torchrl/modules/tensordict_module/sequence.py | 6 +- torchrl/objectives/a2c.py | 14 +- torchrl/objectives/common.py | 174 ++++-------------- torchrl/objectives/cql.py | 58 +++--- torchrl/objectives/ddpg.py | 28 ++- torchrl/objectives/decision_transformer.py | 6 +- torchrl/objectives/deprecated.py | 40 ++-- torchrl/objectives/dqn.py | 28 +-- torchrl/objectives/iql.py | 37 +--- torchrl/objectives/multiagent/qmixer.py | 9 +- torchrl/objectives/ppo.py | 16 +- torchrl/objectives/redq.py | 21 +-- torchrl/objectives/reinforce.py | 12 +- torchrl/objectives/sac.py | 68 +++---- torchrl/objectives/td3.py | 32 +--- 
torchrl/objectives/utils.py | 38 ++-- torchrl/objectives/value/advantages.py | 38 ++-- 24 files changed, 273 insertions(+), 449 deletions(-) diff --git a/test/assets/generate.py b/test/assets/generate.py index 75a87bb71b5..45006e57a84 100644 --- a/test/assets/generate.py +++ b/test/assets/generate.py @@ -36,14 +36,20 @@ def get_minibatch(): batch_size=16, block_size=33, tensorclass_type=PromptData, - dataset_name="test/datasets_mini/openai_summarize_tldr", + dataset_name="CarperAI/openai_summarize_tldr", device="cpu", infinite=False, prefetch=0, split="train", - from_disk=True, + from_disk=False, root_dir=tmpdir, ) for data in dl: data = data.clone().memmap_("test/datasets_mini/tldr_batch/") break + print("done") + + +if __name__ == "__main__": + # generate_small_dataset() + get_minibatch() diff --git a/test/test_cost.py b/test/test_cost.py index 35297c3a1e6..6153e1ae712 100644 --- a/test/test_cost.py +++ b/test/test_cost.py @@ -47,7 +47,7 @@ get_default_devices, ) from mocking_classes import ContinuousActionConvMockEnv -from tensordict.nn import get_functional, NormalParamExtractor, TensorDictModule +from tensordict.nn import NormalParamExtractor, TensorDictModule from tensordict.nn.utils import Buffer # from torchrl.data.postprocs.utils import expand_as_right @@ -4967,6 +4967,18 @@ def test_cql( else: raise NotImplementedError(k) loss_fn.zero_grad() + assert all( + (p.grad is None) or (p.grad == 0).all() + for p in loss_fn.actor_network_params.values( + include_nested=True, leaves_only=True + ) + ) + assert all( + (p.grad is None) or (p.grad == 0).all() + for p in loss_fn.qvalue_network_params.values( + include_nested=True, leaves_only=True + ) + ) sum([item for _, item in loss.items()]).backward() named_parameters = list(loss_fn.named_parameters()) @@ -6500,6 +6512,8 @@ def test_a2c(self, device, gradient_mode, advantage, td_est): assert ("critic" not in name) or ("target_" in name) value.zero_grad() + for n, p in loss_fn.named_parameters(): + assert p.grad is None or p.grad.norm() == 0, n loss_objective.backward() named_parameters = loss_fn.named_parameters() for name, p in named_parameters: @@ -6900,20 +6914,20 @@ def test_reinforce_value_net(self, advantage, gradient_mode, delay_value, td_est advantage = GAE( gamma=gamma, lmbda=0.9, - value_network=get_functional(value_net), + value_network=value_net, differentiable=gradient_mode, ) elif advantage == "td": advantage = TD1Estimator( gamma=gamma, - value_network=get_functional(value_net), + value_network=value_net, differentiable=gradient_mode, ) elif advantage == "td_lambda": advantage = TDLambdaEstimator( gamma=0.9, lmbda=0.9, - value_network=get_functional(value_net), + value_network=value_net, differentiable=gradient_mode, ) elif advantage is None: @@ -9829,9 +9843,6 @@ def test_tdlambda_tensor_gamma(self, device, gamma, lmbda, N, T, has_done): next_state_value = torch.randn(*N, T, 1, device=device) gamma_tensor = torch.full((*N, T, 1), gamma, device=device) - # if len(N) == 2: - # print(terminated[4, 0, -10:]) - # print(done[4, 0, -10:]) v1 = vec_td_lambda_advantage_estimate( gamma, lmbda, diff --git a/torchrl/data/rlhf/dataset.py b/torchrl/data/rlhf/dataset.py index adc2ddcf0d7..aa8f02d98cb 100644 --- a/torchrl/data/rlhf/dataset.py +++ b/torchrl/data/rlhf/dataset.py @@ -137,6 +137,7 @@ def load(self): data_dir = root_dir / str(Path(self.dataset_name).name).split("-")[0] data_dir_total = data_dir / split / str(max_length) # search for data + print(data_dir_total) if os.path.exists(data_dir_total): dataset = 
TensorDict.load_memmap(data_dir_total) return dataset diff --git a/torchrl/envs/transforms/rlhf.py b/torchrl/envs/transforms/rlhf.py index 240c1029486..48464d9f9c4 100644 --- a/torchrl/envs/transforms/rlhf.py +++ b/torchrl/envs/transforms/rlhf.py @@ -5,18 +5,13 @@ from copy import copy, deepcopy import torch -from tensordict import TensorDictBase, unravel_key -from tensordict.nn import ( - make_functional, - ProbabilisticTensorDictModule, - repopulate_module, - TensorDictParams, -) +from tensordict import TensorDict, TensorDictBase, unravel_key +from tensordict.nn import ProbabilisticTensorDictModule, TensorDictParams from tensordict.utils import is_seq_of_nested_key from torch import nn from torchrl.data.tensor_specs import CompositeSpec, UnboundedContinuousTensorSpec from torchrl.envs.transforms.transforms import Transform -from torchrl.envs.transforms.utils import _set_missing_tolerance +from torchrl.envs.transforms.utils import _set_missing_tolerance, _stateless_param class KLRewardTransform(Transform): @@ -116,11 +111,10 @@ def __init__( self.in_keys = self.in_keys + actor.in_keys # check that the model has parameters - params = make_functional( - actor, keep_params=False, funs_to_decorate=["forward", "get_dist"] - ) - self.functional_actor = deepcopy(actor) - repopulate_module(actor, params) + params = TensorDict.from_module(actor) + with params.apply(_stateless_param).to_module(actor): + # copy a stateless actor + self.__dict__["functional_actor"] = deepcopy(actor) # we need to register these params as buffer to have `to` and similar # methods work properly @@ -170,9 +164,8 @@ def _call(self, tensordict: TensorDictBase) -> TensorDictBase: if self.out_keys[0] != ("reward",) and self.parent is not None: tensordict.set(self.out_keys[0], self.parent.reward_spec.zero()) return tensordict - dist = self.functional_actor.get_dist( - tensordict.clone(False), params=self.frozen_params - ) + with self.frozen_params.to_module(self.functional_actor): + dist = self.functional_actor.get_dist(tensordict.clone(False)) # get the log_prob given the original model log_prob = dist.log_prob(action) reward_key = self.in_keys[0] diff --git a/torchrl/envs/transforms/utils.py b/torchrl/envs/transforms/utils.py index a99c22a87da..a1b30cb1aca 100644 --- a/torchrl/envs/transforms/utils.py +++ b/torchrl/envs/transforms/utils.py @@ -5,6 +5,7 @@ import torch +from torch import nn def check_finite(tensor: torch.Tensor): @@ -59,3 +60,11 @@ def _get_reset(reset_key, tensordict): if _reset.ndim > parent_td.ndim: _reset = _reset.flatten(parent_td.ndim, -1).any(-1) return _reset + + +def _stateless_param(param): + is_param = isinstance(param, nn.Parameter) + param = param.data.to("meta") + if is_param: + return nn.Parameter(param, requires_grad=False) + return param diff --git a/torchrl/modules/tensordict_module/actors.py b/torchrl/modules/tensordict_module/actors.py index 1e5a557546a..bf81cfd5dfd 100644 --- a/torchrl/modules/tensordict_module/actors.py +++ b/torchrl/modules/tensordict_module/actors.py @@ -183,7 +183,7 @@ class ProbabilisticActor(SafeProbabilisticTensorDictSequential): Examples: >>> import torch >>> from tensordict import TensorDict - >>> from tensordict.nn import TensorDictModule, make_functional + >>> from tensordict.nn import TensorDictModule >>> from torchrl.data import BoundedTensorSpec >>> from torchrl.modules import ProbabilisticActor, NormalParamWrapper, TanhNormal >>> td = TensorDict({"observation": torch.randn(3, 4)}, [3,]) @@ -197,8 +197,9 @@ class 
ProbabilisticActor(SafeProbabilisticTensorDictSequential): ... in_keys=["loc", "scale"], ... distribution_class=TanhNormal, ... ) - >>> params = make_functional(td_module) - >>> td = td_module(td, params=params) + >>> params = TensorDict.from_module(td_module) + >>> with params.to_module(td_module): + ... td = td_module(td) >>> td TensorDict( fields={ @@ -319,7 +320,6 @@ class ValueOperator(TensorDictModule): Examples: >>> import torch >>> from tensordict import TensorDict - >>> from tensordict.nn import make_functional >>> from torch import nn >>> from torchrl.data import UnboundedContinuousTensorSpec >>> from torchrl.modules import ValueOperator @@ -334,8 +334,9 @@ class ValueOperator(TensorDictModule): >>> td_module = ValueOperator( ... in_keys=["observation", "action"], module=module ... ) - >>> params = make_functional(td_module) - >>> td = td_module(td, params=params) + >>> params = TensorDict.from_module(td_module) + >>> with params.to_module(td_module): + ... td = td_module(td) >>> print(td) TensorDict( fields={ @@ -792,7 +793,6 @@ class QValueHook: Examples: >>> import torch >>> from tensordict import TensorDict - >>> from tensordict.nn.functional_modules import make_functional >>> from torch import nn >>> from torchrl.data import OneHotDiscreteTensorSpec >>> from torchrl.modules.tensordict_module.actors import QValueHook, Actor @@ -878,7 +878,6 @@ class DistributionalQValueHook(QValueHook): Examples: >>> import torch >>> from tensordict import TensorDict - >>> from tensordict.nn.functional_modules import make_functional >>> from torch import nn >>> from torchrl.data import OneHotDiscreteTensorSpec >>> from torchrl.modules.tensordict_module.actors import DistributionalQValueHook, Actor @@ -893,12 +892,13 @@ class DistributionalQValueHook(QValueHook): ... return self.linear(x).view(-1, nbins, 4).log_softmax(-2) ... >>> module = CustomDistributionalQval() - >>> params = make_functional(module) + >>> params = TensorDict.from_module(module) >>> action_spec = OneHotDiscreteTensorSpec(4) >>> hook = DistributionalQValueHook("one_hot", support = torch.arange(nbins)) >>> module.register_forward_hook(hook) >>> qvalue_actor = Actor(module=module, spec=action_spec, out_keys=["action", "action_value"]) - >>> qvalue_actor(td, params=params) + >>> with params.to_module(module): + ... qvalue_actor(td) >>> print(td) TensorDict( fields={ @@ -992,7 +992,6 @@ class QValueActor(SafeSequential): Examples: >>> import torch >>> from tensordict import TensorDict - >>> from tensordict.nn.functional_modules import make_functional >>> from torch import nn >>> from torchrl.data import OneHotDiscreteTensorSpec >>> from torchrl.modules.tensordict_module.actors import QValueActor diff --git a/torchrl/modules/tensordict_module/common.py b/torchrl/modules/tensordict_module/common.py index c5f34a7774d..22786519681 100644 --- a/torchrl/modules/tensordict_module/common.py +++ b/torchrl/modules/tensordict_module/common.py @@ -138,7 +138,6 @@ class SafeModule(TensorDictModule): Examples: >>> import torch >>> from tensordict import TensorDict - >>> from tensordict.nn.functional_modules import make_functional >>> from torchrl.data import UnboundedContinuousTensorSpec >>> from torchrl.modules import TensorDictModule >>> td = TensorDict({"input": torch.randn(3, 4), "hidden": torch.randn(3, 8)}, [3,]) @@ -150,8 +149,9 @@ class SafeModule(TensorDictModule): ... in_keys=["input", "hidden"], ... out_keys=["output"], ... 
) - >>> params = make_functional(td_fmodule) - >>> td_functional = td_fmodule(td.clone(), params=params) + >>> params = TensorDict.from_module(td_fmodule) + >>> with params.to_module(td_module): + ... td_functional = td_fmodule(td.clone()) >>> print(td_functional) TensorDict( fields={ diff --git a/torchrl/modules/tensordict_module/sequence.py b/torchrl/modules/tensordict_module/sequence.py index 71167c5106f..28f721ba6a1 100644 --- a/torchrl/modules/tensordict_module/sequence.py +++ b/torchrl/modules/tensordict_module/sequence.py @@ -33,7 +33,6 @@ class SafeSequential(TensorDictSequential, SafeModule): Examples: >>> import torch >>> from tensordict import TensorDict - >>> from tensordict.nn.functional_modules import make_functional >>> from torchrl.data import CompositeSpec, UnboundedContinuousTensorSpec >>> from torchrl.modules import TanhNormal, SafeSequential, TensorDictModule, NormalParamWrapper >>> from torchrl.modules.tensordict_module import SafeProbabilisticModule @@ -58,8 +57,9 @@ class SafeSequential(TensorDictSequential, SafeModule): ... out_keys=["output"], ... ) >>> td_module = SafeSequential(td_module1, td_module2) - >>> params = make_functional(td_module) - >>> td_module(td, params=params) + >>> params = TensorDict.from_module(td_module) + >>> with params.to_module(td_module): + ... td_module(td) >>> print(td) TensorDict( fields={ diff --git a/torchrl/objectives/a2c.py b/torchrl/objectives/a2c.py index 92955d4cab3..4384ccef282 100644 --- a/torchrl/objectives/a2c.py +++ b/torchrl/objectives/a2c.py @@ -327,8 +327,8 @@ def _log_probs( f"tensordict stored {self.tensor_keys.action} require grad." ) tensordict_clone = tensordict.select(*self.actor.in_keys).clone() - - dist = self.actor.get_dist(tensordict_clone, params=self.actor_params) + with self.actor_params.to_module(self.actor): + dist = self.actor.get_dist(tensordict_clone) log_prob = dist.log_prob(action) log_prob = log_prob.unsqueeze(-1) return log_prob, dist @@ -339,10 +339,10 @@ def loss_critic(self, tensordict: TensorDictBase) -> torch.Tensor: # overhead that we could easily reduce. 
target_return = tensordict.get(self.tensor_keys.value_target) tensordict_select = tensordict.select(*self.critic.in_keys) - state_value = self.critic( - tensordict_select, - params=self.critic_params, - ).get(self.tensor_keys.value) + with self.critic_params.to_module(self.critic): + state_value = self.critic( + tensordict_select, + ).get(self.tensor_keys.value) loss_value = distance_loss( target_return, state_value, @@ -374,6 +374,7 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase: target_params=self.target_critic_params, ) advantage = tensordict.get(self.tensor_keys.advantage) + assert not advantage.requires_grad log_probs, dist = self._log_probs(tensordict) loss = -(log_probs * advantage) td_out = TensorDict({"loss_objective": loss.mean()}, []) @@ -392,6 +393,7 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams self.value_type = value_type hp = dict(default_value_kwargs(value_type)) hp.update(hyperparams) + if hasattr(self, "gamma"): hp["gamma"] = self.gamma if value_type == ValueEstimators.TD1: diff --git a/torchrl/objectives/common.py b/torchrl/objectives/common.py index 37c5e820d23..367882a5bca 100644 --- a/torchrl/objectives/common.py +++ b/torchrl/objectives/common.py @@ -10,15 +10,9 @@ from dataclasses import dataclass from typing import Iterator, List, Optional, Tuple -from tensordict import TensorDictBase - -from tensordict.nn import ( - make_functional, - repopulate_module, - TensorDictModule, - TensorDictModuleBase, - TensorDictParams, -) +from tensordict import TensorDict, TensorDictBase + +from tensordict.nn import TensorDictModule, TensorDictModuleBase, TensorDictParams from torch import nn from torch.nn import Parameter @@ -87,7 +81,7 @@ class _AcceptedKeys: pass default_value_estimator: ValueEstimators = None - SEP = "_sep_" + SEP = "." TARGET_NET_WARNING = ( "No target network updater has been associated " "with this loss module, but target parameters have been found. " @@ -178,7 +172,7 @@ def convert_to_functional( expand_dim: Optional[int] = None, create_target_params: bool = False, compare_against: Optional[List[Parameter]] = None, - funs_to_decorate=None, + **kwargs, ) -> None: """Converts a module to functional to be used in the loss. @@ -191,7 +185,7 @@ def convert_to_functional( >>> module(tensordict, params=params) ``params`` is a :class:`tensordict.TensorDict` instance with parameters - stuctured as the output of :func:`tensordict.nn.make_functional` + stuctured as the output of :func:`tensordict.TensorDict.from_module` is. module_name (str): name where the module will be found. The parameters of the module will be found under ``loss_module._params`` @@ -223,45 +217,27 @@ def convert_to_functional( the resulting parameters will be a detached version of the original parameters. If ``None``, the resulting parameters will carry gradients as expected. - funs_to_decorate (list of str, optional): if provided, the list of - methods of ``module`` to make functional, ie the list of - methods that will accept the ``params`` keyword argument. """ - if funs_to_decorate is None: - funs_to_decorate = ["forward"] + if kwargs.pop("funs_to_decorate", None) is not None: + warnings.warn( + "funs_to_decorate is without effect with the new objective API.", + category=DeprecationWarning, + ) + if kwargs: + raise TypeError(f"Unrecognised keyword arguments {list(kwargs.keys())}") # To make it robust to device casting, we must register list of # tensors as lazy calls to `getattr(self, name_of_tensor)`. 
# Otherwise, casting the module to a device will keep old references # to uncast tensors sep = self.SEP - params = make_functional(module, funs_to_decorate=funs_to_decorate) - # buffer_names = next(itertools.islice(zip(*module.named_buffers()), 1)) - buffer_names = [] - for key, value in params.items(True, True): - # we just consider all that is not param as a buffer, but if the module has been made - # functional and the params have been replaced this may break - if not isinstance(value, nn.Parameter): - key = sep.join(key) if not isinstance(key, str) else key - buffer_names.append(key) - functional_module = deepcopy(module) - repopulate_module(module, params) - - params_and_buffers = params - # we transform the buffers in params to make sure they follow the device - # as tensor = nn.Parameter(tensor) keeps its identity when moved to another device - - # separate params and buffers - params_and_buffers = TensorDictParams(params_and_buffers, no_convert=True) - # sanity check - for key in params_and_buffers.keys(True): + params = TensorDict.from_module(module, as_module=True) + + for key in params.keys(True): if sep in key: raise KeyError( f"The key {key} contains the '_sep_' pattern which is prohibited. Consider renaming the parameter / buffer." ) - params_and_buffers_flat = params_and_buffers.flatten_keys(sep) - buffers = params_and_buffers_flat.select(*buffer_names) - params = params_and_buffers_flat.exclude(*buffer_names) if compare_against is not None: compare_against = set(compare_against) else: @@ -273,6 +249,9 @@ def convert_to_functional( # For buffers, a cloned expansion (or equivalently a repeat) is returned. def _compare_and_expand(param): + if not isinstance(param, nn.Parameter): + buffer = param.expand(expand_dim, *param.shape).clone() + return buffer if param in compare_against: expanded_param = param.data.expand(expand_dim, *param.shape) # the expanded parameter must be sent to device when to() @@ -287,45 +266,40 @@ def _compare_and_expand(param): ) return p_out - params = params.apply( - _compare_and_expand, batch_size=[expand_dim, *params.shape] - ) - - buffers = buffers.apply( - lambda buffer: buffer.expand(expand_dim, *buffer.shape).clone(), - batch_size=[expand_dim, *buffers.shape], + params = TensorDictParams( + params.apply( + _compare_and_expand, batch_size=[expand_dim, *params.shape] + ), + no_convert=True, ) - params_and_buffers.update(params.unflatten_keys(sep)) - params_and_buffers.update(buffers.unflatten_keys(sep)) - params_and_buffers.batch_size = params.batch_size - - # self.params_to_map = params_to_map - param_name = module_name + "_params" prev_set_params = set(self.parameters()) # register parameters and buffers - for key, parameter in list(params_and_buffers.items(True, True)): + for key, parameter in list(params.items(True, True)): if parameter not in prev_set_params: pass elif compare_against is not None and parameter in compare_against: - params_and_buffers.set(key, parameter.data) + params.set(key, parameter.data) - setattr(self, param_name, params_and_buffers) + setattr(self, param_name, params) - # set the functional module - setattr(self, module_name, functional_module) + # set the functional module: we need to convert the params to non-differentiable params + # otherwise they will appear twice in parameters + p = TensorDict.from_module(module) + with params.detach().to("meta").to_module(module): + # avoid buffers and params being exposed + self.__dict__[module_name] = deepcopy(module) + assert (p == TensorDict.from_module(module)).all() 
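# Hedged sketch of the step above: binding a detached, meta-device copy of the
# parameters while deepcopy-ing produces a stateless module skeleton, so the real
# parameters are neither duplicated nor re-registered on the loss module. The
# nn.Linear below is illustrative; the from_module/to_module/deepcopy pattern is
# the one used in this hunk.
from copy import deepcopy

from torch import nn
from tensordict import TensorDict

module = nn.Linear(3, 3)
params = TensorDict.from_module(module)
with params.detach().to("meta").to_module(module):
    stateless_copy = deepcopy(module)  # structure only: its tensors live on "meta"
# the original parameters are restored on `module` once the context exits,
# while `stateless_copy` keeps meta tensors and shares no state with it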
name_params_target = "target_" + module_name if create_target_params: # if create_target_params: # we create a TensorDictParams to keep the target params as Buffer instances target_params = TensorDictParams( - params_and_buffers.apply( - _make_target_param(clone=create_target_params) - ), + params.apply(_make_target_param(clone=create_target_params)), no_convert=True, ) setattr(self, name_params_target + "_params", target_params) @@ -458,84 +432,6 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams else: raise NotImplementedError(f"Unknown value type {value_type}") - # def _apply(self, fn, recurse=True): - # """Modifies torch.nn.Module._apply to work with Buffer class.""" - # if recurse: - # for module in self.children(): - # module._apply(fn) - # - # def compute_should_use_set_data(tensor, tensor_applied): - # if torch._has_compatible_shallow_copy_type(tensor, tensor_applied): - # # If the new tensor has compatible tensor type as the existing tensor, - # # the current behavior is to change the tensor in-place using `.data =`, - # # and the future behavior is to overwrite the existing tensor. However, - # # changing the current behavior is a BC-breaking change, and we want it - # # to happen in future releases. So for now we introduce the - # # `torch.__future__.get_overwrite_module_params_on_conversion()` - # # global flag to let the user control whether they want the future - # # behavior of overwriting the existing tensor or not. - # return not torch.__future__.get_overwrite_module_params_on_conversion() - # else: - # return False - # - # for key, param in self._parameters.items(): - # if param is None: - # continue - # # Tensors stored in modules are graph leaves, and we don't want to - # # track autograd history of `param_applied`, so we have to use - # # `with torch.no_grad():` - # with torch.no_grad(): - # param_applied = fn(param) - # should_use_set_data = compute_should_use_set_data(param, param_applied) - # if should_use_set_data: - # param.data = param_applied - # out_param = param - # else: - # assert isinstance(param, Parameter) - # assert param.is_leaf - # out_param = Parameter(param_applied, param.requires_grad) - # self._parameters[key] = out_param - # - # if param.grad is not None: - # with torch.no_grad(): - # grad_applied = fn(param.grad) - # should_use_set_data = compute_should_use_set_data(param.grad, grad_applied) - # if should_use_set_data: - # assert out_param.grad is not None - # out_param.grad.data = grad_applied - # else: - # assert param.grad.is_leaf - # out_param.grad = grad_applied.requires_grad_(param.grad.requires_grad) - # - # for key, buffer in self._buffers.items(): - # if buffer is None: - # continue - # # Tensors stored in modules are graph leaves, and we don't want to - # # track autograd history of `buffer_applied`, so we have to use - # # `with torch.no_grad():` - # with torch.no_grad(): - # buffer_applied = fn(buffer) - # should_use_set_data = compute_should_use_set_data(buffer, buffer_applied) - # if should_use_set_data: - # buffer.data = buffer_applied - # out_buffer = buffer - # else: - # assert isinstance(buffer, Buffer) - # assert buffer.is_leaf - # out_buffer = Buffer(buffer_applied, buffer.requires_grad) - # self._buffers[key] = out_buffer - # - # if buffer.grad is not None: - # with torch.no_grad(): - # grad_applied = fn(buffer.grad) - # should_use_set_data = compute_should_use_set_data(buffer.grad, grad_applied) - # if should_use_set_data: - # assert out_buffer.grad is not None - # out_buffer.grad.data = 
grad_applied - # else: - # assert buffer.grad.is_leaf - # out_buffer.grad = grad_applied.requires_grad_(buffer.grad.requires_grad) - return self diff --git a/torchrl/objectives/cql.py b/torchrl/objectives/cql.py index 9055e5464c6..0c8caa5a60b 100644 --- a/torchrl/objectives/cql.py +++ b/torchrl/objectives/cql.py @@ -4,6 +4,7 @@ # LICENSE file in the root directory of this source tree. import math import warnings +from copy import deepcopy from dataclasses import dataclass from typing import Optional, Tuple, Union @@ -26,6 +27,7 @@ from torchrl.objectives.utils import ( _cache_values, _GAMMA_LMBDA_DEPREC_WARNING, + _vmap_func, default_value_kwargs, distance_loss, ValueEstimators, @@ -33,18 +35,6 @@ from torchrl.objectives.value import TD0Estimator, TD1Estimator, TDLambdaEstimator -try: - try: - from torch import vmap - except ImportError: - from functorch import vmap - - _has_functorch = True - err = "" -except ImportError as err: - _has_functorch = False - FUNCTORCH_ERROR = err - class CQLLoss(LossModule): """TorchRL implementation of the continuous CQL loss. @@ -266,8 +256,6 @@ def __init__( priority_key: str = None, ) -> None: self._out_keys = None - if not _has_functorch: - raise ImportError("Failed to import functorch.") from FUNCTORCH_ERROR super().__init__() self._set_deprecated_ctor_keys(priority_key=priority_key) @@ -277,7 +265,6 @@ def __init__( actor_network, "actor_network", create_target_params=self.delay_actor, - funs_to_decorate=["forward", "get_dist"], ) # Q value @@ -348,8 +335,8 @@ def __init__( torch.nn.Parameter(torch.tensor(math.log(1.0), device=device)), ) - self._vmap_qvalue_networkN0 = vmap(self.qvalue_network, (None, 0)) - self._vmap_qvalue_network00 = vmap(self.qvalue_network) + self._vmap_qvalue_networkN0 = _vmap_func(self.qvalue_network, (None, 0)) + self._vmap_qvalue_network00 = _vmap_func(self.qvalue_network) @property def target_entropy(self): @@ -523,12 +510,11 @@ def _cached_detach_qvalue_params(self): return self.qvalue_network_params.detach() def _loss_actor(self, tensordict: TensorDictBase) -> Tensor: - with set_exploration_type(ExplorationType.RANDOM): - dist = self.actor_network.get_dist( - tensordict, - params=self.actor_network_params, - ) - a_reparm = dist.rsample() + with set_exploration_type( + ExplorationType.RANDOM + ), self.actor_network_params.to_module(self.actor_network): + dist = self.actor_network.get_dist(tensordict) + a_reparm = dist.rsample() log_prob = dist.log_prob(a_reparm) td_q = tensordict.select(*self.qvalue_network.in_keys) @@ -558,8 +544,10 @@ def _get_policy_actions(self, data, actor_params, num_actions=10): batch_size=batch_size, ) with torch.no_grad(): - with set_exploration_type(ExplorationType.RANDOM): - dist = self.actor_network.get_dist(tensordict, params=actor_params) + with set_exploration_type(ExplorationType.RANDOM), actor_params.to_module( + self.actor_network + ): + dist = self.actor_network.get_dist(tensordict) action = dist.rsample() tensordict.set(self.tensor_keys.action, action) sample_log_prob = dist.log_prob(action) @@ -575,11 +563,11 @@ def _get_value_v(self, tensordict, _alpha, actor_params, qval_params): tensordict = tensordict.clone(False) # get actions and log-probs with torch.no_grad(): - with set_exploration_type(ExplorationType.RANDOM): + with set_exploration_type(ExplorationType.RANDOM), actor_params.to_module( + self.actor_network + ): next_tensordict = tensordict.get("next").clone(False) - next_dist = self.actor_network.get_dist( - next_tensordict, params=actor_params - ) + next_dist = 
self.actor_network.get_dist(next_tensordict) next_action = next_dist.rsample() next_tensordict.set(self.tensor_keys.action, next_action) next_sample_log_prob = next_dist.log_prob(next_action) @@ -1003,7 +991,8 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams self.value_type = value_type # we will take care of computing the next value inside this module - value_net = self.value_network + value_net = deepcopy(self.value_network) + self.value_network_params.to_module(value_net, return_swap=False) hp = dict(default_value_kwargs(value_type)) hp.update(hyperparams) @@ -1054,10 +1043,8 @@ def value_loss( tensordict: TensorDictBase, ) -> Tuple[torch.Tensor, dict]: td_copy = tensordict.clone(False) - self.value_network( - td_copy, - params=self.value_network_params, - ) + with self.value_network_params.to_module(self.value_network): + self.value_network(td_copy) action = tensordict.get(self.tensor_keys.action) pred_val = td_copy.get(self.tensor_keys.action_value) @@ -1074,8 +1061,7 @@ def value_loss( # calculate target value with torch.no_grad(): target_value = self.value_estimator.value_estimate( - td_copy, - target_params=self._cached_detached_target_value_params, + td_copy, params=self._cached_detached_target_value_params ).squeeze(-1) with torch.no_grad(): diff --git a/torchrl/objectives/ddpg.py b/torchrl/objectives/ddpg.py index 1795f785716..7d94a5eb07b 100644 --- a/torchrl/objectives/ddpg.py +++ b/torchrl/objectives/ddpg.py @@ -11,7 +11,7 @@ from typing import Tuple import torch -from tensordict.nn import dispatch, make_functional, repopulate_module, TensorDictModule +from tensordict.nn import dispatch, TensorDictModule from tensordict.tensordict import TensorDict, TensorDictBase from tensordict.utils import NestedKey, unravel_key @@ -197,10 +197,10 @@ def __init__( self.delay_value = delay_value actor_critic = ActorCriticWrapper(actor_network, value_network) - params = make_functional(actor_critic) - self.actor_critic = deepcopy(actor_critic) - repopulate_module(actor_network, params["module", "0"]) - repopulate_module(value_network, params["module", "1"]) + params = TensorDict.from_module(actor_critic) + params_meta = params.detach().to("meta") + with params_meta.to_module(actor_critic): + self.actor_critic = deepcopy(actor_critic) self.convert_to_functional( actor_network, @@ -295,14 +295,10 @@ def loss_actor( td_copy = tensordict.select( *self.actor_in_keys, *self.value_exclusive_keys ).detach() - td_copy = self.actor_network( - td_copy, - params=self.actor_network_params, - ) - td_copy = self.value_network( - td_copy, - params=self._cached_detached_value_params, - ) + with self.actor_network_params.to_module(self.actor_network): + td_copy = self.actor_network(td_copy) + with self._cached_detached_value_params.to_module(self.value_network): + td_copy = self.value_network(td_copy) loss_actor = -td_copy.get(self.tensor_keys.state_action_value) metadata = {} return loss_actor.mean(), metadata @@ -313,10 +309,8 @@ def loss_value( ) -> Tuple[torch.Tensor, dict]: # value loss td_copy = tensordict.select(*self.value_network.in_keys).detach() - self.value_network( - td_copy, - params=self.value_network_params, - ) + with self.value_network_params.to_module(self.value_network): + self.value_network(td_copy) pred_val = td_copy.get(self.tensor_keys.state_action_value).squeeze(-1) target_value = self.value_estimator.value_estimate( diff --git a/torchrl/objectives/decision_transformer.py b/torchrl/objectives/decision_transformer.py index db3cf633aef..ba7e2d4ba3f 
100644 --- a/torchrl/objectives/decision_transformer.py +++ b/torchrl/objectives/decision_transformer.py @@ -88,7 +88,6 @@ def __init__( actor_network, "actor_network", create_target_params=False, - funs_to_decorate=["forward", "get_dist"], ) try: device = next(self.parameters()).device @@ -208,9 +207,8 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase: if target_actions.requires_grad: raise RuntimeError("target action cannot be part of a graph.") - action_dist = self.actor_network.get_dist( - tensordict, params=self.actor_network_params - ) + with self.actor_network_params.to_module(self.actor_network): + action_dist = self.actor_network.get_dist(tensordict) log_likelihood = action_dist.log_prob(target_actions).mean() entropy = self.get_entropy_bonus(action_dist).mean() diff --git a/torchrl/objectives/deprecated.py b/torchrl/objectives/deprecated.py index 696efbdc650..947a7574967 100644 --- a/torchrl/objectives/deprecated.py +++ b/torchrl/objectives/deprecated.py @@ -21,21 +21,13 @@ from torchrl.envs.utils import ExplorationType, set_exploration_type, step_mdp from torchrl.objectives import default_value_kwargs, distance_loss, ValueEstimators from torchrl.objectives.common import LossModule -from torchrl.objectives.utils import _cache_values, _GAMMA_LMBDA_DEPREC_WARNING +from torchrl.objectives.utils import ( + _cache_values, + _GAMMA_LMBDA_DEPREC_WARNING, + _vmap_func, +) from torchrl.objectives.value import TD0Estimator, TD1Estimator, TDLambdaEstimator -try: - try: - from torch import vmap - except ImportError: - from functorch import vmap - - FUNCTORCH_ERR = "" - _has_functorch = True -except ImportError as err: - FUNCTORCH_ERR = str(err) - _has_functorch = False - class REDQLoss_deprecated(LossModule): """REDQ Loss module. @@ -149,8 +141,6 @@ def __init__( ): self._in_keys = None self._out_keys = None - if not _has_functorch: - raise ImportError("Failed to import functorch.") from FUNCTORCH_ERR super().__init__() self._set_deprecated_ctor_keys(priority_key=priority_key) @@ -208,7 +198,7 @@ def __init__( self.target_entropy_buffer = None self.gSDE = gSDE - self._vmap_qvalue_networkN0 = vmap(self.qvalue_network, (None, 0)) + self._vmap_qvalue_networkN0 = _vmap_func(self.qvalue_network, (None, 0)) if gamma is not None: warnings.warn(_GAMMA_LMBDA_DEPREC_WARNING, category=DeprecationWarning) @@ -328,11 +318,10 @@ def _cached_detach_qvalue_network_params(self): def _actor_loss(self, tensordict: TensorDictBase) -> Tuple[Tensor, Tensor]: obs_keys = self.actor_network.in_keys tensordict_clone = tensordict.select(*obs_keys) - with set_exploration_type(ExplorationType.RANDOM): - self.actor_network( - tensordict_clone, - params=self.actor_network_params, - ) + with set_exploration_type( + ExplorationType.RANDOM + ), self.actor_network_params.to_module(self.actor_network): + self.actor_network(tensordict_clone) tensordict_expand = self._vmap_qvalue_networkN0( tensordict_clone.select(*self.qvalue_network.in_keys), @@ -364,11 +353,10 @@ def _qvalue_loss(self, tensordict: TensorDictBase) -> Tensor: ) # next_observation -> # observation # select pseudo-action - with set_exploration_type(ExplorationType.RANDOM): - self.actor_network( - next_td, - params=self.target_actor_network_params, - ) + with set_exploration_type( + ExplorationType.RANDOM + ), self.target_actor_network_params.to_module(self.actor_network): + self.actor_network(next_td) sample_log_prob = next_td.get("sample_log_prob") # get q-values next_td = self._vmap_qvalue_networkN0( diff --git a/torchrl/objectives/dqn.py 
b/torchrl/objectives/dqn.py index 225d5d553bd..07ffd7f463c 100644 --- a/torchrl/objectives/dqn.py +++ b/torchrl/objectives/dqn.py @@ -289,10 +289,8 @@ def forward(self, tensordict: TensorDictBase) -> TensorDict: """ td_copy = tensordict.clone(False) - self.value_network( - td_copy, - params=self.value_network_params, - ) + with self.value_network_params.to_module(self.value_network): + self.value_network(td_copy) action = tensordict.get(self.tensor_keys.action) pred_val = td_copy.get(self.tensor_keys.action_value) @@ -463,10 +461,10 @@ def forward(self, input_tensordict: TensorDictBase) -> TensorDict: # Calculate current state probabilities (online network noise already # sampled) td_clone = tensordict.clone() - self.value_network( - td_clone, - params=self.value_network_params, - ) # Log probabilities log p(s_t, ·; θonline) + with self.value_network_params.to_module(self.value_network): + self.value_network( + td_clone, + ) # Log probabilities log p(s_t, ·; θonline) action_log_softmax = td_clone.get(self.tensor_keys.action_value) if self.action_space == "categorical": @@ -476,24 +474,18 @@ def forward(self, input_tensordict: TensorDictBase) -> TensorDict: action, action_log_softmax, batch_size, atoms ) - with torch.no_grad(): + with torch.no_grad(), self.value_network_params.to_module(self.value_network): # Calculate nth next state probabilities next_td = step_mdp(tensordict) - self.value_network( - next_td, - params=self.value_network_params, - ) # Probabilities p(s_t+n, ·; θonline) + self.value_network(next_td) # Probabilities p(s_t+n, ·; θonline) next_td_action = next_td.get(self.tensor_keys.action) if self.action_space == "categorical": argmax_indices_ns = next_td_action.squeeze(-1) else: argmax_indices_ns = next_td_action.argmax(-1) # one-hot encoding - - self.value_network( - next_td, - params=self.target_value_network_params, - ) # Probabilities p(s_t+n, ·; θtarget) + with self.target_value_network_params.to_module(self.value_network): + self.value_network(next_td) # Probabilities p(s_t+n, ·; θtarget) pns = next_td.get(self.tensor_keys.action_value).exp() # Double-Q probabilities # p(s_t+n, argmax_a[(z, p(s_t+n, a; θonline))]; θtarget) diff --git a/torchrl/objectives/iql.py b/torchrl/objectives/iql.py index 966550e21e5..e64dfa11f2d 100644 --- a/torchrl/objectives/iql.py +++ b/torchrl/objectives/iql.py @@ -14,26 +14,16 @@ from torchrl.modules import ProbabilisticActor from torchrl.objectives.common import LossModule + from torchrl.objectives.utils import ( _GAMMA_LMBDA_DEPREC_WARNING, + _vmap_func, default_value_kwargs, distance_loss, ValueEstimators, ) from torchrl.objectives.value import TD0Estimator, TD1Estimator, TDLambdaEstimator -try: - try: - from torch import vmap - except ImportError: - from functorch import vmap - - _has_functorch = True - err = "" -except ImportError as err: - _has_functorch = False - FUNCTORCH_ERROR = err - class IQLLoss(LossModule): r"""TorchRL implementation of the IQL loss. 
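# Hedged sketch of the parameter-swap idiom used in the DQN/IQL hunks above: the
# same value network is evaluated under the online and the target parameter sets
# by binding each TensorDict of parameters in turn. All names below are
# illustrative; a frozen clone stands in for the loss module's target params.
import torch
from torch import nn
from tensordict import TensorDict
from tensordict.nn import TensorDictModule

value_net = TensorDictModule(nn.Linear(4, 1), in_keys=["obs"], out_keys=["state_value"])
online_params = TensorDict.from_module(value_net)
target_params = online_params.apply(lambda p: p.data.clone())  # stand-in for target params

td = TensorDict({"obs": torch.randn(8, 4)}, batch_size=[8])
with online_params.to_module(value_net):
    online_value = value_net(td.clone())["state_value"]
with torch.no_grad(), target_params.to_module(value_net):
    target_value = value_net(td.clone())["state_value"]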
@@ -248,8 +238,6 @@ def __init__( ) -> None: self._in_keys = None self._out_keys = None - if not _has_functorch: - raise ImportError("Failed to import functorch.") from FUNCTORCH_ERROR super().__init__() self._set_deprecated_ctor_keys(priority=priority_key) @@ -262,7 +250,6 @@ def __init__( actor_network, "actor_network", create_target_params=False, - funs_to_decorate=["forward", "get_dist"], ) if separate_losses: # we want to make sure there are no duplicates in the params: the @@ -299,7 +286,7 @@ def __init__( if gamma is not None: warnings.warn(_GAMMA_LMBDA_DEPREC_WARNING, category=DeprecationWarning) self.gamma = gamma - self._vmap_qvalue_networkN0 = vmap(self.qvalue_network, (None, 0)) + self._vmap_qvalue_networkN0 = _vmap_func(self.qvalue_network, (None, 0)) @property def device(self) -> torch.device: @@ -387,10 +374,8 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase: def _loss_actor(self, tensordict: TensorDictBase) -> Tensor: # KL loss - dist = self.actor_network.get_dist( - tensordict, - params=self.actor_network_params, - ) + with self.actor_network_params.to_module(self.actor_network): + dist = self.actor_network.get_dist(tensordict) log_prob = dist.log_prob(tensordict[self.tensor_keys.action]) @@ -406,10 +391,8 @@ def _loss_actor(self, tensordict: TensorDictBase) -> Tensor: # state value with torch.no_grad(): td_copy = tensordict.select(*self.value_network.in_keys).detach() - self.value_network( - td_copy, - params=self.value_network_params, - ) + with self.value_network_params.to_module(self.value_network): + self.value_network(td_copy) value = td_copy.get(self.tensor_keys.value).squeeze( -1 ) # assert has no gradient @@ -428,10 +411,8 @@ def _loss_value(self, tensordict: TensorDictBase) -> Tuple[Tensor, Tensor]: min_q = td_q.get(self.tensor_keys.state_action_value).min(0)[0].squeeze(-1) # state value td_copy = tensordict.select(*self.value_network.in_keys) - self.value_network( - td_copy, - params=self.value_network_params, - ) + with self.value_network_params.to_module(self.value_network): + self.value_network(td_copy) value = td_copy.get(self.tensor_keys.value).squeeze(-1) value_loss = self.loss_value_diff(min_q - value, self.expectile).mean() return value_loss diff --git a/torchrl/objectives/multiagent/qmixer.py b/torchrl/objectives/multiagent/qmixer.py index 00106571744..61abab6216f 100644 --- a/torchrl/objectives/multiagent/qmixer.py +++ b/torchrl/objectives/multiagent/qmixer.py @@ -12,7 +12,7 @@ import torch from tensordict import TensorDict, TensorDictBase -from tensordict.nn import dispatch, make_functional, repopulate_module, TensorDictModule +from tensordict.nn import dispatch, TensorDictModule from tensordict.utils import NestedKey from torch import nn @@ -212,10 +212,9 @@ def __init__( ) global_value_network = SafeSequential(local_value_network, mixer_network) - params = make_functional(global_value_network) - self.global_value_network = deepcopy(global_value_network) - repopulate_module(local_value_network, params["module", "0"]) - repopulate_module(mixer_network, params["module", "1"]) + params = TensorDict.from_module(global_value_network) + with params.detach().to("meta").to_module(global_value_network): + self.global_value_network = deepcopy(global_value_network) self.convert_to_functional( local_value_network, diff --git a/torchrl/objectives/ppo.py b/torchrl/objectives/ppo.py index 2a2cc2fdb6e..11b5fef2ae7 100644 --- a/torchrl/objectives/ppo.py +++ b/torchrl/objectives/ppo.py @@ -277,9 +277,7 @@ def __init__( self._in_keys = None 
self._out_keys = None super().__init__() - self.convert_to_functional( - actor, "actor", funs_to_decorate=["forward", "get_dist"] - ) + self.convert_to_functional(actor, "actor") if separate_losses: # we want to make sure there are no duplicates in the params: the # params of critic must be refs to actor if they're shared @@ -380,7 +378,8 @@ def _log_weight( f"tensordict stored {self.tensor_keys.action} requires grad." ) - dist = self.actor.get_dist(tensordict, params=self.actor_params) + with self.actor_params.to_module(self.actor): + dist = self.actor.get_dist(tensordict) log_prob = dist.log_prob(action) prev_log_prob = tensordict.get(self.tensor_keys.sample_log_prob) @@ -406,10 +405,8 @@ def loss_critic(self, tensordict: TensorDictBase) -> torch.Tensor: f"can be used for the value loss." ) - state_value_td = self.critic( - tensordict, - params=self.critic_params, - ) + with self.critic_params.to_module(self.critic): + state_value_td = self.critic(tensordict) try: state_value = state_value_td.get(self.tensor_keys.value) @@ -863,7 +860,8 @@ def forward(self, tensordict: TensorDictBase) -> TensorDict: neg_loss = log_weight.exp() * advantage previous_dist = self.actor.build_dist_from_params(tensordict) - current_dist = self.actor.get_dist(tensordict, params=self.actor_params) + with self.actor_params.to_module(self.actor): + current_dist = self.actor.get_dist(tensordict) try: kl = torch.distributions.kl.kl_divergence(previous_dist, current_dist) except NotImplementedError: diff --git a/torchrl/objectives/redq.py b/torchrl/objectives/redq.py index dd64a4bc033..347becc24ae 100644 --- a/torchrl/objectives/redq.py +++ b/torchrl/objectives/redq.py @@ -18,27 +18,17 @@ from torchrl.data import CompositeSpec from torchrl.envs.utils import ExplorationType, set_exploration_type, step_mdp from torchrl.objectives.common import LossModule + from torchrl.objectives.utils import ( _cache_values, _GAMMA_LMBDA_DEPREC_WARNING, + _vmap_func, default_value_kwargs, distance_loss, ValueEstimators, ) from torchrl.objectives.value import TD0Estimator, TD1Estimator, TDLambdaEstimator -try: - try: - from torch import vmap - except ImportError: - from functorch import vmap - - FUNCTORCH_ERR = "" - _has_functorch = True -except ImportError as err: - FUNCTORCH_ERR = str(err) - _has_functorch = False - class REDQLoss(LossModule): """REDQ Loss module. 
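# Hedged sketch of what the _vmap_func helper (added to torchrl/objectives/utils.py
# later in this patch) is used for in the REDQ/SAC/TD3 hunks: evaluating a single
# q-value module under a stack of parameter sets by vmapping over the parameter
# TensorDicts. The helper below mirrors the patched implementation; the module,
# keys and ensemble size are illustrative.
import torch
from torch import nn, vmap
from tensordict import TensorDict
from tensordict.nn import TensorDictModule


def _vmap_over_params(module, in_dims):
    # bind the sampled parameter set to the module, then call it on the data
    def call(td, params):
        with params.to_module(module):
            return module(td)
    return vmap(call, in_dims)


qnet = TensorDictModule(nn.Linear(4, 1), in_keys=["obs"], out_keys=["q"])
# two parameter sets standing in for an ensemble of q-networks
param_stack = torch.stack(
    [
        TensorDict.from_module(
            TensorDictModule(nn.Linear(4, 1), in_keys=["obs"], out_keys=["q"])
        )
        for _ in range(2)
    ],
    0,
).contiguous()
td = TensorDict({"obs": torch.randn(5, 4)}, batch_size=[5])
out = _vmap_over_params(qnet, (None, 0))(td, param_stack)
q_values = out["q"]  # one q estimate per parameter set: shape [2, 5, 1]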
@@ -265,8 +255,6 @@ def __init__( priority_key: str = None, separate_losses: bool = False, ): - if not _has_functorch: - raise ImportError("Failed to import functorch.") from FUNCTORCH_ERR super().__init__() self._in_keys = None @@ -276,7 +264,6 @@ def __init__( actor_network, "actor_network", create_target_params=self.delay_actor, - funs_to_decorate=["forward", "get_dist_params"], ) # let's make sure that actor_network has `return_log_prob` set to True @@ -331,8 +318,8 @@ def __init__( warnings.warn(_GAMMA_LMBDA_DEPREC_WARNING, category=DeprecationWarning) self.gamma = gamma - self._vmap_qvalue_network00 = vmap(self.qvalue_network) - self._vmap_getdist = vmap(self.actor_network.get_dist_params) + self._vmap_qvalue_network00 = _vmap_func(self.qvalue_network) + self._vmap_getdist = _vmap_func(self.actor_network, func="get_dist_params") @property def target_entropy(self): diff --git a/torchrl/objectives/reinforce.py b/torchrl/objectives/reinforce.py index 1ae9c1e8252..832af829c64 100644 --- a/torchrl/objectives/reinforce.py +++ b/torchrl/objectives/reinforce.py @@ -297,10 +297,8 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase: advantage = tensordict.get(self.tensor_keys.advantage) # compute log-prob - tensordict = self.actor_network( - tensordict, - params=self.actor_network_params, - ) + with self.actor_network_params.to_module(self.actor_network): + tensordict = self.actor_network(tensordict) log_prob = tensordict.get(self.tensor_keys.sample_log_prob) if log_prob.shape == advantage.shape[:-1]: @@ -317,10 +315,8 @@ def loss_critic(self, tensordict: TensorDictBase) -> torch.Tensor: try: target_return = tensordict.get(self.tensor_keys.value_target) tensordict_select = tensordict.select(*self.critic.in_keys) - state_value = self.critic( - tensordict_select, - params=self.critic_params, - ).get(self.tensor_keys.value) + with self.critic_params.to_module(self.critic): + state_value = self.critic(tensordict_select).get(self.tensor_keys.value) loss_value = distance_loss( target_return, state_value, diff --git a/torchrl/objectives/sac.py b/torchrl/objectives/sac.py index 076df1c54a4..0752acb3be8 100644 --- a/torchrl/objectives/sac.py +++ b/torchrl/objectives/sac.py @@ -12,7 +12,7 @@ import numpy as np import torch -from tensordict.nn import dispatch, make_functional, TensorDictModule +from tensordict.nn import dispatch, TensorDictModule from tensordict.tensordict import TensorDict, TensorDictBase from tensordict.utils import NestedKey from torch import Tensor @@ -22,27 +22,17 @@ from torchrl.modules import ProbabilisticActor from torchrl.modules.tensordict_module.actors import ActorCriticWrapper from torchrl.objectives.common import LossModule + from torchrl.objectives.utils import ( _cache_values, _GAMMA_LMBDA_DEPREC_WARNING, + _vmap_func, default_value_kwargs, distance_loss, ValueEstimators, ) from torchrl.objectives.value import TD0Estimator, TD1Estimator, TDLambdaEstimator -try: - try: - from torch import vmap - except ImportError: - from functorch import vmap - - _has_functorch = True - err = "" -except ImportError as err: - _has_functorch = False - FUNCTORCH_ERROR = err - def _delezify(func): @wraps(func) @@ -293,8 +283,6 @@ def __init__( ) -> None: self._in_keys = None self._out_keys = None - if not _has_functorch: - raise ImportError("Failed to import functorch.") from FUNCTORCH_ERROR super().__init__() self._set_deprecated_ctor_keys(priority_key=priority_key) @@ -385,13 +373,12 @@ def __init__( self.actor_critic = ActorCriticWrapper( self.actor_network, 
self.value_network ) - make_functional(self.actor_critic) if gamma is not None: warnings.warn(_GAMMA_LMBDA_DEPREC_WARNING, category=DeprecationWarning) self.gamma = gamma - self._vmap_qnetworkN0 = vmap(self.qvalue_network, (None, 0)) + self._vmap_qnetworkN0 = _vmap_func(self.qvalue_network, (None, 0)) if self._version == 1: - self._vmap_qnetwork00 = vmap(qvalue_network) + self._vmap_qnetwork00 = _vmap_func(qvalue_network) @property def target_entropy_buffer(self): @@ -589,11 +576,10 @@ def _cached_detached_qvalue_params(self): def _actor_loss( self, tensordict: TensorDictBase ) -> Tuple[Tensor, Dict[str, Tensor]]: - with set_exploration_type(ExplorationType.RANDOM): - dist = self.actor_network.get_dist( - tensordict, - params=self.actor_network_params, - ) + with set_exploration_type( + ExplorationType.RANDOM + ), self.actor_network_params.to_module(self.actor_network): + dist = self.actor_network.get_dist(tensordict) a_reparm = dist.rsample() log_prob = dist.log_prob(a_reparm) @@ -680,11 +666,11 @@ def _compute_target_v2(self, tensordict) -> Tensor: tensordict = tensordict.clone(False) # get actions and log-probs with torch.no_grad(): - with set_exploration_type(ExplorationType.RANDOM): + with set_exploration_type( + ExplorationType.RANDOM + ), self.actor_network_params.to_module(self.actor_network): next_tensordict = tensordict.get("next").clone(False) - next_dist = self.actor_network.get_dist( - next_tensordict, params=self.actor_network_params - ) + next_dist = self.actor_network.get_dist(next_tensordict) next_action = next_dist.rsample() next_tensordict.set(self.tensor_keys.action, next_action) next_sample_log_prob = next_dist.log_prob(next_action) @@ -736,16 +722,11 @@ def _value_loss( ) -> Tuple[Tensor, Dict[str, Tensor]]: # value loss td_copy = tensordict.select(*self.value_network.in_keys).detach() - self.value_network( - td_copy, - params=self.value_network_params, - ) + with self.value_network_params.to_module(self.value_network): + self.value_network(td_copy) pred_val = td_copy.get(self.tensor_keys.value).squeeze(-1) - - action_dist = self.actor_network.get_dist( - td_copy, - params=self.target_actor_network_params, - ) # resample an action + with self.target_actor_network_params.to_module(self.actor_network): + action_dist = self.actor_network.get_dist(td_copy) # resample an action action = action_dist.rsample() td_copy.set(self.tensor_keys.action, action, inplace=False) @@ -991,8 +972,6 @@ def __init__( separate_losses: bool = False, ): self._in_keys = None - if not _has_functorch: - raise ImportError("Failed to import functorch.") from FUNCTORCH_ERROR super().__init__() self._set_deprecated_ctor_keys(priority_key=priority_key) @@ -1070,7 +1049,7 @@ def __init__( self.register_buffer( "target_entropy", torch.tensor(target_entropy, device=device) ) - self._vmap_qnetworkN0 = vmap(self.qvalue_network, (None, 0)) + self._vmap_qnetworkN0 = _vmap_func(self.qvalue_network, (None, 0)) def _forward_value_estimator_keys(self, **kwargs) -> None: if self._value_estimator is not None: @@ -1154,9 +1133,8 @@ def _compute_target(self, tensordict) -> Tensor: next_tensordict = tensordict.get("next").clone(False) # get probs and log probs for actions computed from "next" - next_dist = self.actor_network.get_dist( - next_tensordict, params=self.actor_network_params - ) + with self.actor_network_params.to_module(self.actor_network): + next_dist = self.actor_network.get_dist(next_tensordict) next_prob = next_dist.probs next_log_prob = torch.log(torch.where(next_prob == 0, 1e-8, next_prob)) @@ 
-1221,10 +1199,8 @@ def _actor_loss( self, tensordict: TensorDictBase ) -> Tuple[Tensor, Dict[str, Tensor]]: # get probs and log probs for actions - dist = self.actor_network.get_dist( - tensordict, - params=self.actor_network_params, - ) + with self.actor_network_params.to_module(self.actor_network): + dist = self.actor_network.get_dist(tensordict) prob = dist.probs log_prob = torch.log(torch.where(prob == 0, 1e-8, prob)) diff --git a/torchrl/objectives/td3.py b/torchrl/objectives/td3.py index 9912c143ae6..082873a2358 100644 --- a/torchrl/objectives/td3.py +++ b/torchrl/objectives/td3.py @@ -15,27 +15,17 @@ from torchrl.envs.utils import step_mdp from torchrl.objectives.common import LossModule + from torchrl.objectives.utils import ( _cache_values, _GAMMA_LMBDA_DEPREC_WARNING, + _vmap_func, default_value_kwargs, distance_loss, ValueEstimators, ) from torchrl.objectives.value import TD0Estimator, TD1Estimator, TDLambdaEstimator -try: - try: - from torch import vmap - except ImportError: - from functorch import vmap - - FUNCTORCH_ERR = "" - _has_functorch = True -except ImportError as err: - FUNCTORCH_ERR = str(err) - _has_functorch = False - class TD3Loss(LossModule): """TD3 Loss module. @@ -229,10 +219,6 @@ def __init__( priority_key: str = None, separate_losses: bool = False, ) -> None: - if not _has_functorch: - raise ImportError( - f"Failed to import functorch with error message:\n{FUNCTORCH_ERR}" - ) super().__init__() self._in_keys = None @@ -310,8 +296,8 @@ def __init__( if gamma is not None: warnings.warn(_GAMMA_LMBDA_DEPREC_WARNING, category=DeprecationWarning) self.gamma = gamma - self._vmap_qvalue_network00 = vmap(self.qvalue_network) - self._vmap_actor_network00 = vmap(self.actor_network) + self._vmap_qvalue_network00 = _vmap_func(self.qvalue_network) + self._vmap_actor_network00 = _vmap_func(self.actor_network) def _forward_value_estimator_keys(self, **kwargs) -> None: if self._value_estimator is not None: @@ -359,9 +345,8 @@ def _cached_stack_actor_params(self): def actor_loss(self, tensordict): tensordict_actor_grad = tensordict.select(*self.actor_network.in_keys) - tensordict_actor_grad = self.actor_network( - tensordict_actor_grad, self.actor_network_params - ) + with self.actor_network_params.to_module(self.actor_network): + tensordict_actor_grad = self.actor_network(tensordict_actor_grad) actor_loss_td = tensordict_actor_grad.select( *self.qvalue_network.in_keys ).expand( @@ -395,9 +380,8 @@ def value_loss(self, tensordict): next_td_actor = step_mdp(tensordict).select( *self.actor_network.in_keys ) # next_observation -> - next_td_actor = self.actor_network( - next_td_actor, self.target_actor_network_params - ) + with self.target_actor_network_params.to_module(self.actor_network): + next_td_actor = self.actor_network(next_td_actor) next_action = (next_td_actor.get(self.tensor_keys.action) + noise).clamp( self.min_action, self.max_action ) diff --git a/torchrl/objectives/utils.py b/torchrl/objectives/utils.py index b8ec5ec7c32..1f1fc04e58d 100644 --- a/torchrl/objectives/utils.py +++ b/torchrl/objectives/utils.py @@ -10,10 +10,17 @@ import torch from tensordict.nn import TensorDictModule -from tensordict.tensordict import is_tensor_collection, TensorDict, TensorDictBase +from tensordict.tensordict import TensorDict, TensorDictBase from torch import nn, Tensor from torch.nn import functional as F +try: + from torch import vmap +except ImportError as err: + try: + from functorch import vmap + except ImportError as err_ft: + raise err_ft from err from torchrl.envs.utils 
import step_mdp _GAMMA_LMBDA_DEPREC_WARNING = ( @@ -356,18 +363,13 @@ class hold_out_net(_context_manager): def __init__(self, network: nn.Module) -> None: self.network = network - try: - self.p_example = next(network.parameters()) - except (AttributeError, StopIteration): - self.p_example = torch.tensor([]) - self._prev_state = [] def __enter__(self) -> None: - self._prev_state.append(self.p_example.requires_grad) - self.network.requires_grad_(False) + self.params = TensorDict.from_module(self.network) + self.params.detach().to_module(self.network, return_swap=False) def __exit__(self, exc_type, exc_val, exc_tb) -> None: - self.network.requires_grad_(self._prev_state.pop()) + self.params.to_module(self.network, return_swap=False) class hold_out_params(_context_manager): @@ -460,9 +462,23 @@ def new_fun(self, netname=None): out = fun(self, netname) else: out = fun(self) - if is_tensor_collection(out): - out.lock_() + # TODO: decide what to do with locked tds in functional calls + # if is_tensor_collection(out): + # out.lock_() _cache[attr_name] = out return out return new_fun + + +def _vmap_func(module, *args, func=None, **kwargs): + def decorated_module(*module_args_params): + params = module_args_params[-1] + module_args = module_args_params[:-1] + with params.to_module(module): + if func is None: + return module(*module_args) + else: + return getattr(module, func)(*module_args) + + return vmap(decorated_module, *args, **kwargs) # noqa: TOR101 diff --git a/torchrl/objectives/value/advantages.py b/torchrl/objectives/value/advantages.py index 42ba404c05d..fee53b5f4d4 100644 --- a/torchrl/objectives/value/advantages.py +++ b/torchrl/objectives/value/advantages.py @@ -7,6 +7,7 @@ import abc import functools import warnings +from contextlib import nullcontext from dataclasses import asdict, dataclass from functools import wraps from typing import Callable, List, Optional, Union @@ -26,7 +27,7 @@ from torchrl._utils import RL_WARNINGS from torchrl.envs.utils import step_mdp -from torchrl.objectives.utils import hold_out_net +from torchrl.objectives.utils import _vmap_func, hold_out_net from torchrl.objectives.value.functional import ( generalized_advantage_estimate, td0_return_estimate, @@ -121,7 +122,8 @@ def _call_value_nets( "the value at t and t+1 cannot be retrieved in a single call without recurring to vmap when both params and next params are passed." ) if params is not None: - value_est = value_net(data_in, params).get(value_key) + with params.to_module(value_net): + value_est = value_net(data_in).get(value_key) else: value_est = value_net(data_in).get(value_key) value, value_ = value_est[idx], value_est[idx_] @@ -138,8 +140,8 @@ def _call_value_nets( "params and next_params must be either both provided or not." 
) elif params is not None: - params_stack = torch.stack([params, next_params], 0) - data_out = vmap(value_net, (0, 0))(data_in, params_stack) + params_stack = torch.stack([params, next_params], 0).contiguous() + data_out = _vmap_func(value_net, (0, 0))(data_in, params_stack) else: data_out = vmap(value_net, (0,))(data_in) value_est = data_out.get(value_key) @@ -425,10 +427,10 @@ def is_stateless(self): def _next_value(self, tensordict, target_params, kwargs): step_td = step_mdp(tensordict, keep_other=False) if self.value_network is not None: - if target_params is not None: - kwargs["params"] = target_params - with hold_out_net(self.value_network): - self.value_network(step_td, **kwargs) + with hold_out_net( + self.value_network + ) if target_params is None else target_params.to_module(self.value_network): + self.value_network(step_td) next_value = step_td.get(self.tensor_keys.value) return next_value @@ -582,7 +584,9 @@ def forward( params = params.detach() if target_params is None: target_params = params.clone(False) - with hold_out_net(self.value_network): + with hold_out_net(self.value_network) if ( + params is None and target_params is None + ) else nullcontext(): # we may still need to pass gradient, but we don't want to assign grads to # value net params value, next_value = _call_value_nets( @@ -783,7 +787,9 @@ def forward( params = params.detach() if target_params is None: target_params = params.clone(False) - with hold_out_net(self.value_network): + with hold_out_net(self.value_network) if ( + params is None and target_params is None + ) else nullcontext(): # we may still need to pass gradient, but we don't want to assign grads to # value net params value, next_value = _call_value_nets( @@ -994,7 +1000,9 @@ def forward( params = params.detach() if target_params is None: target_params = params.clone(False) - with hold_out_net(self.value_network): + with hold_out_net(self.value_network) if ( + params is None and target_params is None + ) else nullcontext(): # we may still need to pass gradient, but we don't want to assign grads to # value net params value, next_value = _call_value_nets( @@ -1240,7 +1248,9 @@ def forward( params = params.detach() if target_params is None: target_params = params.clone(False) - with hold_out_net(self.value_network): + with hold_out_net(self.value_network) if ( + params is None and target_params is None + ) else nullcontext(): # we may still need to pass gradient, but we don't want to assign grads to # value net params value, next_value = _call_value_nets( @@ -1320,7 +1330,9 @@ def value_estimate( params = params.detach() if target_params is None: target_params = params.clone(False) - with hold_out_net(self.value_network): + with hold_out_net(self.value_network) if ( + params is None and target_params is None + ) else nullcontext(): # we may still need to pass gradient, but we don't want to assign grads to # value net params value, next_value = _call_value_nets( From 0f9394343085e705451950dbe4589322c0fbb308 Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Mon, 27 Nov 2023 11:43:53 +0000 Subject: [PATCH 15/21] [CI] Fix CI (#1711) --- test/assets/generate.py | 12 +++++-- test/assets/tldr_batch.zip | Bin 308163 -> 3114 bytes test/test_cost.py | 36 +++++++++++++-------- torchrl/data/rlhf/dataset.py | 2 +- torchrl/objectives/common.py | 17 ++++++++-- torchrl/objectives/sac.py | 2 +- torchrl/objectives/value/advantages.py | 33 +++++++------------ tutorials/sphinx-tutorials/rb_tutorial.py | 12 ++++--- tutorials/sphinx-tutorials/torchrl_envs.py | 1 + 9 files 
changed, 66 insertions(+), 49 deletions(-) diff --git a/test/assets/generate.py b/test/assets/generate.py index 45006e57a84..deb47f95999 100644 --- a/test/assets/generate.py +++ b/test/assets/generate.py @@ -4,6 +4,12 @@ # LICENSE file in the root directory of this source tree. """Script used to generate the mini datasets.""" +import multiprocessing as mp + +try: + mp.set_start_method("spawn") +except Exception: + pass from tempfile import TemporaryDirectory from datasets import Dataset, DatasetDict, load_dataset @@ -36,12 +42,13 @@ def get_minibatch(): batch_size=16, block_size=33, tensorclass_type=PromptData, - dataset_name="CarperAI/openai_summarize_tldr", + dataset_name="../datasets_mini/openai_summarize_tldr", device="cpu", + num_workers=2, infinite=False, prefetch=0, split="train", - from_disk=False, + from_disk=True, root_dir=tmpdir, ) for data in dl: @@ -51,5 +58,4 @@ def get_minibatch(): if __name__ == "__main__": - # generate_small_dataset() get_minibatch() diff --git a/test/assets/tldr_batch.zip b/test/assets/tldr_batch.zip index 6293560f5808ab77a19ba0db2d70cc31e10564c3..252b9e3c9995c333f42c9db3d91698fc3b24fc25 100644 GIT binary patch literal 3114 zcmWIWW@Zs#0D;vG)!|?Ul;CDiU?|B+DT+@@EJ@DL4-MgEU_a!WlDZ~5B^5-MR&X;g zvV3J^U;rBs05*t&fdj>$1_1`3F(?KlmXxICm1O4U#pfm#XY1vr=H@0Az)c64jK%b~ zM;8Ju;$d3w^Z#Y;dkM`>>ymD7%e}qL_7`J-H#7HJmOtf{<-knOymDEEscxM z>3+MZUp;Z-qJtHs7iJu+%AR64_2V4G@AvsF+;j5ckAr`X)*gEovCsT|#GxNnRdpc}-9PVzJPx>$e5TiM znz8-W9XW^BRlbq#F*Y;m(>30A^nT9#@cgXh;rA1ouAdW-QT3~Q!TX@X^6~%P!v_~! zU)TBP>d%)lc^l$R|KWPVXLxQ_Vo%bXX%_$fnWgW4Qf)E+@yBD=-vrP5dd|gvli?kU zHLuOuX0yM&5h?$4YrSUK%RB5RrceHHvE|OshH$@+M{@5=f14=x|IyN~33JM8cYRve z@l*VW^YfdBDi+yj%t)C3%R}$q^^I|d_dUPT`f1Uw>k(lYJm!4t&!n_FvpcNj>Bw=1 zOI=mJDn8HhG0zX)RsZikcR2o`-1M{m2mjIuhrVB_pSEG2;Lj2+m!A@OGFD|0(&iHC zpX>Ra3+9-aZI|3GbUb+T-cRm7?zQS98x2uP=GBNw)rq{8IP7=k}VH{F_=8Z*czO@4ip}0{^;b@g2V2qx#}y_s3s1`B$&p z{#0T{)0|J0;#X<|n(MySeiiyzXA*q5i(?Wge9p0oWW;P|w5@@Mre^2}Ezxwy#v(8^#zmDnrblm%g6_k^|H5~IzWG0l8 zB~Wv6PGVAO&S*9s&BmkIc<5wfV0A5znvDyJ@^cGH;)^o#Qc^33tdxTj8W>s`7lB&= zN!Ga@9H3SJ5Myrz_yHRcBB++;rj{h?WfkY=5o&_04m)*5TSK?=ypLwsrl1vJUY@$A zef(IBrcUDanpz?l?2>a>iw)n8w72 zph>gBC(WEMK7V$^9N(Rltgb(IvZ{V`omr{5Qk13laCV4aQcxD3=$?)|`L4*VKyR>f zY`V7maw*VDAjb!IGct)V<8H74m4U%qM-W9)^9^D!tO1AIq5?JGV4$JV6V+g{nsYF3 zz-&V<^+ByP7-(p`glZe9Z8m%sfhrLgXlR_nf)ZXNRVnDUASXmn8i#>~#$;gnM2Z!1 oQark4$jJkgwqamN;|)~Hh)(GN-mGjOQ`mqom5G62Jtv3<05Gp2jsO4v literal 308163 zcma&M2~^Ts_cvZydCQc#-maAjb+x=@N@eE6Pwx?0v-fB3bIv~ddpzIs_c5*I zzb*Ok*?hL|+&{$6ADtzem)yK@B@}V_(oLW1;In6RmuMTire6VSzCZqkN9inCx?Hq$ z$&!Bn{{9E(mnFaaH_)v$8kqkAx^(mA)u5Yx$RI@ErLX|Yz^j3QmxBL;uHW&0M4#=p z3pKp3cK?A%RT?nv)duIc`+8VHm;;OT7|8F({3j;ntg|d%cH)Uf^haq@ zk}GBMpS8E9Sza;BBR?ghcX(ZZvVY1&Z}lsksydo{<)i5&YpTjsBNOV$$XCIBwmUBr z+n-aae&~Ah{_JAJYWUn8k36eE8sd%lBzY=k+rRzzSH(N$tu%-C%l~3$BAok5WhxCw z{-?K$u4qk@!`8{UsVdDN**!?kC$YkkP2El<3(I9EQhvfTal690ct5)SpEBT_cNXgU zqe>IaOz|9lI15#uRaX4$T7Hh{*)B2uKl1+)r|>y!fJUonp8QD`tr?V~ec3YBkKxsn z(TSrkNU|VJZSGTvkBIfev?4C~Eq*Hc_`im^VZte{#@Ge(Ny=1=+5d9rAB4~q@62^zU3S&Yq$sXqt>1 zCzGb^{xbp!x?2t${Ev^CMw3q!Iot%(Cby@iZC*-;gBn!UtA-B*t^auRUxeUO=_l@g zP4B<9lSgg+c~er8e}Si?BSqq^nwetxlcrEV*-~=yTka>GwR5uh4rf`V4EC?D|7gv8 zFPswpT;HLXt$v@Us{Td$Va2X0c`=Uy|7ozv?%Cmd7T~9Mo?gykHy%_Yh2P$GrW&k? zG4HLDT~k$ul1DclO&3Oq7`b<+@$<{xud zI88ptPZmasynf6H`()TThOVZ2H;$sIXxtA!DRa49j7J)A%-ily1hXHW3FJ3KKT?A! 
z(Be_Hqgl3?VW-(8wZi+-YlJ;I5EJZ;$y2s~GM(jtyNWzsfU3nyq+E^_@zpaY>X|@$5nk$@p z6|>Rdumm%fq0c$_>h*YlSZQvTEih-fz0O1<9D&n)7rsL&-Vs0>%WkAuF6BGsZ8y0K ztd(qq_L?cN*aDl4 zXh}-A6`GSjd`6kG*otl#Br_{$Ycw^Z?V@1Qq%ucntndU$7dgEqIzKoW-|0Pohkw?CbmnAU{ z*bg9>Z}B1zV{^82;3&OJj&h&UAqK8Dxz0E|c0FK02XdE2I%TPYZFXCz93_|5>aqI@ z^s<(+EM3Ju=(B1P#5JtJi z7Q4=v?4=jP=oxtr^@;7u4x4bomJhjj>yNz_7k`Y-KR&%{;WHa=N-i-{Y%}qJo^8Ct z!Re3`!JsDC!k{OY#Q&T>ggpDe%bTxHxo3wR%P)YY8BC$fQiHju0i{t>(y;otrWjw* zd3uKdxm&3|&ppsE@_!u(4ZH@FrP5Il7A1ls2y9UZ82SyBRzLmCZG@3ev}$j3wdz@U z_m5~njKS008y?KBnl$M)9EPPy`5vU@3gfVi$l8yzhRbs6JEE;IYbO8t^;Bxp=a4UAvVrYWA9a8eq;A&o(HwwMFBJVrOAcFD*PrP_!Gwd1t zszHYVjgY~qpk5xWoXxGL-tFRB;DvzW{1amyO5UduCr2+{?DVF)zzT=!(6h=bpBUdA zygQ;;mEZZ>8Wg9mRbk3+EzL}$AWfNzk%3=eg(il~46`(}L@Y~3TcV~vOddwTfN(ZESKI+hsqn|!zUV5W?l*BD2Ub&`Tb%Obvd4SiKwltlOpM=+m-l+9TQ{e1V>w;7*@Q2S0}^O@GM1`ipOVBl~!~$8Xa{ zq<(3WMsnr2>!Z<%SCS0OUCIygG8K_|P+)ikx+u6Kkrxbt;$HGM5x6!^qW%6^QTM%B zCIYsoDYSr@lZA%bG6-3c3(Xs1tCWlru+;}3?Tf$PnhLld&E}NWn9cGxvV9917FtkA zhrFnU_0ux?%ecJ})NDh462fowe%xnvmQbW= z#$Ve|8eF)T(~r6HMW?ZA*F>S}j-FS58ON-2VrB<3XIt!VtUd82hJ}BhcD*Z1KW;|_ zU0BJgZcR%H{Iz&7*&E@n7neFFYEqLfQ75=x&I|I`d1EPq+>S=_C&ALqR~|8X#oTx3;MUwtHoW1VPl#< zA0L<9`x{%Q$0>|^B;%p>V@UgO(J%4B$~|xc%wDS3ILe20?SB6Kg?L)oMG_e)c&WJk zOgSp>JBmf6$>J0$tnGs~k86+=B$Lw>JCXr|sEj|WUqb}}c!w#S&WKdtsOy}lArd$O zeW@Ig9T~h%Ytbh%8zGpoAP0Nz2S5ACTx#S_(WgGwEFv8k801354{;qo1^P=G8 zH8pHn7vy-&G#@5%K>=*{aSO`wRlVQVtqWI)Sek zEERjQW-``|wKNQAC|f6#PNfv5OX4Zufioig|T0wE8(+SlFsp8MAbD)xJTJLuBpwnxjcXh_ypk{8mfeK z62I2G2);8q?4A-(^calP@K6axO|`iVDM5_8?95K34K&w;W+oZFE( zKBy=jD#cRJn@ITDGxO*H^BPi^p%B-4U>Rcm=JXu?I(B|v!Vc_k9M%Rg1*S`zP~YZ$3r_x-InH15mpoP-8=EavtktBv-t^Cd6qU~x6=k-<7&Xb5*N~!fNO!qH^HsMrvA%7He zzdN$x#(6HKVNFRlHE;9vMOK`5m!PBfJr=sS&bWk8>FxfYIvlaIP->26nA7|qr5#p< zMJ(xJC$hSvp|=nDhV(6`NccRSE~M*6W8RkJMs?Md6$We6@zG?H59q|eaeieSmAMc3 zpw>*(Rp@OK)T$s;>YpOQ9o$PGXIwer(PQB!Fb=LuP77DMp3=0TI2<-{0F^F8+}ZDJ zTyojr!|XO*I>S%6y|I(g0XXUs{-MkfWwS8x!+~5Vdb-ooz7^Y)-Q?LfJ2z*lRsuf< zVd5rF=aKK0bUP{ArZ%Z4{B*^r;+E3xqSXBUN^N%R$^c&X0jEO7q=XsfPok&Ek@mhRHBl6FNJfmx{M0XJb7xj>YS>DQ!XSAB6FK zH)SfHA4OYo+u$)Tr)?L>c%#6w3QVCvZ*t7}UP}|qYSMc+6)20l@gv6+yQ{a$JrSO>8joEU zpa)HA+=;s1NLZ!!Gj_Cc5NA7(;PQ)Fi<=xSW(<~j+;$K9Zuxwacy@^i3G_rJjtCDp z#%F6+GD4}-MqYkpd+2LgyL&5AZLx3_D_lh+KbW^-ct$Bm;u!^+Kwg~`6+|#DjRx+x zf?-ttisgCTGg3PEM)b`D*IBuTBJ&+yj<*drbF$-N)T1GdoqQyaIO0UkFcar%od6z_-_f&F~ZN8`PyveJjh z=bIT6+bW}a_kQt@H`)m@EKGPkd}=I=Jdxoid4f7MzYBVI1DR^*MI}55Za8!)txMXp z1G_~qpL{J37r!=k;QY6v4)F)qsJoqmllCwu(}h7wN9_oLp5 zRTuCJxH>`sqDfOZ{VE3n(k-bTcK%V z*-9Jz$DSWepYv(czbvo?gxwy!Qip|=j=SX(-QEjnq|h+2cN31DE%_e(ra$K5Y+#@4 zOn-!*h_MTrx0fs{9{Mzvl%?9Pu2v9?u`|j`(_ZPc{q1zc`7UG7H{~_7dfN*#e{U(b z04MtZ$$VtmUFRh1RYY!o<6?>p2UdwWhshDQAh6G3>niLJSmD~}%=OFd?w*%$mhJ-oBcGHuC)Uxr$>%iNZqh*TU(IOw(@0?8Wd%g9s z(aGa1hc^A%B(dW6vqGnYnrpt}H{$c zwy|NJ&B7N_IDylqkml%!X*}}JyE7W}CZdt(V; zQmtY+SmcTyGISJ?F4H8r<9KrZHpoNOtf}oYq%xjMRTRsIoRi?YHIcKu!?@sF2;P)< zo1M^Gf9rtOa9Y&JLkHxTJCm)*oKIj!-|Fu0P$ zl#Gv9x@%bOj|p-YNweBCpAyCROM%}6sPlyTV{nW?Qc%t|RFwa&h%j$}K-`A;mZ}`} zXI$%go`jdXP3jUgfRFT-;^jtyyvV#Vgl5W)?I5`rctnyX^Ho6ikkKm~@A8~_KY;v> zEgc(wr1|;8v@5zC#$r-{U$#lVOIwe=1!w^T?8f^mY(SgqAE?JT3F?s^obTwWxHZZ_ zU%~c(G)2DMtBR7>^Xb9Z7}>9#fmZ<$DjkV|+R(0RsYmgxvc?x0Y%3FPyg*TehkXgY ziTw(`0*$*$6Co)xuT0AG$Erc&SXwgM;YmBWXiCFX`$ng)*gl&L-r;P2POL@ue#Kfi zR?qs+l|=!GWQX$HZ0Xv@IZQeO;pK2FmQr7w!bJ<@s~DVq@@mzF@HWil23MZ}av|u6 zM}$U3tZC4MB3EPI+BB9e?Lhre`u;C>@VxUs#vU4R>U?2Urv2rg3Mfu>;7)lKR6p)m zZ!!51KdCGnWbsy!6E9Ef%aKF{9X>w&w|eR*Qit3_UU4OOpO6<)Zpz_zkWv zRU0|a7?$e&RJY^)$E4}ZM(uh@`0xRU?j|vr??oOdV;8~>^1pJ;e$%#=>ujPl$9)1Q 
zon=i~n)|H09I;~O8`_`owjwX+2I5D{q3U#|F>Mun5F+%M-7uv7&M))6=V+*myQiL0 zR+j7E+e_R{?qC!Tyynw7qn>nKc3LSbcSjt=k2rK$h2Ufz#9EYu%GeoHD zgpzOg{fNwS66pvXU;Ul;MY+>Lb;2adp;_w@J{mX-S&{E!S{9VQBmB`D4J6z%aAwR&z>)Wolb7jUO=eq$9C@H0W)EoyZ_!{gN zP8>!dvXxx>HGLYhRX+FhVa_(}@u`Mw5%UE=iTG1nHAGw<83@-6L$88jT=BXXQX$9I zwHmc1tt8%CF7gu&g{0yQPig?@)j(U0M2hlMcZ~0dB+fCb(NlqkpE^743@eqJn(@6AR-j8tz#`Nr2`%hkM=`~@ zOhVoO5cM9zPf54#@5bmQLo3AGjjqpO?)K2)nCKsQoHcbfpDwhpjwV2u0DJMDNR#JD zn-tSD#pFA zFBR+UB)syQSnFq=QKPGbN=QwPveutAnOMk`%nTmA~N3^Wa1nBnZ$7^gli+uzHvc9!KqH^wCxuW*nDPEQbvtSykn4g`zGW`65H zKg`4AmwCSx@1YZy%;8Ss&ZBE0E;b=f%p*mG(9-zjq}Ih(PilsW+~8rpO4yNiYm9&GJ79KlXW;Igy+lb?i9NWr( zRJ1yusyH*FIVD_<+X&0!2!d~Dd?S(lE`gWFt0U*Hjk}m^`+;^rH8Dryk(yHz05x1C zmP5N67XG3%z|_Fg;Qso7Iqx$8^pulQA*G41oz$khluF;@8T=}%@ShkTg|0q9C%2e} zyvwp!c+cAH0`=|ks{SLZ(707nO|>BF7=B-^4{#aA@&MLv-fy9!sYP~?8Qjfz3q=@p zW@{h&@UBGk;ulX#{2c!l+24PcHSkfXHmW0+W3VU-FmZdvv4Kbe$e@ zApt<{pxtP^;f}9>Eo}C>kFlglo0?_i@e*Uwd|A_sd!8vP8U6vYHvECdr~zquC)@aD zex>NU>YeBGFCMKrOHwk0UzK+f|JxMVS?Wo4!kmK!zDLoNtj=dO;i#=~JCvE_-n*dM zPM6boib;t0ry;6FJ&2(Rz*Iuaz3GD}Dy+~uir$J8K_bo_d1yfU;v><8o^e>cc%Iq=mT~45+)yhPecwuhJ;9s}qu5LiDAz|4s$hJm z=dZpA!tN)nZ3ITVAFmTT*NDP9NY0S?;^rE7Qrd&0%9p{vIErFH4P`ApI}Nizc4(V= zS?pIBuPZ>lysV2|`# z0A(*?{DeIv6iugc`utYp)vdy4!Wqx-T)|?|yAh5cT-1wVAxNPx$kB|EdPT*ER=(}u4+feW^ zP;N$d&}53`S(Ed#n^5s*d$l5?Xl{~qKxr;Wv>LPNr#|HBBjvSk8_0uNyjydQNj4@7 zeC~SDFJC5bEywR=hMPz}4R$rdCzm&uFjm4E>fsCa`|*fQ)K@~dN8B{M5H|gcpP6Oi zEBRj5xrsNjat!!0>iZ|^z@qkA_%I6lB?Ij*&CoI|k?jYN>ZNOJ)cz-+-JWJrE0IC$ z@A*cvz1BWqwny)?=1RNxv@BJQs@6^Zs}!`9H)VBJzP}v$oq@NE3s$UIs-CeU1E(5o zec``A%j#2%B5&~{A=63ijOV#MqME?BsQLRdBaBba`AZ9a9hxW6UmUdZjndxE==erl zGWG-g`@2lcYpE`>IU?sv2FJf7;Ov1D7bB?l39#{B&0Qxi|AmDfl^^v} zn|}vWdzUwM(m1#M`&*Whb+3fgMCS2K6;Wr$dGQFi{IT(7)KyL0KsjZ+=mb4dw4>VI zY>?uJ6^zKG^>KWfe}xgj7@`AWil_Z~-CL;y+Pw{)6TSZ-W;pgiZ?-L6EOgV=J47j0 zWyaUqm(^>HR2ohd=$mG1#0{3kq?Wo8-k!IYT@wv4gHI_RaEC2l#E%brc6HYb+K-aO zi&*fZ2;Qg2fUKZxrLvNZ@PST^S8`~Trl~oB_V+K!;`IrA2e>FU2>%2}VX*bu&co~7 zhrsGfWf=fX0zDa13Y(;(FGs%!K0|v<58+wn;c%c;i_w@o8t#!N<1Yi@hfL)5vfhV4 z*C|yG&^UucUw-UkG{w(e<@uD+)PAa7co*L}TDPNeN_qU73-I5DwDC!$k_Qss zAKe%OW|>KJpt{qEstG^Ee#gy>29rTiB4Ft-3oJpt?oPi<56pwHhS~1`4;exJyugpx zDX$*fg#ns^?U2d?V~D-gVIwo`U5`vsk>l{$h@VN7dK9uDRhCd|8ENiWt~}HMrq;<; zEqaO_Vmx_J+=2O5Qk+M=?Lr3Y^&lbG7Ru|Ze4Ml(Hk;k})0>nuCVZX?AHWy)Wlo{F zJr)N3hltL6VPEu{nD~5J6x^HcCtHXN_(9l7Iu1;rg_i3mPekrf)D|k&`(YX#yVQDy zJ|yYYCG0-VIz@_jeGn{q%t+B;&j(A+QUZ=!zaCg8&QcB1*F(bjvcL**H8;j%C9v1z z0P2_tIFnz{rT7V>f*@Bz+N$Xg>H3Z>V{@vAit z;H8*w@5P-^2+;^$c=t9`=s9JbM_6O0qpiL)LzLILKWNXf)t6jPTvdxRBi*c-68M?ntS%jn zmy9eSKFl-x+Q(+3hH^|J+a8;bx;E%_`6cW%``13_r-KZON!O}>slCi$YgRF1+&Xug zjm1CTLK?M_Mwflrk}aeQ){lj0o$|-(+}ewBWCyyF1Jrwa2ybGkK`M`Eifb>45+H`? 
zYTZYig0=7+>X%kMMB(aIYsP6=X zZlC6^+`6iOkd>3110avPc84~A#*ZEo#9J|kEa$~RnE5YicKsEw*l#oR7Yb;+qgdG~ zbNiBGAl(Z6dJ7VTy=ooOoDesfprib~^_`A@|1(Fm*W(qsRr46x_|BoXdu}W#Q$?%1 z)H_V_DLGP067H40X1)lct{UKIg7}S0wGm&fd3s`x^WNQitzN9Dt~;9OTd>mo`DW8o zpROD%PG4CaeBhF?BqsZ>GhV)*efRmEDZX&V>rS+H^_>!#N{Do%F_^LSX_&HUv>P8z zP)4zo{v6~_sgUjxJzfijW(PjPcR?_& zX&_a{R$ucwo{6WZ{Wqf`PmF(;!j(>JR4uu+2CALqv@efbZkr)JCccng4X!WPkZWrF zx(vH-$x3U~S`Tvc;~c)s^kGDOnqRv(5h|~91W_#Pp>%XtHrWrjB0vr9J9$;jN^zwz ziSTi6Z5z?C$#E%>ZN9Jq(z_T`vjsiwU(hn$c1M0CJ*At^J3eX_5z}dX0-dXR48J4# zd1ePLy2Wf|Abt+)?%#hs6ZP)N!5Q*(SmmA+;w^sr#wDj0Os8su+ zeC$=D(Ow!PndP;B`PQN@u;wE)SD?sM1XibLKX6BHB!mFEK#ezL*>yy;E*47b4{=wU2*@MRJ|%4)L-t&k zGNL1@hU>t18T|(T1M_34hA<6ymCfDw9nn8&XhC+w$2u;wx>?eNTAlM+63b{vGnCkH zpd)FW!*JPc)kx(IfMXU{?zkyd6O#`-%eBkp^(yUZbPxv__Rg`hH9Hwxdt2F7avED2 zTK%IC;=O>uW}t8Jo?dRq>YuQ4Vi6)$)(mm!^=v=O`^8hRRiqMj%X@Oc4n)pRZR1Or z4hoegg$hq8rbz|ayYu5qqPrpll}virCdZwe+1TQJ^dbgD`+uHF$^cHNe&=u#L6K+q z6ZUsFO>sWZ@Ee-tljXTJR|Cb!fP3Bw{vZh^5Aq*&kHi`{S+?_!%9+o%4^JpEJl@q6 z<$P!~k6lJ|&Tn+Yg;YyghAm_7bP2BXCL&xv;NeMd2vk@0adK>&aqQ=bW~d*G!@z(_fQcV6qttsxmSd{7C4PXg;dOv=*5KS9Kmnyf5` zo?ND7OK0?-1x-7F<*8B$B1!FQ8J$nU3|0#3MwzLfy#FKq#aeu%2Y#v4Ye>Sr%1Wt^ zT^56pm49Kr(7r>+g4cP)5Qot>8@Vf+YQ2LhJVZx4Q#D)U?&YqkM%N*bq~;eB5L+Nx z@m!aDsl|Hm3bY}dB6jqPSmLfMxMpxSDfmCa9fkb;WX>y^i)ci5J7R?OInwb@L5t^q zc7IYyP{m`|8?lai2NL)awi%;>b=jGWl_}{8n;PXE3!IfyGB#~h>q!9wwr3Hl$bF8)mS|NpO4>u8lq<*>?Cu2NP>j@v4gtNP&TL~>Z=k|O4i!|X(q zPAo~vVM{70tcW?xMh+uvRLpUk~iUu;W-kw%$H4-dl=$=^{v?b z{H^O=(uGc-5iX2w_oF5qWPo<4rBz&9YSau`^1b^$tuaB|dfmOd@(7HZt3mP}5B26yE2Qv%~wxEJl4^gSFU&r@0Q7 zq(*#$U=<(J77JyJP4rx7@5n9vmi-x-j#fwq}e!NYX}t6ACBXLn?|nvJT|T zC!20>^b_+N)pHZ}l~F7~qM!81|9#SBSukzk7%Xg~+1z%D0ag9KWj6H|u~umxC-n`` z8RLqlCQFGgz}XaN*WuBGT>28KZuv;9)n?XMd_rw^&0t8 z-*jM~oy3yDlDDUjMB09Ey^4-9K1Bk6T!$((ec$M&;U^tnyo^PxQS92hLVu-HUzDu2 zRK-x-C1d!RHyHW3g+({hg3*CEVN2>yWceKDz%>T{hTd<1oVqoARe{NoKeS)wYpi5j zDaN~FGl(~a-DFeMWU-aP>mCWfYav$&Hb7oj+vkz6WN&IdFUHoc(HQ)o+&#?^&yEjRE5Hf!Ugany7-dHN<0;ee~%M0 ze}Q z_(87LIxeii3>uKh1D)d7UYIS8gU-Z38C~oN^{cby>X^t4e81}rk-qXU!CG*07~Z#u zY8!p_2jDtx7vUpL{pqodWogS|ek=T|D=pHzsqrg3L^>TukOxMj!Pa4LyHyW0%dpRC zdKXYP1t%-e-%DZ4nn%VB2u#QX{;f`t$tr4Vcg;;%$h2r;zT*YtY5prLgRc^GBGbuC z#rw8c01wWr*{W@wvuWPoS2lIq|1z%0Pg+k{m zXxQJJVH4Qr$E|Dc|NdwlacyCteymH>#sCDw#Qc~ob?`#$=TX5p=w zS(B756o~Njh%fR-E5AWgqS$gV>jf%7tCPH8nf)@E-G>ZK#&QnfzSiwVWC@vdunfzGyX1we;3S+4RNBWa~Jz$|CK$8veF zCwCi-9PmEN>6%K>Lm^M^o^Z^W|4rlv{{kwH^A`_TUgB(tD_{p76kdr<6{;#vOgin4 zUpfBEW8Hop6KT3z^#}W8Z2@BsuTe7wwcuETs&?$GevK)hz8rf|#Tthk8Y;jo$A0tb zFKw-9Ma-8MkNw-g4}ACA7IvK;bi%mRX+8Bi@vCaYH6(sIiY|O5!~+dH8|hOOUqP&? zg`t@Xj_u5nqN>Xx4=3uEGnT>X*DN6PFeur42M!^z;Mz^C!@eW!VtZ+nXt6M z+Mc=Us4t2%UOI7;@%a_FM zgw8k5O~}`C3gvP1X}7kz)4Dnj<=_Ek|HCk=6(B<01qa&NOyl0gtJB12;tm>Gx>0fC zzI1y$s%G?Hm9fotcQIoS4yaj1>qNReA*cy~?ep63s}6AP=qmyJd#;6QwR(W~Xnuz- z?YmX-+{qN6R^R2R6%U_&{B#<3?#mvKq-v+3G{Bv^gYr~;PKQ5Do%F?SbuRas`Zjy0 zY2$UK4Lb@fnWi-o7 zqfclF^ITy)iMU=JK)yu;X8JyJemJy_kiE%ihp4*!yC>?&=q~}Fr8;y|9E2R?0G2M3rI1dc>y+hZXHM5)QCu$u&7D?U;3F-&~zevqhI5@!Kl zx9xK2Z}hQn0A~8l;x%h++RUX9VMooUj3>^3Jg42*U0n{70r4KNQma#hb97Qh76LV! 
zq4;6E1>xwAT%TyewuCvR=w01T<)CqW)?>MPnZM$bxUM*d=5c`#G__m)1d|YUXpT~8rH0>u{jCW2 zEy8p`xJ!_g<&g&BZ;KvW4|%@H{hdX^;Zo>qIXtYQkH_4ILmrBG;~Z;TmyD=^L?lT= zPSUz8RgtCt2R7V=cS1atkY6+K?aqU8RC&`|#4~#`FMFR~Pe+jgEHe zZ?02{ABQJ|Hv3hO7iiyXJ6;`L8E97c2>3~H#Yl+3FD$SDYW&=0U3Y)PqmTqnf$o;h z)=>E}AUdpm@K-?lymgN{E6Z<+(3)6JoDMjMHIEZk-*dorVkgm?Tr2>2`3fgOVi3n< zu25Ezmy7u(NKC~##T66<9fK?KB+UnN=NX;CKP$MoVb9^*HTfP!_q5N-!{zcA(T}QB z+_eRp-b=nOG}QePdqGVkwkhD?UdkmWgv#j+9DQuv${25Xjon_21i2I;{}xXfi@(xq z30Wq;c{mR&1lx#+GFh=$UG5>uE0W%CmJ8#+B{R-G+FUFt)4D&te_tW;0v!MFkxu zDl^ejSo^bm6OO?=d)xRF&>rCCg=E!u&E@)bIrqVIrs@u6oBFd5s5}s6h9R` z<7RxKA8#XV%lIg80`1x3aIjE$0ZosZb((293|u(YCk%=1oHj-)_DcQ6aMO^&d~mzO z%OXg7S?VJV3Kkj0;~^gkBZl2aZQ_5Dnqy!Ko6-zqYFLH_`9D=JS5GJbi08vedLV)d z`>wW6mw!p{`~DV9Osen!2oOf-RI2{Nm3G%CiwG@m1}g_v{|XXTdiH~*wY>eR6V$7! z{G;~*8*tj#!OqSsL=SCD*f7}I7*a?)uQ&BI^Ssj{a!dIa^0uQ^i>H8_UQfmyWbGR< zpVg<8qF-nIc6|J_g)|*Af}wX`#BX(4CNofMgVxViRmjq5S6O%4Lb{f+8_my}y?)5p z3*Ans#c?A2`m%gInEZ0x=$jz&fZ{%gTNp+u$pvddOB~Nv0%xtY*H5?I00Q9X(~H%8 z9-{u~`=l>MDYU}TnNP4|CzXml(Ymcah2s%J$Hk^9o1!ugPN#N>u5WQ06Kju2_UL!z zSV?EqePcu~w6jDO%bYMb+o*ko(W5RwvWdS&Xgkm)AmoFpdo!=z3AfQiFd}V{GgUM= zGWc;0PWAD?#@StF{Em3+rQgA3Pyx{VV~DBMyq{HU0KVNJiIb3FW~<9PP4**Ys+Lmi z!Sm;MGv*+otUE}djUW`8(B>K&LFGWpWTL|XPH4_n2cB<3#l-u=)ihLa-A5QBk!BtgiK$)>A5x_)oyjS_Aa@Tc~M z{5l6}rq(~lIKu~c=h+_-CI3>rGE7>U(_E<_UWlx&);jW-_H%_s5C0riYUOZKaNI=g z`FBKgIgYL=qaN;O5`#3N3!I-&!y@_ml4Z0#RUvY6pUiSOv>#vb3mmtV_S?JLXcWE{ zj`n__+qNVAfwlv=%FCbjgm3Cg3|MMpoODEstJMHO_7Xi?Dl1}9=TB6X?j z4pDp@2%4lR#I7FXg!%gC!%{#uEfHt!SC)%&aZSl$5`?2aY1_$#VdNJ4;wv<~J!#>L z=qFvQy1;c2_X2f1{v;vnKwba4M3+&ziOzSJ4|?LUY)*e{enfquQ9Y9H-~uVK0yT#G z6BA(G9t;N>4nZ=7iS*hPhz`WHxsmUOjs|@97p<019fdl9$P*Q&?xqq}yZ>_Zx=)>q zU5Jo-fqKhxbvQrI(%73VK09U6*Qt#jsFC8>`H?`F-5&R51zT@DGHB*RSsNq+TET|Y z8B18^c1Ij9K1>9GTB7C zQ$GF6-$yBnMSm;?4r|@(ON0R!NL% zpB{MQarZ4X%1jQuGw(#FaX0jvyr#ctf2nCq8Auz8pccpvjt|3uAr*ZQ!@y{85vf_R zu1(u{8Pisj%MUB5oJ?7Pq5YkVM@63JZiWoTo&~yqQ1K&M|Wu*rckw zCKcK)#`;ZdSqx3^5d-xcE0y^>W0*DnwTB2zo;rly8oe@9R!&p3iMD*UT4fewCPKiFgvi{ac`$vSp_VWKDlFq7o3n%c` zo_ZZ+uS68XKAI9G$AQgqe`EP}NQETA$9)M%_!lJ!bkYrkkgH0?zlUjOzcho|HL9Tz zum{s79Is;>ab)f#A++wptZZz*V#3b!q7>+#Xuhs+4!wdX+yTj)O|HPyT1fgFRk|u= zqv4EWirY}qlmqXveu;V9y`&*;qR?T+*nuJX))5>?vkJY2B})?`*;12w(yi3uDB;pT zoWNlIS6??8pFp~?ed;x&U2#mECOB6Sg*?pbI~dQtM%f)V9owy1DlFd&fco%8RF5G` zXd{Pz*yLrlb3q}7Rav_ zA~MPl_*H~?P`r)mhCTagctf3r^CSdG6Vs3J$OAHd>;p|=jS)8yvHXUfvI$5h_tUd; z>rmWf6rT)}SG#Fv2&9YJc&sIDtL$40IF^aB#TN29JVWXZC}1h$b&7YgaHti}=RWSv z%(pgNev!c|gL1X#wqRZaccOcs8l4-xzOi8ssr9r0a9}{m(`d9ygw2#k&=}|w)_Rrm z2Kd*=CAbyMS-Wv}^UW?&?`1L$v;+_ULsROcOb624`~ZkPB7#+6m2GER2&+4APsOA4!hF?>$zV zqVe%z#q-HQenbKf9!PwP2h$Cw)Q1;R>Y5jKh=4Kb<-Dphy(kyaz*stboo?-O;tF(M zof_k!2}4=RBPy{lN1;n$ZVK-{*Ggop6gV9@(Tjk#qpcNbUkwHjx#&>UDo~F!q;gV7 z@moLK85LlBK16fQDgL&6*zR7vbmQo%zN`i@M~w7_CV)TXJ>~egAcw=&(s+^v&eFQS zw7$S1^D7znrMS<}m=IxR9ECAA7?z3I?Y|>#+qQVhU48*;SU~g(^n^$Qq_VOM?=GfH)(6$Z2 ztVR92D=%PgR_pKJV$6aQ>herlKhPLC!XF(psma!Md7ZTD_8H2RjQbCpC)M*a7o);D zWR=BZ8NXDUaDjEZV-7X)E2}-$!zJ(uN{8&Kul#k!^QckZ$BoMW7)HnuX>&XBgh$JMbEKkm3ulNX5c0YK z+`@q$FVl?EX_cI`@iI};5PU-FD%_Rm~NEa&?4PO zPoaf*UZEekEipmz5B!I(#(KHNIIwyI;Kg>bj9((E5emO4RQG=Y-;n9UfZG9$9$J81IaKS#*j9|gH~}dOlQP)bo7&j){kD} zCVPlQyx5pIGf^n+2x}x78_IpI5?@gqw3FtF-teWfaprxB2A3&b6@b5=-4VG(Lb<|w zmN&nAmNY9zSmD0%f^_PDzep!6t6b$@97+?T?ZMpt^>i1_qH68Ug#}}OCBo;tOTxlZ z@i%qWc-$!?4*9Ob96`y5&vSLU!y1IDsy>ZAPDrx7XUM(4v235UIt?^{Sb!rB0$)!I zk^_UjQB>%ePje;CXBGl#PO@#sPmce(GmXdOc+G=aYFkmFP~I)7T`4gl!LnzVdK5ij zd@?J@Q~o|?@xQ?d&b7S{2@e(s5*Fj9y@)?f#Nnxbp;H!Hn-NUGotpRvdTw`dm zax#TUV3LLVUvY-m;5gAz@v-?b*E2fNNx73;clsW-Da5T++izG8F+*SMXKwE&ei5&O 
zs11~OHFn~@{%o8gKfv~J3@4Y3uaf3OxQ>8dV{>)C;(o@*EYvbk_i^jPQB4O+Drb!M z$N>o5m$=u`tFGSpbSkR^;e;hOtP^#hY_L^>UaFJ!MdnH;5qHv z(&I*!<~2)(3$1_n1XT=VHNu{gum47q12)*iCnDZ~xWXoWtQ9X1`9ncTXX31MXSME_ zZ7S3?yf$Gbk5i5M(rmtjbC|5v;m?LuXrDPJXd2;lDav zK(elrJP7WP8zPZq3&wW){l``|48LzJ9zZn@AhJEPKbp= zPLq$-k1zsMer>VpZgb!^;McSPZ8`H#W*M!Oo9ndrUd}^ZtfjNgdEBfysl6ulE;V=j z?(5)d!yfzvxX*E4JtEV>#0t-+&JX=rib`TfRcyOqRNiQG91g)KjODR){0k+oqF#T$ zt)L1Is*-&B$7e542T{L)5#t8QA$|bH9su5A-rbX>4l9nFZnlC z8MF}=ejmFMiHmKplvpByQ_XWp^3(F|lS0Sm64=;Y!SpnR@7im_ zp)^-r>6ih=!ATl1W+(Ic&3qwK8g_@w1ivVpIy^!om0n{=2;(Or(Z48S+V0XHS}#5H z_q$t7pXFRQ&hx*!wPTZNNVZ)3Ec z=T~za442nD(j;O0>}0Lc!-bY# zr;eRX+^#}C7afsjqLSI#JMz~%kFs*){+q%^gVvev28oL1$3B?9oXxOork?gc+{^Lf zSJ3Ka`zPtKf2yzI1Vp3F7?*4#Rz2^Yu-6(oF+)ZzIZ+53o#xDY zm}2m2DIl5@EW3l6g&)KpjXvYwgIz`(zkzaCzE!m$<`mJT7In3j3AvoI)xPGqXjSg_ z#zJm&bf6{WD1RHYxM%mnLpoCFMMgnU?8arMAuV{r{Tu^g+1v`1P9mF)eKKU=hjU)G zHttc}@$p-XPX0A3t<`u`wD|Y_ld;%-a;{}`|A=uvNK_|5 z_=&qcoxH;zs-NU-h7?bn5bcyWT)h`C(YD=7bP6U-vhaAXFk6V{rekdJm}EU1Iu&|R z9+vHAM3+f#l6WJSYvEeiwxT(2JqARDUW3z zt)0aNvazQ{A#KQ`i{9lMTX)e~q1FD66lEpl;E%s&XHJKMAIX}CDfUZd_ADjNjwkz} z?uGf=(mqGz4S}eQ9BP{i>*CfdbsDeZps{Y1zN8Aq)*{GMGXLr=# zRsTWd>wV18J8@)Ome5rw056)g8lTjj5p0rYIzrKpKRyrrl4zZ1%q8`%ftHz`&)Xmb zEk2*WG+d-_=re$5WSt1GSmSllun}9JD6p%bffuqhL+8-9#{hrDoud9=OlcBGI{C>7 z3T$)JH=6{)fgdiK8{Xi-W-`ds`W2 z@kJj#%7!sIkEh+mN zB7v%)`%3~I%Aw_gc(s}8J8yY>%;%nK?t9%gslLj(@2#O2z@I{2M#<_pC&sp!vpXC- zl0CMt4XEyTwCaQcj=6R93j~VZ77p;WU@i@svic@_O%R)5^`wlF)&gyzIzg8r2{Oxf zY_`R1opQg#vow*MTsYnb>&6-I#?8J$D=VQ)1I@EB!pO&fWL}rg7LyXc7yMn;-iR@?{tR zQ`SSxkm(mnMov!3RV=?VhLC_Pw1*~g=EtN{W4~o2@frQCqzg#rg_a>H>2-Rc%wu1Rm{H9j?trRZ}MF zvwh+eO>hu(Hg^*@=)zQ^>0e@obhpOJ2;6*&S2J-GzBj3bP7uVcR$r`#0Z-9Y<&c`G%M!XOh(6JOkT}2&eGgLI7aU6k1NvZ zcSqvtA9e*Phx5#d5!lU+OEE3fWSE8(=>cb8D$Q=BiT562zuY*1B6dJr!SOe?_PHX{ zy7_tgs|=W%Xl*te8|yez%y{QFpCz!?ayRTyR|JE1uI+)ukvht$!|0bGGez_v_qnSO z#cFPdgBLwr=daj+#kx)Q=&4AN&ASsh{Y&e;U$C;#c5MYh$_kJndZ|Y$#!eQD?Z$)7 z5yinG^9WWSn9;1w6jC$vzKkY`7Rm=*d2Hw4Z17x{IB!X(JwpGJiu*XT*6p$X4@FrY zDOO?-c@`>^o|$~$mx%gF!R86y#=fG>1N>~6mLG`uNbD}zVsmjomKvIkS>unqgmuEN z{MW%wtaDSH`tkrVuZ_`vitIUVM|6+dYUMZj3(j0o?{sr*B@dBKf+v&l25fJu2!IJ3mfa2O*Mu?f_+Zgl~)!j1R!K@j(y)0Ezzpcf$wl1^Dp6~ zayCy!?2x&!@oEhHXWa+rb|t+oIpyTToW*=n1Qj5Nw>*9QzD}JXa-XGePJ`2<)9KPz z=Y0;MTY)Dn>pW~xdllmjSrItgLXE8eucqVYW}1_EH*P;o0rduXRp!R4@x zs@t?GVURWz^nGFDGgbMJF(ZXTv#slS2ZkvN0(BLVxP$6N=?>oeC;qQdcQEP|R+$po zKgcBt&H0AHA>*0thvtT-L9ma9ow6icYLb2pE-%{S0=jE#pu?U)8;ymTREwTCgZ@UT z{Sw!TYfK|ud$Tw+S7&JSyZBc^{Xp1=+|utgr<$s0S0vK+B77ONZ#te6i#)30tav-i z?DB{a#wa?z7FZDNg&Q>fJ!D^^#CC&@DHECfxiJDACSYE_UWEOnSrb_?8|<%3T&oAi z`-{KWMO$`{tpu06S5oRhs%ggo*lIL>Befo!79F|?8r-mYB0g=~1!#b)uIGsv6JTHF z3-RO^=kb5^d(4>El*-oS2_fDry5uf2CXDaANq1d`=8iuRIS33ELeI5CsV%Xy>N+C# z1AOl~SVLW~E6`0STtod>nm*mV>jzPwqvfde4;HVc94DP|yxr)Nki}KZt;1ZbNFx!` zq97+nQG;zZ9sI+il1TTo+8B)!3t)?-9+FS}JyCu*Z@Hl%L$0TipB|&vL~Z@Y)T@)Y zM!QtYf8Au!xtYduJkB+k(TOyt9RcljO;*FNBH*)^q^goG*+=Y%I1rYKyCMJr9x=Xp zFnpj8W8wG2iJD?uNl(r)5Z9O+-yR#dYzp^jtWm~G1}RPM?UI~ve-BdSeI@163E3d& z{fuROv(nY_N`Gcd4$LZShi>n<&^#ZUH!Qm%R$ZPcCHa>&N}r)Df(CSK-oA!}TgMV^({I+$eA8oiWI$}TECB3QgcxktRDHZ4KZse7jdJjnk68mCSjP5?_o zEZZ{pg<~c*Lf$AKC#+_6uai+kid<8iyEaY7KUsrlve*SUrjT9KOH7RPB#8Fw#Ogw# z-*+@nuJja~n+(Dyz?l?M2AuipX{H2j$Ls~djjKwgw@f}IdNX&XZOqokztGon3pKUO zKZ?>rWdbs7oa$sHUMd$Y9VsH6PxC&IsA=(LdM?NpfBnbK|sZ&G-yA<&L8+ubjq`9iY+vT|_%DS1Z{_qwF|Ev1IeG z9!h~5*teMQKYX6v54}d+$?3+}!)QrfeWdjhu~#H6Db}S~m~btG1~gOE3h45>NG$;t z7jk|!NAoZg7(mIUj#>UBURU0Q#gS6x2f7jRQsOxDI4~gx)DF)tSEfFWxc87Ft!iE2 zm4bNe6xC>n`=8#4Gzj}72hQ;oppc4Gzq3js`lWUM=JvK}+J$XKOJGM2%K8_(%@D_( 
zZwBzzn_i5auuWxo*FM%jxGHbigMgn3r?dpeNdTxTc!X08g11!iZlTo3LF1`*+fQ15{meh{`g40O85m!Q*>(H8Z}Szmat-8 zOCIKRg8w(re=#dd253C*)fo`(=h|p|Z6Sp%=ux!QLh1vtQ6c#Mp5y zo8VXpeoT@=;1-%h9!98-@Sf15)LE%mt+j=f0|Q9R`y)8H%u0H<0@7sJsfhpbAZ-jE zMV=~>3?+bCVeMLhvGa5dzBNF^>RLa;@QaT45(qDChq%t!u#ZD^$ul9BNXua$HOw1; zU49~G_?R!rm+Agu?I-*QdYOQqUxO2jRRpvnx0mM2y3()d9vX4)$+){%$lcTxHIeRI zv1|HTuZ#YN1!aAmlfCe9Ve6%wOg?IFxajmm5Z7G4XKPZ$jWF%DYFprMs`d&XQ*Vl_47FtSBP=3j zp@vZ&M%{*6#j3ux2Rq;HHFL`bs@y-x@lzCV3vmQV5*82r*5imbh@{V?2T8mQJPIEn zEI9v^7!IuOwi0g9>=Rc_>;6_u_`s5I6n^?nk9x0B^0<1_LonZpxV|nLf{+?uRxZ4m zNBU2{^?Yi2y9QCGrQVoIuxdOU{xyPHdl22YC$TUa6b9^JHNvOjdP3Fj=YmpkI*c?= z6&bNmJ%rE5%zBO)uvsm7L_Dgti*Wc=8=B35KczB}7-ukpeUeE4O6D5eb}zIxhiR+F!Du}`GNI*4kvNAP)8{5&5P zW&W3FUHnjt<`+nDPsD37Z>#zkRjIn@m5wQc|1FEVtrqT39N;B6pN0Os^c|e1k9L~; znRiYFCDi)25$Uj*lAZKEEKpku77e)rf$DG$_dM1F@4RZ5dV@$_mp>7k3s}G;Gj6!H<#&Ko*4lq;VfQ2mTP+~|37Y5pXz0rTwqCfCB{Ea= z=X$)6-~!3TKIs1r8cyf9{FHI|u+8#^FrKM21D&WYA6{)nVxleh&U-bRK&#=dlmf|* ztME|t_s+x;>G6i8;V0MzqF4Nybmj`oa<$XqFwCr~);8wF#sRMlWDW1RaW#*@n^o>* z6u7~fzBnK%XTL9yKMVfJn>fz_n-$Fu3s(UZxuLGI>PT##N$g9=5?F^d1oQtB&wio( zj4#S0yzF*GDeN|6?Y)-hwSKW1;;B(0+Vlqval|L)G2EwsX$*R(6)vA2DQ|{7FT8nX zYOA(DD*MvrVhc@Dx&;Le_W{kHY0+5}R|rj&wV#)w77tIwmN4~1)%(~1?Jo;}p-!38 zSLf9cUk9EQdp`F9QG+r>zo$Yt460i@DmAx&k}0ymttq%~KG!_eAYe8+j=<-i+F#>U zA-5i{+aoX)xz&x6AAoXX-N=<$>A2U5I2D~Wb5bN@p6;%iJ~j+8w8vo(EfegNZiq~BrZO@NW8t~p>`R{ z7}=5SJf2US75`$VYyNNNX8}}XD#BL}vko?r5BRlZ1Y+Ec^ze@*!;?d1^c6$t#;k>n z@xy*&v_k9^ES=g^S5WG#I%zgCD+{g2_)U_m{%R=h@wUdgjeP@^>du&6He#j2o`X!Q z>Jf%&k*8qHiccseN*x3Uf=jivc=sVZ;GJ<4VY_IvdN*<; zSfQ4iLAw{7Wq-60x~}tA!B~Gb9|l(pg+a!15}4{>?W{(}Za|`KF ze4gUNLae*|kes+>x8>bp4NCu*9?7rG3G~Y$U1F5)O%c3^sKR+Xg@4zTX)>vYihH2P zaCBh?t_ge2HNwBaDQkw!5+j%B75BEElG24Cki#USWpspL z9A<}lxBc?cpjBA^wb(ZYotB3`tt&huYYPaj5|k=yIi8UEtn@EUg*fPv#eAq4-tLf5 z^})zhvZ%ZcqP;mQh`mcvjVy>9B7KJb?sOo{>YI;$m@9-Az^jl;<8f|tMrgCed|$8Z zxUtM;a*w*C)Khy-f+)@q79PAxWnzmh75de^>9@8Qdi;J8(=fl!COi~&R*Lv24&ZW8qssr!KNu2ag&4^0tv)BRN3c8$O z3}iUJ^8K32+~+84djby9y#+QMUTR5S-;O>)n0?asyNj6%{Vs_8pM2e5Dk}r~pvE=# z_a|qkxw|^GnKtCfcxC>Rf+KYphgFM;xj-5(g@Nyh>l$uJ+fIXzZ|3$y_Of4`#~Tgp z*6joi-9tOfrR*2CeRxv)5jPqbW6~cSBE2y6D||kL;_3_JZ_gH10aDh$>ve7$u|*A? 
z^ELGPed5gv_w;q0XxEVF_n$~VC<0mBzjIrpUUHv7PNLD|4XR+ve5~B=oyTi})VzOR zD4F1++J{b=UnyUTVRi>xn{C-*e+!BZqE^Nr`hYWCiC%{9-N9&MwXfC?bW>HXxHQ*I z(GPWqe%RzOEv1P0I?9DW&MTNV#uMH|k%^Bd0@~NQ>=%EWFc2K6UoEP>eynAFd>kL7 z<=--0SW`dd^5V-IyZExbi2DnFq465+8}mTe5IO4A0TrhB9sNk1;%D|mY`s-l?VrI4 zP@QCV1T2fGb$fx^H0A`&qm5+=>@(hM4}UiGc48$fC^~oc#nYFP3OH>n{!@tZ86&^V zB%7C>jp)RsRk_WaB<4ktO@B8Pj3XuLJXHACh0VAuHZ_K7tDO`{i@{)UV{Eln(%;lf(s#MHv2lyL9l5Yo;bY>n;E^ zo?c*Ws`QcX=GI4e1^wgyfzUVi!$)*572}P4QW3XPMT*zm{v?Bn-wxz!`{@>_qk@**b?M&Zh`IqRHpoW=y&wzSDS&c9MO5 z>cPab7{#~exFWA)(1O`7)wQ_>unBcp?GAOt#8&20g*nObeb))O#N|KyV*I?30RlwO zrd$zMC@G&Te>lf(BgjxMIpx4sh5XWS1SiAeJW}^a_mz-9X{~%(a+Kn0sn}I`3LF#{ z=P8P=<1U<-E~xaPIPp=CWMP%wsFjzcl#6s04_|SFUoe4rxK?ar+N&rCP%jkGrXw4` z*I_rYDLi&#);mBWZnkFeCoMKBFI$%GGLnO2YSD~>@C)K~&tqT4Zy>oHV0Fe<$!wvd z9I<)_wbB!^O&+9nvHoNKFw4K&c!{N(=2Zt#$$2JA<~A={JRf^y#O6YKW=ieBy|5SX z3So^I&dyVksPnrRIUjTWB_~Q3f^`_)U08g2=qdN+3w5+(PT{W@OX(K%eOUeU&suJL zDg_VKY#O!{g&*{=)l?H~MNfTQ{Qqrn5w6EBpG&YB4`-^T+~?Y#wzaP_4cn>qFXxZC z+El*C!ppj9Veb|Tz{`kr+t9}@BTqEccCBTdwSXBc&Txu4I1!7u%P@A9Qvvw~+}E{- zzXJA0T)iUi=)$nlyf@L;4O=f5j>M^@g2PZ$sp%@Vn0QpQIbBZ#L};)Fo}5s?w!*t1ieWd%+uI z0!<5C#ociKTHQsK*=eA3uy2A}&NaZM3ki%mCr`zdw>Z{{xmNN6|Hs<9!W57Gw?{}2 zX-I`iYQ-xLW(2(#-|sQ+N_nRnFP&?w!8Dob1K&0jfW z6WoNFS|Cbhc8)n9zG|L8v|gFaSE?>cjo<|ZX^{;4mZ28(dZJ7Amln;=a>4-pi$QX^!^1PhCvbMtlbWNoEt>7Ef z+an{5<9M+Btxw{c*efb$`fvZhr} z8#1lzjLWr=KA5|sTVnbjyS^bo;u8vQTbq{UYpuFmK(B0km-V?HDwfE4yLGcpfMF9M$i*#xS?nos<0pZuv7M?g0jULvdC*7wDg3!;oefKs}@Ax+p zFM+@MsU703dosnUFE4p7g(34iIPmy=9kC7li6@E?kcZZe=Ha(pU^K>X9LN>dq=AV; zs0Gid)9fzH16mm*?!P%ucozfrtY$x8LL*!bdv;kfcFk20DvWOEzp%tSXJ>m)xwPZlA0iUL45d&g*L+7;iU668)Tpx27) z%5*T2wL!mf{*36;GVz*g=w!vO)FXP~1;2Y_>@7)tg|*N+^{wAaUy`0>I7u%xx5J8~ z7hgbAGzEl(FA74D07(Wu0VGU7N4?@FA@*K_aN=)Bl^V-k3 zLeqJ?E#~9m0eJCLM3({M6(yNs=;yE-P<55rCC<+yyu(@VUtJo!<%V&p|mi2N>0;Z*K1 z)s|=6%sW;0*MeN2GVc!>-!ggl$h~1~d&oab>Djhjmo~9n)n@4Wor^+=x4kGB8}e5A zG+K+U*7s^rk~_g<3|aH=lGg3nFoXXF(?kIUAK}8s()a%50$~TF7nm zBW^#o-_6V5R^v%i;qbHF{Hn6zNN@0k1A08{Kv+hSi}ZO6o)?UDx|3Je|6{K`}RxzA4%up&vfJd|GR`rS)~%Obym4q<*qqycd6Xn z%Ia2B4y#luF=xgO4wa-VDLTxiR4TEMoMt02$4xnhZ4AS(6FVP%`+R?Y!rt4i_v5;* z*Y$j!4=EMPYQ6p><;5+h+(q=3izQVq+|%?yR3^LyGmg%qy$zrOxv6RAcw^SR;5gR1 zooA70XscNpb@NA?ii-jR-XE}Qm_Nh2ejI$4hfqBv2Br5pYcWl7uEtev?wV-3a9S<( zo=T3ye=tke0-X#go^@wb-B3dM?H3WPxd$sFz?`48SJMu-P0HZcpu$|t2eWJ%E|wOl zofUlUkeU1>uQp?w1U8ae6uV199(#n+|GaUt>EmJju$$hun^*eJtkTA2|0!SBlfJ6M zUnOl?5!z-B#Nai_%qJ6vU{wM+;)?Mo3+z*h%-g6y@_%$X;f5O;AU#d53Q*9kbwn=k zvFB?Q4UkfbE!~B{d`2jMSWogb(rdm@C5_vOZ4zlqf-Wa4USU;IjOw=2R%^V{wH3`c z@=HaDjq{yA9DeNtWf0e&{fBAgNzI2x$8%}AEy~vyp}{19$*AtNL_HFnkeF94xl#sZ z8WOvF$9xSjEHK~oA`ecRTFjG0PdBy6ipgJKU@5WmnSKiT1UcKY!2SVhAxai67w4dl z_f~co-)vB_9tFnctgf+>dW{$>}s>t$djXK>{S!Uuf zE55rJaTx?Zqh)t?1J^-A|6Vx6VP19y@lQ>GFliH-i^Fq_(e1ZlrbRaK3(v)F{e?QL zG)--bfLnPM+dq*cQ1c^3nE~*$#0P{+w7xSB%x=VKIezMF4)PIF_EWK>5I0Hu4iC1P z-4^1BOZ+%~4}5L=0D$`{;{XkQ2m2;X=Qoa=H6vO2OHB}7)GSM&JH82H16v}g-ou=m zv8?fWJck?hZa%DIm6&E}K9NdAL7j!-+T5} z8#UDeH>{@|b~tXpX6nIoYgrbEg7{qS1IfVneJ;Zf7r=hZ3;9#;CZ)j65NM^I@o*b9 zrS_c)Xz=a611t4G9u>kBH;UZ0aA%+Pp{`=r ztCOu?xONSNKs!`QA*ki>L8lv=aY9zef!}Ccn<8l zl4i9m!|e}h56VNtKY;Oblb#oDQQjN}h>?xppX?S_cfsVcz@mt6-zzJP<_X5;@eP^n z&An@=qp;BN%fJ$(4L;cp))50H*6j5bvo*l4d4mn5qw_NO^bLXp*|o_a7zn zgl;hJ8g}x-tOp`bG9`DpfV_#>&OOzGZM4}2`UOzw6yv>DbST_EQoe%_mV(c1^bJQ0 zR$3&GMEXh>+IP#zTn%EK$$Pn2{`#OBMG>bAqRi<>GPB^=x48~vQT;LH(0(K7ZO_V7 zNu2mWOfHLvMV@gBmj>@|Xzi}n83zZDd4Un+tXzAh{@!rAZUB%?s~4@ewY!*!OSA$$ z&aeW9C9YH^pZHFbH$k*x3LvKEF9rAwydIvUG~w`akL+_~gDYBXJKVmxUdDDu(CalU zNiGglB)JX>!B%NmI2*4aY-7d=@Nw+==HjMTW;(874ZNiWyT_$y`eg9*5_P2pUy^x6 
zZ;!%=A#d$#CjJ^<)?bmFsjpz8%77ljo5leBkanwpXzLj7&eK+W=4YDt*~!=8+{WFe zoOy<y1=~?CH!5YwG^j{3XVH~OT81NYiSgg=F^Q7cSze_l3#Eqi6 z%rJS%-8K}rU-Dk;5I~4%fmfX2q$Vy6h{Mjc@04)_wZXzWn(LckH^i39adJwiafq*t zUX9I=xP-SSyyiHxQTz~s&X~^ z*!z>&BV%#WyK3#=RD(2w{fCh2N^co~rk&gV<)BtzLCe#XEIuXx)CF7xK`<$J0Js7h zg$EOU-g=#D%OjTIf)40FtC1C+ZxN~s0|N2FaBXk}Z1R;q{uOrqd>O&FlG}!2+iDAP z=+eDSTOENdkyg>By6>qDfSD<~ckH-!3EY8HS&rnS2j}jkr%|8K(ihnl&b4pFqu#z| zj)NzeNV`CWpz?+9udmTV2YKNiW^YmBD0kk{sYp_8;MX>LMAb8tNXwNzL>6v$hJ z{aesTX|YYUrlxx);RR>FuPg)WCf@s)Q2ozwk=D8`^mlH!D@Db5=F!vnUDW~l zU|QE&H9f#%qf5)>2JZN|mlpA(>Ys^UzyxE_TDh5-FJ}5g@kVG`#HcU1J$}6LMqHTL z4#0oLv1iA_r8*mDfOxE*^Qvld`_@eEVfzEPOLY<6kYkc9C6S~4SI73k*(F>7K8H3) z9Q8{|rRVJFukc%d2=~iJ9g*FZKV^smpRM{ux#in=;eij`^B$k_vh9bs3%_3JGj_cw z$V8Z-pf>IHRoP?WTj34ZhXBf0WxZxQ{IO=P%A-Sr7;6irGOTflJmN?*eZJpM)vV1IaI63=P$$MuQFq7 z&W~|o4j(PXC%VIAMrR@sS!t8Yp?SEc3B|c5^p}(0aw63{c$F3Me%pT`v-*5nLc^;8`1?h$F0efcX3HJ3IaarNERkda{KgVFpad5O zS8#$u0o1*kM<3K0>zRmP8oZ@Sco?)+TLIfEe<|4YfaDx@yV`W2VQzfe*c<(8qPo@0 z*-PaV>OTlV)xR(n(S;EMPJL!9s^Xp6$3LbaW3)ey{esDb zt-xzE&3NJ8c}r%mmW#Ef&rrk96ECSLpE<86^i#kjMH^~WkG28Q$hIfO>K)b}^CX21 zNb!{@Qv$zMn1Rw&P-rLalHPSzOiN2SA6fjX7`HY-eOwEBmiMyJFEm@=tP=ZA?TuWn zMYX&eM}8VLzX&z}VBA$#qjVd7J?d4YokLX3TBYE^q374F%*mm}FA45VejBMsG z#X3|9t+)rV8oCkKcMIGSC(nA!d~3v<>-%YsSgYNTQ&_tFH&~XNE-Qrzn>XO{8^dl_ zoFd*<6e=jpHVo~!25jbbq84-2&E-DpQLkf#`nm78I)5`qHetSpByxzb{(6z6eJAU5 z{#tVI_mA{aV)dh_vb?H~_^e>DxbAJd-Bf#4QI6!+)PD;`t96g1rw50W{~~LjN`qd{ z^6Z>^IU4v2W>Z01#BYVg!&XYZZkV}xgKWz6qFFY)Z` zWJUZ}TR2$dvQNl~kLEe{U!8Afook)XYh7_LQi48gE?;~aLU|2irjiO zdaru8WvV^TxKG&OHUVXtr>vx$razWnQmYAU8c6}1-GaKuem%cjVQ-u{9UrK+*4uuc z;vXmesFBZS&KksD5p*^Bo}~?9VV<_oo&-$4z=6^`Z=+7)1`D&JSO_7uSj+y1+NI?_ zY{RM&+gt0+6uH467=leg2-b$3tGU?5mv#b0=b4{1V-VikY?w=M$m#*@C!j6?YG(~q2t6Gzmkl7)=ou0uFmn z#}IRCi|nOUKZab8$IWCHA5X#bw*jHvis*gs+Gp_YXx z7xF;`34~|wlA1QZC{EzjcS3f0B*Q+K7z5qe5$Sr=GYFQaB>?KO=;Bq_IP4Lwn*gku zR!)z@qA);lYg8CF5;wdkyCzHw%#*U(x_^4rU*WR94G3g^LoHN;?3ny=?l>Z z?|Jl|)TQ6s@_us$zXxS|kFy{v#V+Xj=uUjCRItf|-R88f+sHJ;K(w;aza8qyOds(mWom&a1Fq324Nik6{? 
z&?NYEfALaPZ7k~Fu=_aUg)im2h>x6C>c`}(M?h!Q^rutjk_;Fm%(6gcrvPTvcMGr{ z^rh*kPXl0pcU3{0@DZPHw8^JG?#pVWr#1RgP{%UC< zoOnL77OsoT>(g-sbWAlUal*donk>SrC^?Jn94-D#O;;2)E0{f+&+{YT~U z5`#tbSmEnRVcgI%unPhbQC2^Su%C{1OS_I3EPT(xOVd#zpwxCO0QcM+n@y8V0E=uH3yow+5RzxAGKlZ7-=p+b-_awChpa*eklosjks1Um@uVWzN7@cf-M6w8H(Z z?;mUxMK*U1>%kgONiZ%|rFFdgAHf2>T($a>zp*6N6|ndi=4Dl*yi3|ijXB)0doJ@eObpB2^~y zF9!Z4)|F-Qzmu`2lySlh@y(I8XpUEkw;+hQ5Tb(0LOm2`oHMQ6Mk)>;ZH|I^us!?j zKxWnNgGrHfoV&58IenGbpKwDOk}&n-$%U@L2AJwkmdKgirAa|H17h5o9=VV+`J>^1 zxV1#P^~l()m>bg}&k)Wuti zCw{kDbUnH`N^X@YuO4H3Y}#pwuZEh%RDS?1SFeSa{`lgPcw_d=H$(w>aA){E>qa3{ zJ%Fzs;h{R|F%|mnv`LD125Ui(giT?uCh7{_fNt32KF7X+p`r}PpBrfk3ZQXO>*!^u(IvX<8^>;b%RbKT5X8A!FY zZx;Fk(wR<_^geX((D=;ARp1T4SYI z@IJ@*h^uPXLw#zzPoS4XG~OEt4V&2scyXFhU~N_R95tZh%YIcp41ABij`745_5Nnf zx!!oBJ3GHZxj}t_0GLR8=DK2T{C?#d*f{iv$4T@1{T$-#ZpzYtL70L(VBVKX8nkYM zuTUe_C-Kx><<_<+#i|jT$rHnHZ`dvYGerC8{{vf5il8tJd42_g+TU8dk(wQ!`%?4L zHM0a}45D~UdAsO7a^J=^fYhE3m?K_>Il=a>z`ECX13t$`$383y@RxHZiLbVYzg0pq)F=xI#Xjod~xR-|W$L?~M5Ed%V zIsHMEB(0rLzDyW{2;AOhq;hoODuy6V&9Hh$B+;25cBB~A#UP$8ac|m8t zV@CtGsZjqRzbl?{WO?QbUJWjykD1#W0I3?6^J*!nEKGHCVMXJGZgc+^F~dNNsSx6V z+S%t)OaLUPcq#PYyW|SQsm9B;khorXU8Y0s*40mn%`g5VH>+E>IFG_>DuJK#(4zXD zNOkl4+1Ekis}-?WIyS{Bv~~;cSI^z_fT-_ZeA=ObLxpN0#?bVY^YDu}oeTx88PEJ5 z)0Ni!zwQ5kF~_WfkvO~wHA}{2z7m5^xo(TyFqN~| zIh9qCg^ui>nQ)Xp%ZC&xWPe2rDloBX$i3Y_1LgUl{(v%&fta=N0ptg`V57~jywnhy zuR~XQ?gX{vSTa!$S=K&O9YO5jYF@86($|Zd&LrucM+JZ*9u8z3YO85ApH)O*dj!~^ z(o9|>bVP`ls1do3K}Q~15mF=aEO_akrZYOp5aEj$P?7r5Fl}pF>d|33`P*(b2(;`T z>#3_%CpB0XyMjIJVy#6DmFX-}d}p7caVO+^=|TUiF{)luECzPpVGASrR$r>R)zSPK zXpmMo&Tdu=v3>}?A%L*Yy@8KKt$^iNz(pKh)2&S(H{italDA{;u-#zdT+$Xd2se^# zZm16%dj!KCo3sF;YQ(mb?drR3Mm2v>kh>L&-zjN%OK4W}CbCtq?me$^`GjK4^*o&S zu$2;{3?IM zM{y~E#x%W}62=R`vAd6uh}opA@%JUvEhv--5(v@2ENPD(>C-sptTYjR6~A}EJ)6_0 zspO6A%Xa1YQsIG>L4dFdl>T7vMQrq?OM&5ab(%8M?mhFy>J;vdJVet1_6EL;y}2LU ze-~k-Hj@syEJJo7RJluyOAt5fZgL@8T=s`6?`r(!I`ny-YtF~fdl&AjjfQ!TqD?z$ zCrKTypJh%nJw8LmZs}9^7n{58uEw6ujEa*cYnm8NJ}6V@Zbc(nk@qIF0<8hS9&=4$e=_AaW6$8j! 
z%NTjc)!OvM%xUiu$Vf~g)?X3$%k7Yc6n$Y(=Vl?`j&ttJ_S7sDdjG3Z8uJ3-m`P=Wh^5lVnc)_w!kG{JB;{jqXom}L& z6;r4^(ReYGTyH1D_|qZ-&AwNPkxd;2*gLMs3P&tM_k6J(`@tnHqPZ}06DoHTNgW&{ zrgS?`u&p6?wPp#GA7=dit89!JC9P%E`A0V`MBEdbS#PU30z52sSxWj*@CEKGwWUSX z<&SJbg4O!J)N6=A;!^>BC53zh^Gcnmo|9(_j*sr&vwWGGbo%B{gLP!d=p)6jH54h+ zpCR}ZJr$5XCXSdwM=MR+G>H68M@Bb9l<5Vrm~KJs!LxU}j`hW5i~pv2kheHJQ5JktjqHN`!{#?5)@h!O zGyBCL`bhEmc-R5E$j6$qT|*=GI5#8) z-Ih)9lMK{nx@@YnwKCLgnjV8Vn<4c0VUdko2_td2zWW1Jr(l~iK!4N9(KD<_xCI`Q zS3=n^K9MI$_eu1beH*peICgaJgyTTyD5Ag3S?gHd%OJr%=g45Ybw!#Z$i#Of0lkDJ z-^x2S`-#(^nhNUbhoE*&g6W~=y16PJb+#RTEUit__zZn|PJFGvBhqSiw}`11!wy@c zu}7{^8ke$J`1d98O5AQ*loxq%HwnIBx&P3okTw%4^A@~; z{|wqqOSAxC`tW)zfFK>xM4tUwzxevQ-t6o86+J(AX`<8!ZtA6*(oMJU%Yg4c7Hp_O z99e9>AL4XQk2qh-f?gQ(7~4SsR8S5G*BblGn}@T#qmwn%-OG*9D|I^GpL##kJBPLX zF-0TD`6fnf;W=yl{Z0J!aSAcjc|KUZa)Q$7Yl$<1h;r)@r%3(tRGKvwRo-$fv)~wB zes$gTVM|8~>*`qOqJb{WYEk0e&Ze)-{1P(11JmNI7!jTwf=FBAuilUrljSU8N)#@x zaMte>@VX-HO8+ZsrNnRWU}ZH`O-vwTY31QDYJZ}MV)&K{fF^CBUM}w7XH;LJ#$|Fj zz(LRee+j8aE=n`sLsuPA7>MK8VEG9-WZs2$=x@=#(zF!7#eH)@)5!WSJ|Sp@*<5#4 z`!fv+(Xe^eMKlJYzVCgaBRFmsNQTAddOU#v4TzpsGR}hYH1@3zFcx)N%W9MUS*Ezr#&M`Fi==( z_NToDj2)b+wKg|(+o4^J+c!DtdYl2dOEq?R!QbjEsNnB!Bom%KVuhVA3V_J1>cl8< z+;0b6DQSMHv7AWp<&tJ?Mj#7wk2u@=Ln#NiwZP8piZJ223H2@QfPCjA*=Ge3oR^C8 zKy}Wt#~h!}|KhzNjU3U2^acF}x2XS=M$jutJ}lbpt(m3}e+1QdZA0BJ?jJc5Yfc_Q zg_v8Orlx@!JV$Gjy~iEIOV{Wp_FO@haaSWEJLU;4+11FrRj?TqLKbdr2`HhDjMMcN zECq26e4N%%&??wV%em;bRg(QJsDC_@_6|mmD~I)u`~NfnZa(S~qKH%*8$acm#YG-d z8khLf(@+Jl7@{k~z|n~KyCG|_woAY#`shQz7Wn#@aV zf2T4rt?F+k0VXKsK{(@QT){~ym}4`y?uj4X%>Q>-y@Vi46C&QbnA~lcB1)}CSMz1{ ze)0mlor)ZdHh#g0Vf%BiW(&baR%Qfv&-D$~Xa8KT9Cd;-Ch~g*^%*25cGd-RVLEZ* zzv;UGkgwFQh;N8-MhL8J(O*3n)f;nLpo54tT9Ie6FGqkDd4{Cf8~B>squLF%#t;dM z6uthVef$LKAgwanVb=*_61fn%lD|uEDE^MpPufi{Ne&P%>^^YM2=-TacQFBBmua9i zaU*FOgauncK6Wgw`2`#8IW*f|_l&>&2rr*3P4&>F;daD##WW=8gQZro&-Y+k0A}2( z`JcO9yZ3F@IVROZv*}MT1BrAW74KQQsQEbaz8l6sU0O|lrWk&y;>2H6FS);&M7`() z?swVzu_lU%zg=$9SEeJ@Rq~Rtt960ume>|m;w|kfPp=U1E?e?O zjT3&8Cemx|b{($9IIo3`yqM3$J_pEh6_0rx(F+rlu9p6hb}jx4aJe#uQp{{p6#5rh z=nTf~=%`W>0k(K%5|F~NswUZ9riL%s3Ht zxL(L`Kf=1lI!()6^b-hy(4BlvAm}l6vo5Qi{#ad?gO61wMPlmd9*rK`Ji_5%=$qci zFa-0*T}~aDxeZB zb_)Uv#Z|(p1qZPw4{9cAi=N`rOsniS#7|FlsC;IricHgzyVz^$CiA^2o|UY>jzg5L zV*{vzJ0GYG%b2N8eF5=D0XO?qAv(NgNN2Ccn~IUifrdxiM}_EPlT z2>U0JBM@F3Gf3L5u41Rkjv^^kg9<2Tar(jVJ#;wck8F8)z}a9E-Hd_xURon5uc{sX z95CZe^{D&N??C0V%8(<5IMoW8A5?j3SIs}fIrBg2U;#?6*?lG4W>f^J|0&Hp;=+_o zE8+p$fF~>;K-SZnH~+Yh0{-bSyB*qsg5$QTk~Lz}Gnyfd{yyU$D(-StA0ayP$S+RJ(3^8`lE_k0VF$Ppk@5z;Dc91MZ zg+KCAY|aN4$Z`-fEmQ0y@A34C#!4p=J%3)`*vIe20Q^~7p+M+8>jBaJY zyQhLywSDXbUib60wb1BhekzguXZ!@}Sx0VE{8{l~7v1;fFgq73IlnO7MY$p7awL?K zY6*5jsuATR;kFr~>K`Yhyaqeg?Qs`O zbBNGz=oa6*yUZ6H;8%4ejKo~eK=+bZ*12}GfpcEzuczi0RG$_kq?51#^Q7F+Ut_Rj z)>3T55n+=<7CrD$9V6C%YMV4j&X2`zWtiO2W(ZnvPcPTjJ$5|G`w0W{DiGu6<&TXg zd@rH?aQx8ws9=5vb}7L`RdZWf_mNLDxr8OSI=I!d2SH-Pj!@!F`WZ|6!1M3a*3JSf zr|_c-e3ow#RtQc886U(&2u-@+R%35`7chKI)mvk0e;~ zmzlP1ei+!Nw`>;?NqrIgVcXfX0fSw)bT-XG6SrVktY!DDL3Y$q8GXCV1Q&$c2WwW) za$wlZ5c?1p_Ex0VzLOH}I$J(d7t|KbH@X|N30!fi;z4j^BPA}z^h2D~ zx~_TaFJp)zxZB<>-ygzluo;VcG<*eGn1eH{E8uC87=y~*3!>D0!$>gHK($)DPRNhB zNUL3^oDv=^5nyi|6NKn$=j&Xs0EM~cS~1ZLy-7`CSCaDjj{!H6kzVtBuM26CAF~Ch z0^^;5%e9YUs21q^Xid1#OxrQ8y9~Rv`1`!TGDY%o_}=h;<=(yOhtRy3KR_)_<*D+4 z;F+Jg(5sWLu&oHLnl`Yto;hwnuyZs3g4&LANT%8;o5a8EXghgJi0H`lNTTE&8}ioX zI;_xiH?K)iZqIwYQ(ziDDyb=y8h zV#Ty7V$16dlzy)g@&ur`H}0qo=Q|!NHi_8?g(m}NSdUKEy*Xq2l;6nUsA=?e`w2xt z-S%<1La#kjxZu@h!Cx=Zb(_F2%fkS4rtZC@&VDauV{9}p>k9Ui^$vw$0dnAkwk}Y& z{kO6B`e5dyBWT96#7>7`G%gN3=K($nd@kU**1)3rVog^pu=m!S`6JK)$&nHswP(io 
z1vbjGFS}RFTo;_IkEtlMinz+%_IjhJ?Uc)PU(Q`g^;=&jQz2c8*bYru>=FL(dZvt= z8w;K6%e)8bHxAGlpe|c9UDyKtbGJ54vk;f#XXU$O1oxMsydlT~_pSFk-2b`E5k3AH{AcXS@}f1&(ueciHLsDGahBkkWkkvgJ+Tkb7+2wV zdLF=T;Jw}yoF}zS!Y3iWz+4jo7CtY8;64vs^`Pw#gqpyf)$FRz6WxYQUc1ewjfkb zB@jZkfT$T0$j#m$Fn{U!&jnEgN7eoS^s<e$Nv2kYKEx zWRc;-RU;naz@@%wYZq~nQ2$D+PiRi#oCl;>EyDx{eDgg3qFtPlDTRHT*Rl^oHwah6-=TPCzQXRO831xJ@=b(UIiA&!&zsS7 zlm)x9$L+<(c(eKad^cR{Md6o5O+jmD39xzsjkd`TNI-MO59U%k=}wG*ig?mxK6fH92EhH+^3QT zPiCh!;$hcbSh~T~|JdRBG8*8v8D2G+DO~(>SVBkLGQ<+yaqJ}$Y=;8*R-97C3Ni!n zDc8)?1M|)G!g15=Zg9N9;%B-gA)mdVB}tlmYD*`B6s@8gT{csPxKGTjq4!+Rfcjhq z1PmoATXv=Si%$k7bRfSBCeFyV>5L4|Mjl|AWiuBVf0O;*A!C3E>UbktP?2bp65a3d zx%XXui3NJS#+jU==ooxc-lsR2P{Pzl-KwJg!u##WT^HgF!@acKy3izb?);&d2mV8V)NUYi1Dk+Scia*w?0+(vuVF<=z%hCFD}ux=N7oB9|xhJIN+5k<{j3YRB;hhL^w2gZ-LtYj@XdDE7lQmQDTU%so_9pZ!g*ILsAixEWp zY5m(`i=c-3Ra3(YCSd)f ziS}hx{2(~uO_z=Eo`lS>qy4@>^Wj&h{>81UfT9`LcOBD6-k7U>F&r^9SgJE>G z>tKr6I9m@cyhgEvO*>XQ+%+G_HA{9EZ8(TBKn=uX_dTIC==#)~b(=c-HCtr_EBS|M z-wrn6qc%i)m{Cx#$`NRQ@oQA}=&j-5!Pqq9U;ck1K4d^JcOwHC!QZv&(ny4;7J4ea zIl)gEjBv~3c{hd(D6DY{1s#C8J4_SRbLI+dRV!QlI+DexY*78Ms5`D9biiP7F4+lJ z-uQmw4?h>^_bKCghi_-2l)%sk?(aBq2<(~N z)VMD0n))zIzhX*K(+K9BDz6d7ZAD(uaJw34QyAP#I4 zFG5aW|DyGzhc(#LYF$yJdiud}4@1Nk&fE8Sz6o}`P7X1N8Kpfc&vCm0>0oOQ_5=+U zFSTiZ)FSH;EK(;_C5U!40rJTQkj-|=CE_(6v%bL?C*H4^O&@a`6bE2)*!MSqQon2H znfq_(`7nNUC^(8Wa}(1pB0h3W&m+b-3d|SUB99g-$H$K~vxT)|kl#wXG1~EL#A$S! zkw|clSVanQ={ln_RAM24aTT=Rm`7|wxi+H{&oIgDL ziu)o&?Iv!0fBDbWxSHt6#k?N;%Zej_l1JNrVc)m}nk%*+FH!yETl$H(2XM~i;{bZA zQ()|8F=_zuA$#N!sJG%CbS?YL<~Z3@YyXFbo@mK*YcSV&Xy6%=p{2dz`S)|&MW8O1 z>7e@|(Ib61x^|U8G@GldnF?+bQ3MMrgNYSTS1y96IcdQFvLB9IZQ(=$&A|w%2dgzO z1Ty#RHKecB5dgxvwM_S%(GC9#sx$33C}l$gXApNa{yX@@y^b8?8!EofX#Yl2^an(k z^Q{C~UcKnoiV2Cyxc@LPvHt2vMyQ*#K2eBoU)(}|!dsPak~LzRieqF?Y4tA0RjT{6 zSKNqO5jLPU?z4@mcN43n068!ruzRV;!fPtnUuKPKDjdg+#+%KARAa%C^T$PuE}bTo z?Vf_DsIkbQK~8HLEJqAM10G|g6x>3p<_U)K&Y&(FExS6n+R7YvR2Yg@vqPkpq-mHS zYleBG*G|*?Y;8w$>q2$6;vmih9l~oA@6?lDmR4w9opEO6!3t~V!fJ=+zugSZQHx}| zzgEFYL;N>tFL$Hym{iv$;buPb92N4H2J}wW#mxg9ffK$PW#L#Ev^9}KE6dCdz=l5F ziiDp6_a* z@)CiYl3U*CtCw;rmWO5O&e$6nnv1%(gNkL(oGrbN8mp}qJ*IT8Ig8TtqJ!Gd*_u}uJUZ>aU7a+sN?6Y9 ze_p6|^-8D#*&(njUQ{;-RJF}l|1~Y4;4V)_Jyc)MWLMt=Sp;Hfr}_{sNIr0%MzN;f z^(_Bm!}KdItOnF3qJ1F9~G`M|5sa%eVJIVlvTb191c8!%OJh$cX3@v zPrQr0wXg!#W$CY+7%p+gUg=#ja9}||&iHHYSV*qsHKY=_qUL6utzsV~3>J5tSS!eh zq~zKr*sADDgJr;ADs^>p9Zz76#Hsn-uFc_UtxF7P zRF)p}ma!5|<>rfHHs>Z0byYtDUcpc41XaiSWG}~SOD09I45hR@hx=6<`GYobDTPTW z04U5n=bPsbvwZt1x?4}VoX}VZH~{1rRG)$19tB-?NtL`CL=q*jp;LXN^}b8d7~&uk z#bm0#>3gW^9bxGHE^A3s4n5u0*o*6tL&HrxEh)~%4%(>ibBqb#`q)cjUnYN7-ZUqb zqXAk|UBJ|)nE2RDe`yvMO>}2cZ6j!X4lx%6tP{6GVFbHtkSld1aRbeZm*9*4F5ZlC9W_ZD@N!At`3ISbkEN?)<5)$!0DGQ%|C1>!xk&eAV!5n7?m| z{G8Y^EMMSp2l48y#v&(SUkd`>nFlm6`A zeowhsx!qFYMHbmhdksG6Zw-x8XwHh@_p$|{Y}$D5cG3>*pR>j$-(aPR5(hC<>(YN5 ze#*^WY+Ik}L`+si4mC`;&XN2jzOZ+106URslIcEi5^y7BB)FtZSNug!+Z47e_U?Jw z=5zs;K-@fvXhm*Q{%Qpad+9?g zb3G4F1;pF!%hyLnEl4e@;a5E)AEs?Vuj*#gl!uzL<5B^30c{DQ;3MQy#Oa$R5IbH2 zZv?$wJKLX**la-1=V0q3HAf0J1tUk(`P2cx<~mzBoU&VVH52hwo#Bs(5>FE>k7@&{ z>{Xb5C*6maDW0uDPU>C^w#7sNk!$kqg2G*?ipyh7OEl_v! 
z{w}TFI-5Ibm7f-l`N4~!USHw%Kv7fy>EnzpjvC(|o~KlOb4NTgSw`E>tGpEO10mMr za*>nhlag!x9MrAAOh@!-?=WaDZkAr z(OvFd=QX~8vDow$eO|~!d`baPlK93rYw`@mz z9*R&8G;NZl6};StY1>%F@16OxQo3O@YvSL9kO4R8I`vS@1LW{Y^$PX2h1D1^f0wp} zcl9{;6Cz`H|IPE&rgAs=XRieY`dX3i^17+LunR8zlSPC=i$soYz0*YYC*5J!>Ioy> z&)TuC;<~z2CTNT;VL^YB8ajM=tKIMdop%hjA*#Q;lBTfzsDp-;M56iWA&Y~vzTVmH z*B?<}A&enLXB7gzuZu_x-`}44Z*%G0-yHO z^uTP_)53<@Yk>oMe6naK?cggHFwrwt&U^VeYyJacxw8_5csr6iQ$_05ReL^@R8sFG zf)w*NMME6h@R~iw^~y5KK;X|>|J*r*@Ql>psx^_m`c23;P!{qDdS@u`x7=W4qtjfw z$bE;-gn5Q;5N}uDSQ69(`Wu(WST&b4CT9Sy2BL9pLmG1>i9rhDF`=a7(%>&9RX;CbH21{oeZx=9?> z&O+2e7!RzxU2Iyw!nLaFAdYc0||+}Mnf8=>m(#2lsro*qeZ#4au$^{`_;Qw6Cb2H{gYVOKvEzw)RnTt1&C4*nTdAQ4bR_lVWVb@zh z0{ou-YjxI(Wu5eJ=5}6Sj_dnb$qEJ5Y_Rt>avU{V4x~#<)9&Cnt@A%kqRV_Q0Un*# zY<0?T-ia;ofkmhW+Z7ix^I_y6?Is5Q3Kpo>BdwM1RELi;SCMKD*vm)GRU1^{+%!b! zSA9PBD452q5+{oHTV*pn(}7v=_@w4WV2ZXr#-~enDw+M_9pQiCZSBZGs$`guow4hX zo!s$)+7RXb#rG?A9F-|q#Xe%?q>bTJ(klIr_^0uAzQ%D~PshC!RaNE{uBc;tad`Eu zGmgN!zWH<}S3c({v62l>zV6pC8bq2Y_1k(tEeN%}BiPNt8U2hSs-A_#?h(=^gC|87 zaR+)$6qVADa^Qq=vPGR_qHaj)2uj;OAqyoYJxs+xg{UB6WPqHX2W@g+Z^4R;4I3CZeZ>rW;gBZ5||*2&)%;(vjrGHfD+BdleR$^GA-{NPJ!NA_i&( zgX*1U8$9xYa{lDutye)w&gN!~pk%~8qM7QcKx4a*$nA2Yr+tlx70C=}V6(gc(Wm!$ zFEe6f`}JEn0Rz<*QcT*9{Wo9&;>Z?m-fW#EItEf@z}@FL+L>xUeYG-buVly)mWMOc zqz%A9cu)Kh&D*`2R+-)%f^T4krzmlMsq-Z?(oms4gzw^`8dAP49C2J&QM*NTRp|&N zr91qvr+%B-D4BktB1N^z2Mk(4+{Ch2_61ltZfcqIA89UX_MPMl<9Q@nQr^)hGgrV4cY)p_ z)Ni}|f{3P$VX;{PJP^TK0sT+$IZ5T~ zpnWIC4?)8(+#G#>khTQ6wXj#5Ub2H0Jx-R4#bpvLDPnXUaz*0bf*oBcftO&I)7V1? zT{_d$+D6FVOla=id~jr)Szq1dF~aMj+9xEBnASj3t16v8_!sw882BIS4m5C0VxFe2 zi+=zqp#SkRLcT)XD?~h*EAFXfvYgE2wgQi!52Q=CJ`s(1COU8^l-%0BpYuJ@0Gg zk=48ZN7A|YwcPmcA0b3T$e|iSNQR{2HiWpT-EN0;971=ZRqM3cK_$tdNh)-hbQ4k& zoex_lt%J7cu#Ve0tgTk<*v^OVKEL0e@P0h@-uv~sUf1=!!e)Vo2=bvYgHCN1pc&o- zxPkXMrVXe#FURQnXx}HZ8Eyb0Tmo~W^uP0Cm4C)+ra9cYQ)rkBKh4N?s0m7g57jJ> zFVMKK$oH9f=hZG+RrIWXD$Qw(*N%9KOV$jVQM}T{AI})8c0}p9dxZYTlN;Qp=!~m* z44ia7hc9+0(Q6;{xcbvkcZZoTYE$sJc8bU|6x}La2HR40!Cn1zA*6YOrxEEv^sjAX zdse49Sopo=5+IXdQEiBu1$`YXv#VRm6!^T};BbPn8Zhe`-8NtKixH&H-+DDC*As0x zew|O}YJ46UDF1`rf=t}xhvsgOBz~lLp3)v?PS@lZoiz@~Yf+WtE6nz(w#53YfnJ6& z!Aj@TlHlJi$!giPNqj-u#B=|frQvz39}8(LGCiPHl@#x+ywgd@l}2w;bWRi=I~|dX zQO2=)TDMJVnO8-xdEw#EvA}rdOTH~Vv5DEPbsyl@d#Lxf1L{%SH|&3@z)0s}z9DHc zPy(KHiRxyOvh8i8D+scR5KjVLiz!ZH^D7GQ=GUkuwJt)FTs5c7B>JJ0fLVlZgC~=% z#Up#AMl9(51SzNXll@Pq0p#|(`RLhI_Zc&X_3+$9r?$S+x$e544i zom%~v)E5TZ_AiOM%{Tff=51!&N^-lm1v&bbDLMSYAV56HIDub=xn?g}i;-&nh-1la z<9wT#8?XR#{9fbRXXky?M+Wd)x~#k17N}M{>@Bcur#6f{^Kt_Jkfh`ft1{mUMNnL8j5msJ*U{awcDD0L&eZ$f6!9VapxZ0nQ5Rp@GHn8C zkSMt#?2((?aGQM?C=HzX5a~)|6*!F@Rj|3}XXXkJav1XY~-KYo`BGZ0#ZH z_Jwth&gsnOtTJ>RVcbm1Dy!(h>Y!>Uc?+p56Hv5?9)eA*Tih#LWtwF7a-dgWB2 zEu|&Aj@QFX{2o}vxNcMOvaZpYV;j{*|6q8F{383eZgry2C8p(AV2I@)Pi%@M6t$R0nA>j(TYg)TMs z4`ln{eS%d4Noj!Kd(%KM`C12@E+%yF~A{B7f3QKYn% zKaAIOV%wuHHphv`*RvAy?xiDB$cRq+H3wB3QniPS+U{kjQtP+DsJ~eC{!@z7@kNx2 zoU;nsHqZBgapJ;?mt#v)74?Iq8Frc_iD)Sm;-slI@MqgzWE!L?Sz^i znKca6-ew&G$s|MWDv+rEFTxYX=X7;EI5jTd<hD(6nI(E0Pc| zF)V;HKCz1CZSFYZi{)3S#*3SH% zC|?l03Ve)xU@g-{>xTzG@X+}shV#z}k3*aeK2^=lKTAd*sd%cm8Y|sTuMApyBVinR zukePBZ=_r9kAFHsI@#}dF?^t32vQ!9Y3AIGaOE9G*kTAXeH^xMRWQTKNsRnY7Mp{f zinE*3XZfS4W|B3h0xQnu9-1#ABXw+xagw5Bb~k|<>)ePE$`{M)-bX_(s0Z@B>|?z| z{%N{^53@8>e3n>HCepnYMa!!}AN{PPd$~=c9d2i|9ug)?_uu4h?x{lUF`0y}c_;E$ znoYpZkp`}OCqwAE^?n5hUBr)|R-_&=#!7RL^sghI`0FBdqV6J_Ph4s7YhoCgK@)1z zs;yFoihO1Lj#iqVM8CJ#O@AB{VGr^`ExdrC+*i7V#9l%RU@2+^wp|f2;7KBbpM*)X zB5wv3%ujnJ!pOf=U?l9O?(>+i>;yaZS(ahR-&8#Ep&ETs$}eW^ll{_85ik z_dOa6=bb2;HHAJk!qgY4voA`Hj1%Tp=J+OeD>gM4i2#z9q)+%(TmO^x7ottukUoX* 
z9_G9}Uq6&-h{I5A?Ko6$z8rE-laakC*1>arqUdCnqt5Sn_H4d#?MP)yoD<$ogeW!+ z>H-;&?oH4#Cj2tAV>IJ)x>VwEc-VD+^go)HxXt8c$|A*^367n&bi(`<^v;RCxf@t{ zVnuAT!~p${h@Xs?{QyqV<4&nfWXH57(M|RxS4sll!epO^SWB4M>o8Cn`I+KzKG!eq z6>s=3`arB7D=w-(3BE~tN8bJtzNN|;d*j9IB3H$F^D7wz$&DWrvoxYK#wmi!6)xMFr~wLy#{Xlca4=gAx4j2B&YKuK;gJ7Mc? zQt2blEYLM7F%u-1n}jejk=^*tC9S+SwxK|4^eO%- zXmH%bssw15?Qjy+KU>2phl&Sc_ihTjb8Lnr~Y+~B~= z^9`_>H$=O+{RRKq8-e!+$`Z%U|K?AMqyGVYX7h;mhD?z2EbDKzexJpUv9WbC`_d@i z)6u2gd5Ks=a@{K-X}?o*T;XZmGH7}CL3W*Vq{5eeJKv-ie_z88HNZEz#TKaVqxof6 z+wS6qpOK>;YjcG73UF8C3b0kU8#x2e%JN(C-^+Yr=EF&af8k}AzThzuG^9WTZZ)7W zU-$|O(2T=k|I1M;<_W}G@8Ea*<=OAxx6Njb_vkz`_j7(A7kL&{makk0?*XA!WYVw; z?e*s^65N~YLEE4kD5IsYUfgLM&m~Q@Qg@6|5`^&kmO~ac%g<&u)Q_1wth8ua4pI=`>Wyj3_<`#MyVof?C?;%er@}^I8X?1RjEf$+~ zD+Rx_|4a^kBfp+LinfYaD!*Xt=Ws1_8FNUPEzkZh$vB=_u5h3gu{YA4%XMp_E>qXY z&ocg+2S|LeshWDHh}&oM*LYi1^`*((YWJaW+5ZGLYY+Xn<`} zpdQR(LFe(JbA7;FfDAFCCRKL&hhCa+;)Rp^iB`Y+(>#cr&=P2-% z<~+PZb03SzuLFQ1{ARAw2$?G3AhJ_mWSqJ%l|v)AA7Dns)&b;xgO&P?BU;+1rPa%w4KBD#o|)u)cNl2Ad&BD z*iMTdT|a7(Jx!DXvO#JNNIf^mw-#$&cC*opFP_yU%r@p?+>$9>XTy);fU?k@_gUm> zoWhyYA~RH7?NrSve+RZPXVhzr)$YUUiT9iqxBi)V$U&tCCh5d`I2VP|F4K7A9MjGu z9s!LnG2Mr@A%c>Lxt#TB<7By(6rFU!D#$uk+z`x;vl7*0{Lo zMv5wz(Oz)+pKCdR`L1rOa1MmwJG%Z{>TQBD~JD6 zo4dGp2^6*S)-2nTwA{=ZNh;X@I7_6$p>*RdM~X10NdpPBIE_84}DBVFF2p25kSIu8;VWkx|8CZ^;#2D zhRd>9E6KbJsb^M{3`6Hb=YZNptgaH_#MXRmCA>6;Kh|W`b5XBt-orT!`5Sb|g0Z`C z8+H1&Xi7!gda?l~3jDbPf)RdJEDRNs#mU`82iE<8cpv|1BxLEg$h?AHdw z37@jY*t}Ofxf}QNb7~slx8%9@0lh|nPVK*;M7RvXRFpa!rX>OJA~*cbAoR{_)@C_1yszXYD)NvikdL~m@Vqv-JWyakNYQUYXLR4+vrg98OWMNVy|h-4 zzMVc%x0XgYljqMRDvPK074ru`f+ih33|-+ve3<7cj?lLF9qG33x2BW>^O@UTqE67e zVL9oE5#ICV+E{{jlQbjW8K%L-!^Sl9%^xMXY8F6Aelo0+YdTNZ>ZJet9S?=OxS2ylkU+b&3~x?A5kQ5C~b7isP{fCL|lIPZ#h$$X^t_u+70A3D+lf6I~aj^ ztZkA$39$aC3=sZ#?s$1!WVwT)D{~)3rny}kXEV?J{VN-D;-I6m@(W-r5KH`DFSu%B z743wUtG}C(Qls509(p){o{U|2>UPtwhq{-Or2Doo>4MjT75NnHi$DWioa)VCoS;?u zryr6CvDYYF^0P{D9GmY%B*<(pZXkDQ7D*BB6k^@sajo-`gO?U1Qk zB6BP?OO{U+?!rZjnuq3^KC*L$QseH(p9JhAo^+eT&Bfdwx5&Z&iIiWcoWJZsxjOhs z@zZK^Nvg;#CTrW+AZ%|)tPvNSwyGnR z*Zu5XO#PWW!#g{#qyJ&YmDfG9gY+#xf61!m+21|wi{}hkLGGZ^f~iWz2wUw!C>;!> zyWY{dO)ir>k;2AY_(u@c7L>I6gSEbfF+=;vw%DzDP^!L{^SyLq3>X#Mzc0oXC*t0; zdi|C2;D>_eoePgV|YYYHm(tRs+unN7!h8(oYQ|J!cYkf?sv z`OC8;?xgn&KfkbkhjxFt(;lCF^N-Zdd8RRCQ7bG&9Rik(^vsOC-`hBg`ezfr2ML4g z=?_Mv->LC)_Pju#z_tdib?m&!+2W+ZeuFtu^NX$5qU7>r9oXl%9D7MH-N?iGF0(XX zT{5-$4o;{OfG5Q~RovhhTOIE#p5FqwAJMv-Y7#4YE8nJ^J{35CQ-e`dQPfjCl9@5< zgM_rj1*pFf5+?RtE0p2Iv3-=UZi0MGm#o_sj|VQMJzvQvB@YB%V50IgOK}6H>XOyb zlp;D+VTD0NRgT_|9v+RV)Rq5F-R7H(mf3E_VNcC=3NZ4l>JoF2q_n2eOegY}n@GC! 
z&O(iFY|PC(D^Rt30twXZ7669<1j@1GfdNmvEB{icqkH0N3A_D6Zkk#fVy z$8WWIf~>CJ?FEO4ebAdfa17m5++nKyd&u@OxmoHq^UcR%%S+{I!&Cgpy1isPB-!Ol zg^_zZTEKhjOz^yd-XKO<#T=vOuw#7qy=8LZi)cd#ux>j|vs@pv8JF3en8yv$xX=M;(&pp~rP(Plnbi zA+SN#eUYsqUkP@Z-l1rSJbz#Jg;F$tlp+AwXKBy|q z1t_LDLf1DH#~mSp#n`qVR2dCIG?PEjQSz8&mbN9rlsYw)XeVm>S{oF5;6F4f=e&B^ zXS}0VXh1!;a8vF`S$8gt=qn9Y4)@ih>;AF8ah@LnllIFBCf+@UJD&?jb{A3ltTHuQ zV1^vb`1!l4?rDDx;|wG^Cu=N$U>(Z1^LO<@%4kKT=Ey0s_(`$ z1F(sB2lX{-7NqJ0G$T&C_s!%}M+$=4lM}$J$Yt+m2uGl!#I=+YMU zbwAzekG?2eD~ObvllJHvXf8twsmCEdh^+|D?%9fLHSKO-rml~9qfSI`U1^B_Od)-h ze0PcV7~e~Be?O=06Safip=DfKTskKu@zH#wsPfCtfH$-zTJZdcN&OXAhH9;IDrXtm zT<1Fx@LSwm8P1@k=#I&Z(2sd}HO8AzA9XXzhIZ7X~>g?L<8r1-E7 zN{5Fh@oeyIKe?(@u~=P-@=ygr&(m~+Tp9nR%aOmdn*}GNo{fZusJV2Z1FS*U-eSAc zaKipPw0F*cI+?V+;?Mvaps7v^@7vW4??PSIgADwRhi_GNX%D;-=94RsjkH6A3?$AxJuJ$^ zpJiGnkIwEFo+7~yb|~Uu5!lnxWi?Llsl<_=%2_&1jZepZuAHo0JQOXW)`OJN%gV)| zv~+$O#DJC;^qX)T=HCgvt-9WY2qBq;vX+n{o>(W8TrhH7g#B*J+gQ==_zieSl>{u1 zZFG6_N|*z0l?}uRoD-~7&eGQ(i~9m3n(reIMA4ip4ZypyjE=}Hz(ya|RBV);yuUx7 zwo+jy3K%40scJUa_F_f`73I8JZFEj zU9q^BMf(?HD7p7?X2GJTHisFRb#yNJWhF(n1ADAowb>!B?r%rti7$E<*-2S*?C-)w zh$;IY6Ew@KvZ}M;i-f@5#Vm*`h#FV$-mM9rEz2TqW}u* z2J@TQ;eFa~(&!Tee(VZQ_M&e9{A9IVHQ-3dK$-<1t-ecHQM?4!=~}r0kfK-Q#M*lu zfvqapml6Y2eXJgeR+myl-x(9=|Q|8Jb^g_`YxXc5*IYB;jP z?b{N4*~)Kxj`|YkwHWvJ3a?4j zGZaRhl=4_N+!xkHRAz$}!HNSH=+!WUwhOfxg&5EJ2*ocp>0H=~BhGkAVcPp*t65b~ zck|HO=$6->qZ6_3$OAPm|%Kt)wZ;y&I?U(fW#*!?L4u z3*Lc+!Ur<&mx5n$Io4bgj<_VpKrl_ruS`F!yG9!v3!n~E_|h61jRcPzKJa?+8vEp> z2g~D96sdn=+i^)+JZCdgcxS{_#;#x1oV>(Pa(T+A|G9-Ql8vdc2Q0ClJi=|d@rM?1 z*ZvjOK+8A!U3ddGVKG!54&B0R82+s9X^cIqYIMA|eF6#jHX+G$DZNC3ACs909)zT4 z-NK9CO5dlr^EA7(FC(9=4v2ZiZH7#A2#KxQ4f47?@p)`dU&3bY%3OQ>U(_zF(OY`{ zIEUf;Bd9SuU|A*6nvo)V&0PQ6-v0pRFHhlztHf=jPN6{wV5$GyHiwJw6m1*sMy$Q3 zAT&=;i09B8dh2t+noC*o7VgSM@+gu?06KIXPy{@YFB)&vWpLUOlgxyvT((zxx={{F zH`Sry+L+O6o8O5r@>8PQLT%(1>*01tLS9bncJexG@$TwvwN|l@tX}~++}R#j3((_> zfbI|E$k+Wgn(C|;E8grS1BQVO@No$m@vGve8;xHl=oFZwLX()|3U8OC69c51U@LVX zu$YFn*6!=xM+1WG)&4g8!^-y2Uc5LV9mjr@=6;^>-b3j(>;E+!jiT)bjC@W5M!$e> z53}p%y(ccgrA=Jm08B?xY1(VFqmT(-OH{1T>|X&jbPv;1BrVy>|9RoD%0P*2ioFf( zjV}W)(Y5v^yW=pdEegsD9e+ihyBZn6DZGt!i2 zmYt3In>e``Ql}n`5#BnugcCgeLk2*t#d%8k%=b*{@KkJAa65COvw?JFzw-b8878iVpFBG;D>O6Nk+e-UkNxky zR%ZyC6A5`{4UK_A9zM>7fo#%kGK;`}0)BKkzCE-6@DVD!Zr~x@)SB`FFqCG_dbUiqGz)<>NQD* zxu^4waT?BSk#4h&tY!0iZqlocqC2k+nTl$~s`hne#%Ux&1%Fu6f$E!F&ebx6MJ=$I zhYf|alX{s1=2Pp>u!OfOerNmYnm^h>Y9Vu+lco-){f0`^Osj+{cAK;n9Kwa?&#dm} z3Ua;&4uKZMI1gif4k- zuZ6OfPI|~a?e0IgQ#1EL69#dlY#bmgcGf>q{q7nyam1uMn}!%H>&Byy%g>aHUm^kn`y%y$F!NXa&OM@DJEuOo4eu+Fi7SMV}|d>Ktk4&8KprW}~R5)Y-FR(6mxzLRf zL|YH`IG7sVf?mDoSFp9-8Oj^HN&Kkspo;N@8Dv#GvWa*TsF)gZcm7MAD^94QE>qZL zrjOikN(AcNP}$OVz1hA1Rfd}JqSJx!bl2fU0z0#IRlJ26t|)mo8BQmnf%R5lp3>m> zkmxJ=)|$rZ9OqSMxMan-{zjz(sMpr(ZoWw4op-bqn5B*Sr9jqSm`}DP4U}>7X_#ZAo0`7(p3zOT>T0oNLt}EJ!^( zFJ`Nu-J~r#>I(mS-0QVJ1s?Blb}nBOodu>2*JMW*-Z#K|vOlg__OwP z=O(>+9Q$GZso-#U0pIBuzfty<8u1<>h-~v5xG1o^qoo=)V|%%qRLx)`|IrL|Z$Hzy zL$IeqK7 zm&2*>t_q57g?ffB6HWDAV1=>^>@`1uXa*=#^8gr3p)HbMc!6D~Zt)0#_7>gZG~^*v z>%w+&iTNO_8O9uuY*M zyjLLAZ?O#g-0uOW&1W7p;dZqa^l>UD6qL8NW=wMvYB)_Q2w8)z9DvQzd(NX?P0OUu zVBiZ_XoCE((7MFFuRr;hxZx=>Nxx<8*lXu9Fx16u|K%{5#${-u6ay~11n3z9qbZJn* zfp-hTgqOmI`A@{I*n7;GZF)=H4P_H*&wQs0C_lPOZHP^NG~qk^iY9iJIY+MbqugK| za`LM$q_3+#;;%U-<~_{%Dt7K*(h;NKc^?sSGTM*luur4C7~4|P`joImGkke$*r>@? 
zUff;-3h9Xb&1Aldt2A-BYKqB+_v{m|>F$}Yi`GR~*yz)X{MW%R}Ka+G; z;Vj(Vwnj>b(8*o9nU@u^&=P{;!XU!kq7jjWx^CJ^#p6jF!?PE+9*v|A-Qbwl{b@88*r=}ek8)dW}ZVRzwo51vkd&>+bH-wutocEp%UAeK)*2t{1o8V%zGYa&FNGibB(wct+ZRo znWLv+0@EAVWESTw^LNpK?^kNpR(#+(yk+Cmu&OEF#^WvLXwh=g>09* z<-W-+Le(I91Pt~?|7%2928-<*vXZ|#Uc%E-4(~TzM z;M8BTOx@MB=5WujZBp2Y)FF3HLfovi7j4ZJ9bTB173|Q>o`gXvI}%SLE&I&`ej0Riu)VIMMU% z3F5_!xbPlt-Cwy4-t~LwyUl=(2H<-%ZoLp^2M+b!9MKTfN4RZ44Xk`5W$&6cuiZ*` z0A+P4usLeW$IO_0Jfv@eIHieboJm?4Ym8~&k1D^|Cm)%18HMSf}Y zI^3Mo%}K-IL!9_Hbs{f26TOFuh#ztC;cg2JA8DTb1DsFSEWy7ZM$=IBL*w^AvXv4f*)KO54-|i$YKLqk93yu39ShknKX!;?Qdg!K2-`0J zy1Lof$d*Luys?M-pOV(7fATZ6WRR1wS=*(hW`e@q&?ZU-G;^id)N>~7S^wg}gEWFU zc8z1ALuhM$iDNirO=FK~0Ibec2+FZiS{}^RO6RXgE5C|dqP~cS+hIIc#g$qIJZO?J zUN?}c;WsRHUK$Z{ZT$3D^SA59V3DR`9a0p1BV}bC!{kLTW=W@K-$oLEz!dwr^?Q7(E12sWM2M zp&{&)zo3hhklRoTrZ_66fMW<50Dd#^9b=XPW@}D5;tpuqsSe za~sz-D*u6bFp{NyE!7{ERJZDQCy8EUMd>obEG9C~f{q-i{1r!b>_kP0hVA_x_(Kjr z*K|5gUP67}2~98jqc(`h)SPB^VCNm)`!a1icr6Z^7Q!1QI}hTlr3nHocT%#6#cVI| zK5>|tTs%k#EF;U;DgA$#XDUbsw7;=%&nIN0g{{(!;OhUGAOqo&VPT|2k`rHb6#~na zEP`nCJFsVmC@Vd=t|-PZ&u>2}Wx;@I<+oVbB6%)&~s|$67INP{Ov$5QO70hlQMS zUtsKcIe;YySG&~CKm)N`oXX+5ozo=v*++WxQcQF_}{k0J%pC;z~LrNZ<+*?nn-l=m?va&=L;lutN*%9vj8om2m zma8WAXEFa{hf2a*B4^)f+o8$Y-BVnP=upxh?RH@j@VmiT(6!F&d9+;#ZUuQoo&w8J zyTWN|>{M=?sU}ZMQTT`Zww}yS9w>fMl#|Sm5Kz!v8oP)UHEPFcaV~z+)1Z?t9L6Fz zA0r`KI0i|GhP2H(hsm2*=-$Vu!6R%my+hs^jBP=P(Q86J7O~!FI@UmE&vYY1zr&a$ zd$>Y-|3}6|KJ3j)`8@OvaGss>68g?r_QY*%W2(5Yu#F4SDnY@yuDLNm^XEdeq0a#B`PllKAeP_ z7d~r-Txdg|Igk5MEkD_BkfVRyXLhhvEJL(hu~A#qY6Zo>dLU-t#po3A{Sgaj>Rgr$ zhgW0wQCxv206c&cB+p*f{ks|k6e*zg2bf5`gG*AEfd!fBT;do=5_FQpauN^aCqlTd20ItV?Z;il=gE)U;yBu5&?m)VB;zwZnR&ek*yhlHuSw<_ zwiUK02KR*mH>z&kuQv)-+t52yf+TM!Dm$1nw|x4Fnbj@KN+xJ;-GV04SM(yHqjDt} zypuymJ*Xp zv%X$R?c{fVwoafAAmDEbb38jaXY&M*faoAGhm9<(`2^alGn+sXWLXw#N!a2RL+SxV z0ZfuIGC`&)kN7?Xa4~x>OKc}|H|OH^@G!%cr9HUAb6OMvtC$M~~$dTp|9c9MM z_=W!>h~D`5P@Z_v$^xqOGax|qaf#sBp|XLCTbROE0~4jp8(Bg{bxkt_PQ;K-aBiea zYA5vV-S?Quvz@5C*p3R>50I`6DHq?{9`jva6hacv#$CI4@72l=usM(^onc`=p{;=cRpxmsC7<<(YAtWLoVzOhd zxv&YSo)WtcD^+fJ3NI=Mo6rUpoa?<*?!UUqD=#`Xb(!AMe#rly%9q*)0M$L!QQBB^ z0;KMdeJ^O7M0k(A8%gmpUtETZ1f#Me#a#uSy@VRE9y>?h9L+ICI z+FAT#jRAQVcckWg2F>OLa8n;raKnv*jr7~!<0_elEW0jCmiX-?f&LCcG7M52~vd1QgoUaU&A=WbGkp^ z2IUBSDYq`m(}XA-!Sd}U?4vqKe~*ogqF?E+F7e}(EWrMdhC%EljV9Wk3k5m4E`?Qh z95~O#r2;;>2~}-^?IAB5Yf1Vix-WK3zbsZBK;@iigi(X$@}}Zc;?&fRa4nr^hQ|TxAQ&YIMlr7+fB-<{;W>gueiMI4wOS4MO3b*gq`b0=x&a~ zu=_~&A4+qpfSu)1+AS<1g%qci5|)*aSLfs-{b9wa;nws(3#w=7uXcAM^e8~!=!#N$ zI~3_Q;cvqaL%FdG%9qqNVTggxXi}63I#cgFC+B0r{4Ke{=|8r(vxFRe{*T#h5j}FQ z%%bR*NvybT-gM%GRXv|(h?dk`p#6U8gKefBwu|azK0iqRiS~F&Hd4dlsNzA{K8eES zXR{*=xyEAKe%jbduS(%a*q&cS9}xjs=i8Rz@*M8dzuxHrU=` zC1U3F>W|A1*Cr6t3S>GW#^NN{?}Q4 znZeWsyAMz0dSa{GX$Wt+W!@z5jrmXNEqbOn41a#DZguGZub<9>!ORa>I+ylA_}X}QtOQ5ezLH#9dLPyLc* zDP2A8IR657Kpy(HXv?f!gmS`w;FDDNRb2bW(99It;}ND}Im#q^#?B{CsZ`W|hcrtl zSCXJ4j?kb6Oh}8a=u5 zqP^V*J;xPkEE9l_Ok?rR`>{9s3%_nAU!da1Z3UMSV9BL8h35N3_fAL5VfD+2J>90g zlZ#dG_{zz!u^j&@*Paq9QdXUmVufvxa8@?1Q=<|PG)IAj*lM};m*I`j44y;IPk6ubc0$gb*yc{jM69RMqnbvmO zZl%Y79py4uU;bjkfalD;8&+mnVuu2?5}Be0f82aTE`_d)r==`|68HRPY#!v%`yG`g>q1y~+5XeziPFJ+672XdHkuO{9AnD!5&f370+DI-VI zhLGGHj-K*zJ@AUODfVhD5CV#;=WS~$v?Q#hF9Dm|fa@3T+p`%Dj3KYFk2x_>{Ey|h zhYMEJ%Au(no%*0txFdi|-zQi$nbi>oO(W=S_=G(~s+nE^Ps2@v2UD)O*#OsbBvTUl zln?XWY~Z`eu5%ffZ{T-Pd`rW8P}OOfs8TOzI5tNn^=QwaM}SL-@%b`V+QO)~lMz^B zXJhz8xjI*FCXM!Pt%MA#?xMsEhuQp+qMML^6ZPM#?8(D^5%JTs;3T^aR0H{M!m?WG zgHpCK#sXd7ZzJH;TkX!;NfhL;Z>n5P=v8@CkmAcdLfi(`iitb#dk=57_^CD$*B^?Z zI36ZH%3Z6iib{1FbH@H?1AVmqI&oyCkIjh2F6M%yhtv~wv{d`sV4uH0JcoNTlNYTh 
[GIT binary patch payload (base85-encoded binary data from a later patch in this series); not human-readable]
zYGD^dI)RHM&qRu9p1Md8Erchi4O0A3#x>X(wB3BPb`4pl?w!Q>OFH5Njo64FxtzM0 z;#*FCuu`4O*c|;HI*H*Yv6sftBkI>Q1t+Y@Sv0P+XG&%qYOu?=;}5e5b>-Thh)Fd_ zk&U^f9&vYBIHb(ioeJ&3{lZ8+e;L*!kQ#-utiTyBckhqJKuQ+u9&_JIOYRBs8`y0u z{1ah^i5Szd*k+CKodZoR?6r8!D#fkEe&AEm5t}u)^Ibc_+Jf)f9XZTfLN|P_S&f09 zXRbR|-RfJP^Q)M-H4HHTV#HHY&Kc_CRvBM?MY-oGFz1fW&S1*EiDMnpbxsPqcHNj= zfA9Z)I@JFQf_;ZiX}|8Eh@3fBDzo3nL-_*{zl3Sd$jTISckmD4)`%axFv#li zj04zb^;PgQBrn&EO4R))d<`gc;=*-=w{>_gq8!{XtNh{Fcgl3z9#M|TyJluO`(*Jucda^xmM$wT>dcE4kq zi$Se-k9Jdr^48cR-<8=D4$HveV#gFN*TbJU8Zp;}ci*Uync*fsb4XeFvONmec6_VS z+K$Dn53eDYz9mRBe-r$_nnxgV+;>#=IocgUrgCe(1$7(#jpIACF6eHOhVLz+xlT1>jokcOR6=OGEn)ZpsQS2DyOUKBowm|-~ z^0Ka7V%A3xBokVSyL%2YS{xMHW1kCVe~MJSWe&qo2_~<(hS&*c8vYkH>bsyiC-@hB zF}aZwL!rt062PAoP?Oq$WWf*jAILgFF9!2fY+^RO>PxV4=L0@_jOY!lF7bdoSW*=s&o9tx zNB)N+#KcAJa+$z4+S;7LM*IPOgvyukrbP*pQ%k`)xiAk-2 zGhazjQUCLF(3*aK1-_9{NG?5$wb>o0F=(}+iTg>?ax~4-P8ZjWD=x;GJq(*DaqX}@ z2D@p$OY;xS=VC{`LsWs-mItP}`mUcRT?OP;GTQ@A;QkaigO)MNu`T`pf3c~giP9Lx z2A)4eT~YG19~6_Jb}SSXC>tQJK&!Rivz-HNKvL^s&c!D9n+Uy8T@)DHoxSwcVn;D`sG=_594`C%8bG&FoYSOWoznLNqF3Y}7%836kwf+oty zX7Yg8PNpxrk}wupuPQQj`PLEmF3MKnS#xOl!_AbZ1rrp{k@~DE?iuofnRVAf_CZ*e zy9+itw`0+C(ne&qa!vgmd+0ZWIES~~2(&FDCg!+MXx`@2W~)T0T%{ci10WINCq2wp z_2GpJv|V0YYx$LU>>tTE#O?~}AK_%xTdJXxc>^KhL+BD<{MKR#J&?C+%ty1eezWR3 zT~FhKOUhpNuxK1M>$|Yt7{ybcn8+VXO88el)DcS4(jU4Hx;5C|VZO(&y{h88bgyr# z+M`KE*?$Ii5>NM|!S4>05h-g-BS8LpqF;dr)tUNsIfbVI+2GgearAL2{1Q4rvi3Of zi*vajZ3Ql!IhkR~lslUe_4ej_V;e#)cK{L3r9k@xQA*@RP;;cKv`tY#eyjeN^I8Dr zo{0%d40oIlzyXGNRWDU^@?hYb6Y3uG#4@~@H^P8S7WMrEc(nv|U!1T9YV zJ``lqN9-l24URbzYx1YaL5i@Cm6W1!QkBS`o|2$7N9sox|3`;F)f3hLY%HR6V`qo- zICXs%K_m66&Vtpc4!1gwt&P5;b?e_Rtc~Tyw;NI;{6*pSy@ zUFRJ4*KZS3KX;_F;FXtc=bAfNr6c*1dM{iYw0R;9IvD$d))8XwPlW$7>-N>UE@&HU zuG^|DVGPY~h{b$SaM?yIUro3ud4#MgCj-pX6?U8kX=AUwlDZm zbi&c)2}=X)2^8LGTgc5{HqCwuAlDc#2+X#C56~Fdk9)x2L#xwbHepOyjh>JCh5b_A zaoWBT?B3p>2~BRYA*#XrB;1&*v>FGZw=WDfa9i@2Cnz)ttOL6NY(?MAk@q2zV}XXM zg5gT9xs-I+-6thN)f=>_&36eBw`)-l!xrEIY4*sa#<9g9Xx-aI+`Dy_qd_ggMN`GE`PRi|?8NkE5V>A1#i-9%6zVX;-q z*)?vCo~mlrV*g+#bULU1UHJt=4{8gE9QBd#jh|mM#i%|CTDiZ%q4nF52bEd&v6c=aV2e2Kl`0 z-iFcNzKP?P^jCf$oTS8(-25*pQ^wp+W#1K3y1+ae=&A=bYjJ`6w4WfV<1p?6Za%L{ z{}2^(NK!_y74r{luexFm_p)WKJ)ObZ!~DYe*zr-;%zT*Ypey1u9GH%_c*BEK;{lel*4o2lCx2idsN4FF5JMetE>_8datVSp zw5RG5+upbH>3sM7R-)HY1$(U0qx3KBwHAm*nTH8{atow9* zF;}-kzZK4ItPT5!#hsA;a%~1<_ajeR(-k|4W!AZ-Bst;sC;C#iLVe4AXUuio6={a@ z6LoRKfukCykF6clbtjUAHEA8lateEGSp7NinLhc&{G6+AwF_m%j{Hahg6BH-$zbIt*I1ea1)k4ImTtvF;z@Ri2<9*43!B26S~uKbiPWd%qONTzWMjRKp3)EV*^ZZVVzuW)O(S(v#H%S9 z-uL@i>Q|+D0SQB9s*}a0KU17L1PGI!imqbUayRY`(w)nLcy(xQf8Hcm8Ns+pOOkky z_i6eSxVFG0T0W`7;KLNjR9PmcaHK)rssjb)tFoRMOsMy#aUZSaH88V9Svanvl>g_` zcbpX$%h|<|CkOgqIAyqr8;_{L;`{=R-x0@q3d&V++)Yo^->h93r6Jtaky%UR_I}aV z9p}{Igu>qP6GVN&4;gSZc)+vo202y*bh*lKrpzr=#0RfWhX2GmtM&+EuJGp0EfcA7 z5edZMsJs$7@yD~@5fxA14zZXtOvxRY@Z3X7l`G$oGFg^QQ`AYHK4+t zo!X$7u>p0NOdaOM>101+k|VI60fbok3U6!WIbUPh23{O+yH$xXx0`Q`e!Os|W0Jf~ z`$oG>? 
zDLZg`bOWN2wqJY=g6KVTtO~gzml=POS_%V~`%GqP1nrQWo*UfbvZ`ljuEXtw!Ka5} zaqLYywKo2GuW9G5>ChnHBdR22n3M z9LXT&Y=MTTW!>0=D3T99JxUo?g^+k*?RR6K$)EJ~(_03;D)3tb49*^`+7y z*InHgvWHOW9_^rNPcbW0MQ-5VRV4LI9B7j);>UK0yed`O`S#JOt25K%fFx+=;Q`CX z8II-|5~iJYr_74r&8QM}>p@I$w80!s9j`tQtldj+bLERwUhUQk8(J?TqKT&UA(+>Sz@OkCAXOvsMlBz$fIz61@^tEq#BjEaXqO>rAE9TXf2-8{a zPxO7mC(O}Z-2X74S*`G`U;^~-sLvM5;k?4&$elX=n<@DOME)%Re^UJ;tztxn+rr0^ zadu(w^cY7-%0BPdLD4?+r4$S{U(n*QxZ0zn#ld~ozh_$K-O+%3!-c`S-d|dpxpJJiUG8O z)K$|=(E>l%W&Ss}WKcoU4aoRxUainthy1;%xMquWo!bc?&nc3+>SjNo;{-G0-67I5g8^Fw9$08$ zQm7S%n=^D)mATN2sT-iM%IK@KRU@t97LJUn{SZ(a{_pH4n~NDtR|ZoYhkQe3Ui>CM$o zXFbOMQ59$S>_y!Wa?UmyET&fE7DtpBVDBri+SXW|<4WLnxY(GgSkb?B_a9*h^#(`c zGHtqtksGCN>^aygu`4-j$_-gM=@9D*68&{(v zM^*-c8esDS?mV*KYtA6@{38!ffdTb_9d0xy7n*E1i8g;3$V2z5j2jY$G{?0CgtGQC z+YKpQD$PaS{<`7+yOGest3c^$Jtc8LDm;R~uuuw|IZ`I}W)X$( z?cA}mrlU&2yk3*a@0-5|5A9u(Ssj7wedc%%;6^Vyz1H)<_`zY zwg`6l55d+t<(V55*wQPe-SLt)QFX8o{W)7weczSBq)pZ0dpkE(;Am=G$J@(!un6j_3;x!Z83RJF78=o{>cz03Lm zP5^{YB_CHGl#Qp>$gj+ojI~u8yzA3^nW~>P{FfA~X3e_DE;5_`@1?4XxrT@E2Q*nW z&&S7c&v;Sw;kvqtc!y(AqTYw}>-a|klsWc=rP4PCcRFnRg^RHja-`~&28dmPeH*=V zPT3)bg#S*g!3_^Sm|M?$2Ay=jX&7nx!}6243N@!SiJ<5u1d5l0ee)Rnrm`#zw+utc(ZXXF-brGpg z@`SNMHRkeor0k6}6?>ohld?ojVjU0~U4t$N?@1N810d9E!A2s_=7e~kZ+gT-PqwL$ zBW?)JWo4#gptek6y7W@x`sfN3^CWELen{)W@Aqax-e>b!l{I#g;+=f&{QCJObizRo zno;eP+;zr~Z{*-&dV}ySRgfi54vSMVZ1YPbCsZZ8o-kU5kuMqHvvME)D<@qf);1Du ztS=7CTgdFg-Ye3p@7Ot>r@p7};b+b@FCL%X&dS^x{TpXbx{YNMJ|^Gn2r)oyFJp+x z*_F-73I4wpE+={-eDzT;M@3D~a17}+P}mN+A@i%;Vty%~`byWkbA#Cc=6|9}!TrH? zUUd?I+)!y=H4)a-Fh+abLfGmz#msVYg{)wYw zPxt`A+$56+ExqjuZ@krs#k6bIX_CWeC`it7f9c=20HbFR;FdYG1z-)#m{3EK;w6cU z=S!XRpsubQ_|p&!9vQm;MiC2+)LR$Lbj9EQGD z4a<)V5&9pZ`Uc8i{q7%`@xSTm3eEEIPip6E1gB)fUtDHLOB*1zI^WktA9qn4xU{N@ zL+^kbubL9T#Lj4d?OtAPNshYb`>O0mIhXR+YK53)M9tjj)pz|Gf(2ptc(k7X8zx)L znW&nGKF3}hg4C6)5y>W#3f5NS&*5l|pPTFnU>+=bI^1up(Y_RBnohc0Dfwyli$0bg zuaXrBO_dw5xRE;fmJ{wK%ZQD+ob5L>7P?#>Km&Y%@DC1_!`eyT zs$K=&8yRv<^2>h$Mr!%!SmTE3th%HC=6z0DyUGTdZWu@l_8Out1HImAeWZ{EKIF?V}vmT(g{x3~Vo?4vm<@-3mQe!!YV--zw3 zEevuAcC@quI~<3#K+J$CvIg|`+26ubf2NNM7Ja2p)W=3=ij}LiNpCm)^C(H}Jlq?b z7xG1MtiQ%wI}$T-a!BYmmG@qcx=;VZPxmeXh_=eqq-MRmaG_pfax zrmKGvw$+{_-|g?PvqU%lOo@lLK4NEZeGkLM>=IqS))(|sI!RY=Px(wZ6ARK9m2ZLH zRt<+ASD-y-0iCJHOv*5jyeslku;O-A$<#%is^--u`9_R;WVHeb?axQ8E$7!hie=>b0q<{#!nxb zP`?BHu%5U=zcYHId;o9TAM5;!R*jz+Dqs$jxZQOP`ElsQ28-BKNR8$Eb)0AgiSlU9is>GqBul217$Rk5LPP-3q&O0|{Th~8 z{wtdA@xWN+;2qXG9av-K8jQ?wcsf3c)> zg>#8Fvhgviqx&y@Kk5*3U`-~`|4+q5BBC=jMljMD*iyk%MD<8}kxzlUh;88C-A;=8 zi*00C8#i-X zL)krmav(d>-$}6s_tnnS9}tG%TZm&9u-~apfzQBo_s;kDn`mp49BtQg&n?j8(F~Q1 z!4vn-W-kQvsb7}3T;eXb9+|`Za>reY@h;E#WO8tb#Ipg<$dLYseAgFsdB(q2ezq^n zN6g#_&E7+i7Yu$GO-NU7(^M*nCb((Ul_`%D`o30%@?wVx=)39$Mzi&mxv!^;my4Hw zf}}k*>#8{+>QRPod*^J13k6n4xQ8iWcObB@66*@wAXw>A@5KK)nQ8+bF#k!1kzuKZ zr|(~sau@Fd|D&9eHOAG@yC@g5TNRq0x^okDBe6HBZYOg7UT8BxxO&hCsBf%flklgQ z#kC6-XW+?{>uCE2oBq^ixEtBVwY`^_=6m62nPX%+0Y!u+@vzJJc(I1%St7V>H^> zp}08_v>Q91rYGZOB!hCmLr^F*tNct)oS;wJ1zJ2mLmVkl{*DxT(SK*ANj~bYi;hej zV>`4MR3}K3zn|xwh^@NeJ$5VRulZL>@w~3~Em*O>3+_$f*RH35IMLhYQ_XI>NHI1x ztn=xFJSx|J$_&yN88$^g{+XDvwLpWp>_pa@YppQ5iCSTAmjQZlXUph#bN4lAXgCt$`NU{^Y(xIa?v|>KQZJ=SWoykESe83_2yicWJ|CdWP_USrCF5=`JU> zr}oY*wTq|lxgv!kflmXgo8V0uo6Jl5j&f>i+X;tDSR;=Ba<9DMu3%;lRCT$AJXF8N zgiWR_HroOjUoYWlr3S8hf(t5T#5ErHihak*cL|E^R+ehmThCkssM!*(Y zWtFPe74}!e>&*hy2@{uvtgkJpR$x`le=Y+V?{FEsnBPF8!)!)_pzw^=Gu7}~0!unJ ze8^SHc6~xrI7=HfW;xUd=3h#}MnH`_+!u>y4v+&fx1u9_cZP*{0)&zl?Dvm(A^ZwGROP;6Fr|mKF1c1 zjXlQ4hS%F%m8t@iw z^GjCd*u5z$d^?vCh>Mc{+Tnj45T@3X8fuK}Iu^T?-^;7Kp@Ft?(S}P@WoSrI;BDMD 
z@HHsvI$eyUPQNiN%^9r(jp68t9EYc^l>A98Pvae#vTWN-26(%Z{RN2*-TM`7=~y}A zJ6jR~BvBknvod6>>u0ekOoWHSv1n>-K@txmRIFff`zR~b>qA?xSL0WYJC#tpdjGxBbf^L9SWf}v zu^^!&6lD2cnHi&q>&=u#1ROp-^|xm7C{mBoP2~8;9A8j8!|jAMMo$YYVb={OCAGnK zvb!$!W|uMy)PEx{WqHQOm|!y09olU+!{GN<@jn#2>A)vNvciOaih1Z>IbpraYxM^1 zbLJt(|Z z9_%6QqO>s!`riuZ?GaBquAW#fFLgy6APk=fm7m(SY%JbnB_s&{99P5ZR$r26{B#1S ze%UJh$)v!Q!V<*QR3q5c(X1V0B;`?uzhWs#dv>z!4%>&_nRFXYD2uj*Pqo_)0cPT^ z>k9Xzo^_64tq5$TB=WcBZo=oi?gT{)|8G4DmF|N7STZA6lk^6?_%0h>OB1 z#kSHLzoyP$wq1zICFgN%T`JM5 zk_%%z6=EOJV9-l~(J3tevl3{_mCDfWnzpg+;iOqsC1%q9Fth`?62?zh=08Aak{rf) z6=tl(j=o*U&9L#}G8a#XH(jr)f^>0LLWWeTjg5q!GAqy(6<`5soPZIvaUxifEEX}l zABg^l6(prt_jO_Q6QN}i-UgQ!FjsqMK~&_=Z0@R>+t22k*hga_EP%b_Pq^ue1j=FT zG%nCd4oJ}afHODLVmyuPSIcWJjy=T=NA|Yi|1lGUfi?mF(8fS4=jZXOIT=jns;Yf;AWnZ1q{(F-X(K+m0!?xi&A9g5cB|J~5CrdfzfqSi7 zXD8*C-o{x!caIoZAyPrAGF8>S^oh7Uo;2&d4aDHEM zk5VGB)}y$m4tBMmUoMZ#d!D|QAaM5kbIK+uCrv$+v-YOs+lbs8mrqb}l#}3_dy3$? zUdz%qkR_lg@ccA)`v#QexO`s^*V;**4c&l>HCg7M@ShqdR4s?+>J43)2%--&gcf#NDNYYdK{pEt0f9( zSKa(y)H;|3cm~{8+du1h){l{NN+zN<5Vw(AR99XycDo0@NzeNy%1gP{fT)*MKu6wZ zTh4!E?{bEEcX(9(k(X)DmD9|0AnPb$Z?zY23HH!_oX?!kd|N}4{4y)Bi~AmmFmBJ# zm3?@YBYFr6XQV!ke~axOen{_sp*DAKaJ7xjf;ZtpX7fPls#5=Xr(ibLaX*Tj`GK$( z^s^_}`hmR$cXjL+vd%Aj&HUZ(w7tj(^O+jRbhPTkIc%3$-GJRV{X(Pa&d^3oWo33;xhVcIp@jGYMogk2r_$Zf=sJhg*kayzG7 z{6qc0ed-sVUYRZ}oFuHsK1KL#hHNi(r<}lEfck$#(^c&D=T)KTEvW6Pv{KKVP~A<} ziLoTpAW5eYx=J&Er3=9{V$_4F185p7&ohG2j1)t{E*yzBq!)Qf*Fw)atX#PA^o-jQ zx8V6ex3qOL?D-|T!z+u1eld@XXnqMmo0d|mj$|mx$8%8X$iVY_)$U? zikaq?K0dt<@9bXcGG|p1loC_2!NdgN-SZAxIg>o;IPc%gvth2`rm7zVdd9*Y>o);P zUPbwcx(lh=cGc9yEy(LzM3Ka^?x9)2h5Qdm(C6q7+n$I2q3k?Y>DO}v|3Zpv3xGqp zwMXd1{!*5ztCk!8i!&S1G3R@9BBMxNg0)|97D&7jyRp*%a{aOD#`pD<0Uh40y}+WF5c|J& zyy{ad5xSHT_OL=trC$a*8sQU58w;7sVRg0edHa0?L_7K$G1Lt;#mIwAJr|^DkJK#Q;Gp+C1?_S-msyDoqkqrU zoA0sq3b8%RA_I|^{J-qsub z3$&y*$vFIuARIE4(8_#~)h({_e~+GfI6ch#@C^lW!FFv`5C&;}db8 z7(YIwVc*KuA{)api&DA1RU6c1I*qwhaNG3A;|y?zf3uCS!~D}Hh_Q@j!ftAn$ZiuJ zd0J}!kw*cbXfktlEHd+)P5y3TV z*OaU>adLZvwJ)K{fLw%=d~v8I%6R$wN20Jd{JAK2X*SW)`HiiioL5l4FVEec zVP1pm(r%=iA-u?^lHVC?K%`tZ)Y3~n7H;@NL6??-%J^CevBW3fGeJuY(ihx*7TQB$MR1y!o}Mw?ac?M zjyU14LRO0s(0$8{i6#&|5KA)U%kSDkBhnwNcOUQh4>8TP4|unA@p9g|wfYCCrRubp zYWtE}osmk*tpI)3ZV9`^ww%yWSE9Rf_VOFzK~~^t)g#`}p;s|u{a;;NwFCAe6v+Y( z{5XvNCEPDPU~93wa09%*UF(q;T%m1fjHUnmi@I=g+`s`Yjt(F^#Z#Fa{g#XHTGv6a z=1NH_KpV?Q#1_LQ7?`V(uL94~pD=>>hqCc_(29jfY&ISL*q!;8q3BZ@a$8AHJkVuQ z9TaC2Hq;<72*Q(UgEG=IFBWgk3{Dr> zqZGwAU1GIPAaT*pz08J~nd^M761v7m-2?9sI7O~)E1y&y|LzR@w=Q{XLZ#w^Bo9Y6 zM1k4nQa$L}sW|nxk8+>mCT5-KfH)4Yc$f{ABHwnUTxIxY!`MTd4}f@PfG^+wGj7tO z8-J;vuH-npDG?NrZ16W@R_jAgmQf<#fT=4tv(cP;a;jb*aoK6$X6`5 zOB|xy`B41+xi>PrTaN8~D*NpKG0+yu?=63lFfTEm+0f}pP8b!v$b$D13VPEfF}!X| zL*IiWCxNIp@?BI+4m|?y$?%cShx`2`?jRor#?pgJ^;9RrcPp#&RO@`O^^P4HeIqZj z?D`^ZFL#YHNwO{g7CCCHY_k`DrRS-B$F1M?uaTsy2N>%hp#r&o8KsgJ<+dEyW4a%G z%oLm^$n8*e!e}7K^`MqY#;YiZ)IEAM+3i2(CdbCOtbP*Iflxy~Z%%UKntarrtMs2d zMa~&geM@qpmTXd!t=5eWhp_`dUFY6}{00ks&-328M|#zrr49|&orj8KHz_eIwU6M% z*ig@f9Z(3#7@l|kE>z?`X`M}6Wv8b*eQAWKZghZJ^;(xoGiIPQ=($=QghV z5%Fn`>C-woEi+PJ>pAwg9Q2I=kE^d?Z#lpAIz1a;T28oB{7dU$4Oz1bm}AyiTWu`= z`5siSlQcT-&5&*;U$TB8N^Vy?(PUL$Rv_Cj?OdS#`-242Lt3Qjh_;~miZ~WxgsBw$ z<_$w1C#oIFeuJ@hXfdw1Cb9i|&}E@c4fv8$wOrE>QI*TFNj7W13sqzeNfhyeh*;aJ zy|1vY$R(y{CT0RCV=i66b)d1M$AmFftieNbk^t;nk%m)y4J`551pS2unrtsnwad>H zWg5!1K)>CAMBuJl2Q|i`Mq>3;zqfqQ6AC&r)qC9DV4Ag0ko6xNdb(yu6VlZ5@+&<< zWUs;_)nw5g*<04D5Za1n;cRTIbwa4n!s>+(9ao)MhU0-Z6JN@O_ zfr6Cfm4W-Om`I~C{yOX7{ndM~_t}C=XFcvkdRE>ml&eKZM>>-gU7L(8nZleCz=^5| zw#t`_?39TZ&XHr)U}%Q_W32m55OvvY+z5bmk*!*r=JFrrI*#6fK1~ItJ6s>gt}FJZ 
z>Dd}+f5)&0RE_T@boj}!A2PV=1P5JBX|96mW;tE+$kmo<@)Oc4#r442-1S*z)^AI2 zdl#*AMy+)qM<4Ftt1K^O#Fx3%3**6x8b=V-(jLmdbYxI`fXnmqB_K0;)D+#(Z;|rdsYPuB9Y(3HZlH%)_GEtxsaI)KB2|#GPli z<0G5Q$7B~I;BwP0(aWcxj=_L5z$=Qjj!h9`Zeo~p_27ry)VKKjLx4~mg2qvx|MgY= zZVaRua8P@W7#<+Zf?a+4Zl<9>JdJOfkgde(pp&C4G$(*VdT;m=dQ11UjchmUBmKr6 zik7#o3hlo`s9Q@U9k5@oAV0F6N$eLd*|kk0G`dYG0;rF0?RKGUgwG%hc^mS!%Zd_6 zT4bKq&JJr`cnNR;oxyrQoAWDa5?yXEk?;=HdMEKh8F@R59|1$wj@ePr>+vHe{QjP= zsj3`WN!Jgq91hOus)=pXOBE;dI8g~3>zC;J{|gzAxTNE%)ORW-SSbX=%te}vav9$u zybeKz)0Wzw&4d4zF<;}ZKcHZ66vP=Fq54;-`ZcX7?4>71^8w#L+&lH@8vXQ?Sp~zY zG{(lXLDCE)PXd1-WL*2PsT7jXgFU9g4s?Yx_^keLT;GMPm^!;XQgGfOxBth{xj3}E z|9@Ps;$ty zwsl>#)z-Flzx?+7{STkL_kO=#&*$T@j##0&hPvvg9*(@IMN%g&iZNA*s=H*1{mmC+R)OKImD@PdZh#%|LPqp%Y%=H@gyZsE zix`j2WOc*u8EZD77h-8u*ZLVV6J_zTgi|Q@DPsojcJG_r@JFcIk1v!NWtaW}V@bZDA=3K7g7hR4!MSN0tYO`z z*fkUDr);h%rV`|iDjSh~*gbm;=@~*Jy*J~d-A?(X*&xOdT8;~CBYc)(s0pfjLci05 za*T#9S2<$<(pyaTqO=`~>*Cm^RSv9QGjg>?aJ)C7x3|*?O#Cx&GUPZ|lcM{9Lp+dB zBIp7=&V0h5Ev#uVUs`*ByTVLB9g4hK)a*9k$aO9yLr^5H6YFKig%DyO;-+&}%mp3Jc@<^CWI#Lcpl)AV@@l8S(mU#*ifsVME3O=}Ax0a8 z2mZ@-$l?7~Igs?D_p=;N#LSYmv$&4-vdz@ANoi1JOFq;?jKOB0Z}T2sse9Es>TrC5 z7_PQuiAru}d0E{roPsZ>6iqgMpyq9h&hF4Pz82DTGWX=5h11GuN?z7Zd|XjvdzheN zoEfqKvYj&-f3|gSRGH!SzNR4iW4&d}67mUrJp^~RQqnkR6%*Jl z@cjKC+T|kwk&J-C*2q4Mjj&lqJ5GLwdC}ZbayhvwC=})DNc}S2MDk?bRSnc;GMbj- zWADl1`ZHRne@}5khHj%}PwG1qg59ML~O5% z7$fAHjqL7me&@xJ=af{nqN4d(BY-ss_z7`{=60z4m~i zvS{9U9V?eaF{=BhUI;h^to=A9OBZ#3$jh{^UO9E=j9WGSjQk_kwh|}Y)oMXFrF9kS z=c5p#WeYXK92C1YQ|N~w)m5`~eK6_3Brc?c&EH5a4EgJ}PH z?y`nz55ICZp|yLec9Yz-%tc-AG60g0I>!MqdBUa7)=)1u+V*?G>OyHEh*y|`yDaa9 z(Y@q==fpsz{KHuGYlgFMXwBB>p@}cykUx2i?#~_mq*7qYhw!%|eb+W5@FjdBMh$Ow zFdHdVF_m^CRp3F~VX0(f`f#;74dC0Fk)ZM^3Yd$KSJIo2%=rfQT@1Ks_ku%-ZS`g3 zZ{TCNzoQ~r*$_@vUcfC!`8h>IFDEjBpdX6n0_z5~2|7*qwokKOKF@fO5*SH0bstr*oG*F&j9lz;?wRE{5H>HAPgl4bV1M;5-}U^R z%U3zn9~_Bd)UO7&B%*DhhvM%wdi;Bkks^-6R$aR+z(t*D0W*w?dcs zIi-B1J`aB$2{FeWYfshS$yMW({3^%X*Kq-M{ZGl9FUe;{_K%y7dA$Z}FyfRP`->7o zu704Bi*5`#=G{wzUZE2l7K^5s%&m~`svj3~V0=H=5$^#EDV7+HE)7`CtZ}4LUl?@g z{xMJZ{zriw7k$L4YMzjm%|C`U`XbRS%Yo1No1JdgfuaJ$L#a8BP#f&L{zksb)ipOK z^HDpKXc7;aA*qNqC+pol{El5=+9C@K5lNgGc`2_*M}q+L}+A1}d2^7Ma4x-<)>DHw-^Y%rbQM%JgO{#|O_U>nu0*jmO5RUaYiSJ0FcXYmAb!-W)gbB9>&R7#27}e$u00zNOVQX8Vh9< z*-bQ#n|*_pofR*-n&giS#_(HGe#?Wn-o6vsi`DB?uRaC@e0MKy;+o={~RdJN8k|E-NJsj9+Jz_<@sA>$W_ zBWh0cIw%OxJk=D036jcEcCI7Yp|DaT`0W`sQqCIJ z*_+I^{O9wYE+kE~SyQgs8~uoC-Ofaek>0c3xosG~=2j0ezFR|S<7Y)F0w?2S&5;X4 z-61>F6IGcT1p7+~SX}x9cH8U+%&hZdwBHX> zjmAN|8a<>>A`Ny5qkCaCSQaW13#E54CsnV`n5m+|*K>WYe-HPTh49vZn?rEkO%$8R zGd}>=u{-gfu&U2btSw4g7V}!+UtOr-W=)M>;X#s_7`)6sEDg36joqbuq+W`7R@*C% zyU9CIiTYLwW7Ix2`i?*cP2%2Z<(YkjZS5{O$>L1&CdPY?U#|LAK{klBHU}T;QgPxiF5GQ%&aML zR~!hR^0zngM=STcxZ2uyHlGw0v$G1+@%jG zkcn|q7<=%hNPD;eDiNL|{jE^@-QrwV_X3@4-tmC;ZPFaYS$g6Mjh|`ue)navz$@&n zRFdC^EXQj~c@LR1vun~Jd*L^M8~6*TIL2K(U~!SPIVPVOc#wZNI+d@iIzHvNKX%2$ zFZZ?kISi!9F6AH0(>3|@J)B1MIMkeF1*+b@qvka_pYn42MRnf<6eOPL8v8>;J> zuZbuMw!oTXEa=)kf<^}%V=t^NywRGGKBpgk??Bm-m>tlC z=J`q4I#z)!hBo8cR)0!c@2(g+z!-QGV!0fIuRm{3U6X0lyLfem5J}ihMM*ZuZ#LW{XpW$8_1;p{5Pj z8P?1Ibn8KOU39tCSnxLOKH>8~*jp5G=Q4KlHPeQkMi)q_2)_-p4pn=6$rfmHn7!ev zgUTDG{r0z0%=*S2C|ihxh-$D(1|0QhvZY$EF0D6-j6a~Qp@U){Zl@fl*Y4)Jzz#ce zy(^<>+#~om;G_&Y1gpUGl=jsYmSl7HD1-AJ<+PHwv+ZIBO?det!$f>ldQsXr8LabK?p+rxpQ5pV4<5PgEg_+-dUJiir}Ao>&{y!IeKP$P;lhp*jw zsq{DMXeaMOxvT7*Y9>wMyD zqx{BhM7z?ux*aP(6S}O&GSyOF`DbB$aW>WcJU(E0m+UDzKIG6mxyn)nzYF_I?)O`O z?t-wFAge0E^@U}N?re8ip7Dbnh5V6H=v)Onq_UsG*nmYIihAP|ZB(CxsD*?jN`g*M zyDXI9rT+sP?!-AFo`^`V>9}^Mp*idD&6t)n!0Yt5Rrm++(wW9|KNA;cvyKf8wyH9h 
zY59-ClYHy_O2`Y8_gh;|Jx<|oTJRY7S$^4&kH(3`On?eEze(FY5cW7Io|Uh?t+g>w zy!49KyIGg5uXsiGM+Dx zmgMH3%XkT?7{{3W!hoZ2d9JAWV9o-)lmBNqJ168hoV_~F-SEEVSw*N!7A5#mor=9C zw(h;?{X$LIFS6s+hGUxi4(=sibVMi}-@s8v*RF{2l-1hqRf~{%2w7Pyc|7)VuLWeO z?B?NIFdu9!AV{UfLRE#kAh%e0Ql6{X9l2)I`ANzcT9Q~@9rh!xU&>rhQRj+JcW^3I z4eH$fso{7&bryd?DgAp^Q4y%m)vqI? zIo|nwr`-hAzMQWDn^rJ;72V&ZWE>K?_7*Kja?o(MRm6jd>b9ROI$x9wZFJGigrlx# z(tKH%+#K*ifNcPX>~4ecLr=19f$lMiMj6|l###I5Q4h0>L{iQz@VL@Ce7;2~fMDC9 z5HMuIbBx$!9dC=ED+TLvxR=`MTRAok{Fw6X+B=XdCG>5ob8{Gn`MJ#2JVAbkvC>qK z0&9P^Z_*)XIZs*ZTqC*9vsIrp$WOC{kapwl?ELo5^-5f7ewuGhxal z_;5B*H&2WKCs9{=><<VODJ%=@~2 zZQe!R?+2UJQK|fgAV3JdQ=zaM$xqH3%D(gOQrhqtPl%k^=jX}wxBLOW~12|)7Ovad!gIN zby!xoPk)xTJA+%H9eWc%8k9c(u?s@TB{^VqaEZgYD&U-z=K86&8$bXYb!xGu-(4^; z^MF`nm`p7goBa$sdO{)J6RF+ulRptQd`xJfv@R@nXLV|pX!{qpGB9@NB=-RqmZfA) z)jv+~Kskw|(Tqtm(+!$eXdUVjB%RznO5Khs0U;k&-=BT;p1+kUfDx#Rtm(p`(V{dd?C58Vtlg93o&9Ysv9;{2>;0&wm2iLCexQyXpWDUuH{Q>jO>123HA z%$k7+((VAcCJbL}Or39R1XTbnk_h$(Si#v_>^a`wDpd!%~x&A>_9Wln71eIhSt*m&brFYSOQ^hOCg3;0uWLw217 zHC5@JW2}(_oO8@ih?4&(9vQ|h&1o(a5GP1lUuPA5RP&`$t%HA#C^Ry-2{>l5?%aC< zssc+>ms1W8FbDx^!FkqCs6nA@UCC1Fp6VbOsb6Zb3_5_T`~{BLLjCR8Z8!#B14ns2 z)Nb7#`%u$?T=E%s-04WKdIYD1tFvs+~O+~yggBTTJV!5 zRGw!$3wwcD4tEM4a-e?TeS-5C%~+WhFZLr46+`3hEvC=PrrPjDqJMAc2fuX6k4;PHSm6)g^v zd4PH=a;&6=9{H1ZV{tP-ZP) zncEz&oa6_T9@!b8{V)-e;4bXf1GTc)VRSA6@-s5uJ)T}`cDYka&Q(3cw%+f|;s@L^ z;Tg9F!mOTqS0VN=R)^Q9Z$}w{kVu6y|6*f>1RP4`+@eHJG0Bmq<@Jj{1er{N?MaK) zxa-uTeZP;87YqNG4<6RIHI(onqWaUAQxsAQz~T``AH?15{tv0$D|IfonDgyQ$!``;x3f$g2RACKu1WYd zi_t#Qn-_!Qy@Ws=$4Y7T-Uw#JeeEtlk){u!wnVN7mR3-eZGz2TELWNam3fSAkWbp0T#x{L6U^2Y%%heQ_}d|wb4iuxI&)FKgHl_qXf&90 zNOm1goVMpY(G4+=xs`m+oh-1QHL|A*$~pr5sg}XlFeFKQI8$QWK)jt=9KqM~$MW|mHPi1`i3)31I(peBjw^20L^vwSI4Cfn{C?>rUhh>x_ za4Yc(pjd0=4Lj!5(C_tX*3%#)RY*I^Aq`5o(GS%LwTA2j#IhSY$|fM4G(dZsQy<4( zO7_Yyezl8w8c)2SiN#n@w@AyPz|o928%zPG<4REd0XZyrqF(-98Va@Kcs;=0oh@s_ z<`wF{(yven?(h}_a7UVZT%%K?M;R0Lh|)kE1pE7yq(q{e!>p&>1I9t0F;**`*2BMs zFTpNv&f0~&mxsDC5%%5SsZ$H)c^^#@B3X`h60gB9qe9x_azwOd7zVcTM&>sOU|@HA zl>apC{TDCkQn4*dyE6s=W3TxD8Y4gUxJ7uw-{_b%&RVbfq3Dh z@AnRkIouz2wa-yHtK=IaksX+e{YS`7js!mMBf6er2DTg1>*Av6%bijvnh__&E=|H> zHc(v8x~F4}3{wXZ+auSmH%VZ%>7UNAMT)C&b(8A77#J&$5TG+VKNcN<-+!VqLE&OU ziWiasdzfz13xH9oWt{5My>ZTh!SQtXTJ4(WgypFIdKKDP z9TI0D3#-Dw9Qe)!Imw&{Y)gTWg6H`6@m>V99c3j?`>H>P$Uy}wSAu#ZK~+#F>m!7{a2J&xo7?7o8b&ez_f>qAZD*{Zyk0=@@xGtO7xjP>ZC=&}E9e;w z_Jgpvkh$|6#?^`ZlI}C2GsAp@!Ay*4i-*e^&{oj6HlP|Y>pH<7wwNfO4*v;U2e)V= z%J6^9Jr>OTPRTl2|DtU&e_oDvc^oT@U1@q^^2UpG8!zEi{ z4NTy>kok{{s*U;&x~yiEK93mle$uG8MK?r_N}Ah|$K6}@n<3?`n_0tD9-q?7{n zh542j2o5vB)3e+2+}Y04W7PuvewLN-mC_6(c%Ue!4{nWn^TNVyy~zgY4Gq$5>?Dex z>k<5k-4Ycb`^bItYP^?igaxZc0Uk_8i`XTCT0Xx_u6*zdD5EGTodvqA^0ul7OkhG9 zA(ytrVJRj;^Y~PsmDV@I_O&f)UDP+xQjiq=aU5w)IA|FY#c)DwLq$Fnw|?@NFy2Eb zVuIPvmBP#NgSL_!!5gk*F2<~1{@r<+Qw`wmXLf{d7LhM=p5-nq znex5IJIxY6AJ;Ah@I5F|-?hFo{?XduAscVfB1aodvY)c7Y@fJNJlq0A~qZ3&H zS7aZe7Hc>q|eD9)^KSL^e^oxPUTg{r7+1l0VrlgU1PIQ`I=d$QUg3HuhG?6*_r%A zFwxv?cZ>lhFh-y!Ji1Wsa#|}mA#;>zPu<5f1-Z6r1`HY?rl<=8jBNviBH>zyN?(Cf zVJ08w*2c-Q157W6P*VBCN=bH@%P9CYCP(`#=4Wh_xQ-t80J|Uj>Fz(T3)1dXSc{oAJ*DY9oe#XEpCij~QB+ z)h-z+u=?Q@P&t^@2zySt{u@Q|TW=kkfOrpL^P9NQmK=ZN4>>uVfwk10(YT?vD&wx< zH1V^!teUu@X0s)%!z7IscP6A#^UNt;-3YHwW}G(21g*TELEd84hw+-aSemIxUN~)u z-Ky2sa=v(AoGDj7T!HT`mem5IG9Ef%JoMWLrqk=ra2;6tBO8BoT9OyFE${qiopBfMePFXK^A z2wHL#%~7VI{-xZUujEujs(#kPZM0i?X@(WxQC1pz$x>2>=R@AqPEP73+Mj#E!m zkI}riyt-oLy=MQdz^`e8nhM6B%yMcgJI8VHgNze*p^nyf*8OJf3C%U3XQ`RPH*b4y zYv#}|zyp@ss!^#1CX`=EaeCz2C$A!OR7bZPMC6V|#$XZjf@ls?%ROK6D&qCGJ8}yD zpfb^WU}Ek(Whm}9Fl<6UiIm{MNqB@U99=iGDEZHuv-*bxJBZVaJtf{i^^leIl|vh0 
zp${-Ckl5(&79tBoV5(UTQFcnUZHn*kyb@#3qsez^gbyeHvmN2{0+^1b@J(}VB$(== zEgjcK+dE3a#%-luzZoy23WM&T*}xYC(}zb1#L{bY5q{!$IO-Q!NZnQXL*t=?o_v~0 zZkFOWCOpJ)Z1{gVM}i5Ef}GA8M+Su%^^&CRwK=3r*(H^ydlzJ~W($9+`dyUf%DGi6 zdxK@QkJX82A6sc_N$zE%V(NG8VAbY!^sSxG?_F?B!gBMKk4;@8MpS54Vz9 zKQ}N%i~dt{6~}X$7kn%RL4!za8hW{l#800&YWs}AxLSq6MS5bFfcgabo3%LjKWX92 z_`RSzie^NIhD~R!8Bx6%BgTt7B7r>MglfaHL?7j&)lT#?PO0N>jj?wpN5u6*|A~%a zm((9;($Wwz8Yb+UBAOYWn+QBWt)qx_-(yUYwNUxId(uv0;K&FXvtUIathMUXA7JvK zucd%UVxaVH+#LKM?nvZm-yY0T!o-a@`(;~{>!VH*oa^GQ)-fQLlDF8^9uut0`PNv# zu8H)wARpmwg%^c188XRx28Pbi;XuGEU8bJ0(S^?k5TOV`9a zjyX1Qo8HO2W~s59h=??sL|D3gmvw5Eu``FhbF*LZk@%@-%m69JB648VXaFRr7a@Fv z-B%nvLm#Q0=5B%%PaYTS5ZPb7?>E`D%|mb!CP_4R{~$LN$FkGWHaK*W4i23Py&&^j zY(O)hlMd+xih||v8@Adb3n|FG%ud_%pSb%W zzBbe^VY$N~N+XNXro=egyZ0G|dB|)!9@vH2}W#U?`mkd$+_J1e9+?a zg^MGFx`sX-h(gv20E@LACkz@f`SN_*N-9|VRy}+cb!Qx~JLV+i2Yp%{Pt?jzOp;-n zn#u^g%}tIaS=C<?zMG z#}?h|4J;$tTlHgF4S!fTlZuH|S}3YRFie^g`8vk3MUUL0q298s6JNOg$)WfU18+g9 zu5HK3!*JRLV^(#D5wdoa!8iULNZJTmM;?aiZn}AZ`@V-JE(#;n-it$lN;?}VYw<-t zF2b5Vwt%BNKrb^8K0`m{yRC)lu^RXd{GG{G`*auHx2r+bbW46CCq^6Z(YQ~=#{AN{ z|8}}(bP68<-7s`K9wvj1RV5fHh$Eaip;;)vG(-sIpjGs?fUYzs>N2WObhLs%&u^FC(P&tmETb&6pkb?n&;O znfeqr97=gy4oBa zmT9+(91CO73GuOhST|OmGhzA_T2%#Q=&PTN8$><9gwN!#s3+k+k!I+he7_kz4m?z9kSdMb{eqkQs#+_GK3C=slHaT@8 zV4Kk!rBml>!Ns_U4+P~$cwu+pTTDY3yEH56+H6M06LfCv4A#QToXFA+v?FVYm6K5cw*0 zki7>jUF$1fkHNT3_2{Tb;mx}eSOaCl&i?c8%*3-yibweqyvp zKl}`oFF8H+&?h18CmEB=e;56Vx&ZL8VOV@5sWSX{(dn7|1_;S+&E}rIj>HJOQ3c!s#?9AWR;B{U{MeN{f3fVN!&v{nF_9eZBX8! zR`UZisi1G-4bPMn!$$OE7S*P{=RFvv$oJP)iee6`7A4y`AD;TYj=PIiEw{`RQU5_M zk*m*rFBmqO-F9exWCjHLbl5RV#HJ+b#^AETJw7m##I<-v8aqJ_FDOtL97=*1t;Wkhjz-f`e8=-;US53yIZ9Nb5bJ6xZHDv(o%Do|1 zxW&2L9|P{QX0@f#wYhwVXCF;;4;mH1b=s)Cu0^pYo(k-F`r_cTEfFdU%$%y8!2Sr| zyB7AnKF|f|%IB}9d@4E8K+z|&H*Rk{a@M^m<=)+p9?>}k;AO2(EEHb$Tp38@i~ z6Jv2hZPp#!!(*awx3sz_wPPQ^217X{oBnIE;&ARV0|UBDMGmWsX10|Fk?zq-@C zpb#Vew}i>sVr)rI_EHerh#lJ=?Z0#y`+2-k%1Ht#jPLIhop$>GQs#ao=FsqOL6Qd< zOZ(>}t7KKajHqmwWyp5z-U+^09yoVIdReHvG+RpaEp3!Mi!%=x)G|32+8HZYLh-&5 z$vJMM6Pz~!O2H_OOoCfL9g+L{vjQUMucV(I60Z5O-q7g>RqvvcK((HNJFs9Dh~jum zoFerhjulV$Wu;R)w`EEfltaqtD|IiKA_~~5x?DSwZjZy>q7>XwOFbs`PnG4({^P!b zN)rDm#sxOrjC0Wuk+Tcp9`FivwCRbCF}o(hAph4e)Alj`qY@vNv|EQu##ByFJpdf;hZJ zd_HSJnnjwE)!7?oQX4%KQ1VU+E-X;EM6pM>q%JL9-Kl-A0X)q60h*vpAC3o0f-KrH zxCP_J)_l$wAUmXXZm*+ZSh7rAoUx7yDg3mV; zP^P%TJ3AGCi-$AF#0)s&S4yS`Zp-Ke!i}m+W;Rcy5Ih+>(l)%+#lKM3a|+aTj6d?y zL*+aYb%Nq(Db$k*^hOJb=h8gSa-2;WaeE3jVi}abv12DceSx>omIz|!_b9)*%pZ)+ z1GQ1#pl?zwu1stpwhQMrmlOKU?qKhU|7}R2jWgrwW%{spPLKK*8O+L_3(;@Y_MR9a z9hhXc=+4PhZ%@l1G(^RFem86aq;=VMUaLc4sVMA>fXuMh-7mU8eLYS5bTv7uIoxEr zVo^sRJc0Vb{GKS|bTKx}NLNqHvXk#RXmcy5ebNI|>VJzUd&VjS6KVK1MqVu1G@^r2 zU0X+tc&jz)_^C}dnPr~j$Vo9o7mwO~5 zo;XG{T44XxIgy57pJl*#t{fCpnd);!VMx2U_P?CoHg!9{&2S0q$U*7AVz(*cnA1%^ z&N`C|(UUf*eV%ns)DX7PQ~J>Fr`$0u-eC#=>IxiX)qvn_HG>%2JvtkOyq)uH&@hqa z&DVpSksXwbmiM2Zi*8ok14jDuW47X@$c18eh$Bzq(V(!2kLZmgR5-qnTjh%j_o;%Pw=hAa=B`$8)bH@i7c>N7 zf5D1<#AULO$c4kx@Bp1Q-o96O7JXc{8M0q$pyDf9s}MRt+#Ae2VjlAaHAsk=&@k~1 zRp2K?2?TDgPT-*Vx(M%ab#lF>vahwSplWasiT?06RwlKS+^d8%S#-){iyo$p<044Y zg`(kjP%Erm!!vT4sl~PW3HrL$&C-1$!;1XjrR@-xd28k|s5WUf=pu0$45Wg20x-*t zXOA59CVDg6Uaa|y8$~VUar0`iyzxrEcI39wJZV?@HSHrq_I)XPXCHDGWqED58(Zj- zzQ*H%?_pkff9F&$e1hM4F*}nRw>MO9YBGRrrkk|sIi1Bjj_(`G8C+PN2D9^&J4{uY zk^Vp445Vw)`)YkQU>=YML1&!YNo-tHc-yY;48l;U%2a#1#mic;R;)uf?qbdu&%Dbe zA7e6~k4sxe1$m{GgN`x@KweLj#Vdt%>ZPL$+bxD zpAv(?b={Wy&FX!^>KW}{@<}gPB9_ce-{IciF-DqDZF~ggS`yaPM?w%1ee??Pn+2rr z%)2WoCU3GY=!=YcpD0K%>je8Ob-hx7aJ$;Sv>6o5i)ra|jRIC;)$yJ7j 
ziED;&8JStn(Sz2j1djgu-TFww0;awQwJ6dy8(f!P!{_f#u)NOCVTDfder-TX=A#&K}Xz87y>~_3fJgiu+ud>H|E1$^fzq7sF(WqC}4n zjUn21vg~VSu~Uno0#nS2irAMdTd}!_!5C@Tz<&V>L>}K7kdW!dPa2ImE=*Lxf zFzQ=pLW$(qcfHW#Ont#CZf!baIeMAOad8A@T3u%o^)6-it z`4VYSo3jlxQQ;ckKhh60d!|8UkzF8EW!8R9vPw8I9bLlE5mg^x1~tFT0R~z{c7MG` zM{GTKPI%?H2Z$1oA^1HV#G+GN+v8Gm^2wP3YwYU$2WIol)8BcFMu$=OLWNMD)iWn1V_FNt3ht<`^u6$&Kochu zI40Cr0kTUITl;R*f|aMCR;qOiRDRWIdxOxUAu&aJz>p{d6_S_Sp%f(33bf2^gt+k7 zYqM|DwTac4%19P=XPiaMV?|s}qS~u4!as%ul>(fuwCj`KjnOnOV+@6E&uL3A*(v`r z8b{$*{8hdiQmYF=6SdR;?d?Xru5G3sEVj&*l+*;`PfZ4k*qP5?OIThanc#e@v z2<@2nIOMs=r!`u{@{piN)ARtB0)pR5j~|)<;4Gf{-;+kd_Le0vv7v>+ZMBayOXG}? z9dDf`@(6RnU(9s%|7L#XKzXJ-bk#`T!A8;npSBEtw40$0{;_0aYS@&vd^p{xPrM;^ z#AlpZfVqsJQJU)WOP!P_Oh@OW!Ic@miK10s4TL?ORv6duGEk}Zw8Y4|-6~gpw$BuCWsCI)nqfH;TY-6KqoLLXHN? zRWeg(_o9>Zk0wmpb?zz{n?NjX&913ikR z@iVYZn6oZnzTX|QW|@6L+IzK49`9fP`AU2R4)A{c^f@)K1;vMLw*cpc% zl@3mcNAFhZQZQzlfon11ocMd($@MO$^E>-?j-8;3f}`Hd(b9^grukaBW6lz2!j~( zcGWJsWu*ZtF}`asZw@*x3r(pnI4y1S3#{gqD(YBQAPrgRMNI`*=#s@es433&kYUZi z@Ribt+zx`BSziEir>F**A3jX{0{z`-Pn^>=AOA3x^Us4%|m%PSFXhHXYWppsj01{f(b{+W)(Ykpun4i~d)(b||$k1M{%fCFl2N zC&&4FT9v6L=;=gN-qZZQ>(TZr7v=N*R8BG-*Avq<(vr4~3ZK}-?g{T@zBq?79Nwkf z0UW-MvY$`hFKqkxwC)pj%sUJY;1x^BP}e(zkU zr8VpmZW7Jel-sU*o)A_-Rs~_jki8LCy7}EejJ9xBw0J zwuyLk!?^Q{qBpj&<$Dni#Jf>AwdRePKWvy3@#=sQUHqQ*cfI`Q+sWt#%e1;bLzSR< z(XKF97DMCu0=seC5t>UK&*Ir-yxA7|Z2H~gioSrzoVgb%FGZDb>UiwuAjLC!UYqe- zPWoF!CpN9xb@l`yH-cpHyD5JHDN^Oeg?<%p!e%ikQ4|}^EWeI)tz9{?ROg)mDa-rf zg^tS9kqsz~`VXms@)*+(5_6jL$}~xHw!%S+E`TnGcRdG;xjX%kIqReI;)MR&1Xj<= zh>0MKrshhVCg57hqKF89X_D-g&F@pak`a?X0+5a(XD|j~@YkKvp}Q`b>b$Ib0MJBw zzLkl>OSX&M5ato^kMBo(|NIXx!NF9tC+2Bo%nl_nR@1L+;8wZUxdBu_sjAN&`+KC| zK|dB!o|7!!#rYk37IjpVM+d78DyuIM)B5}!_n-`<%*gNXK6@k#zP3TXh=y!GKAGTdrd)N`S4azte?Vs4NtI_&$AU@Hl%l&v#HZ1+ub9m#nM4-Am{ z`-=SjSJ5Q=!ThK3kkp0ahoZvZbi@i~0`sDDt^E?GlXx$|)bnmr22CE@X*5nf!8|wp zaPnD{yzDu)&?5;XHvOf%Hvb)LOj%mDT~#@`g^?mRBRYKOIxZ7A-@+}%E*R<~KzL2^ zWnra){Mq7%Q}kB6H0~v<0@x~-T|99?VpQn#e}E5ihuxhLJ>_m@;mrC z>?S6e!)(lY4`{^B)h_;|MrY-|m8Lt7W+NFI6g@xmym0OF=$EnUiLM9wI%BJ)Hc(== zP_>;>bp^6j7NByr`eXMf%eUKTiG{2BRR=-AdL~U`H!qq$AAe=YWJ7yqOYOkDuov)3 zeyu6i_KGM$>vJJ|A?n;qR)jVPV?VO1p!n2q3j5{@Ris09!LKL_$!669Si{WEI(BR- z83$Ew9I+6D9(1=+*WhggDc;V$|Ft;t*I|~;$6HQ>GL+M9^X(~Z?Q2a!cBp(SxMMEX zRWGt|(ylt#`^9|lQbPS!)X_`GP+w?c#{JOk{?rVCq&;b3>M>NpnT%(=K&}u9T76 zMaqcmiMrKr8V3EJFqyl~pp)WhfIJy`&uQVaWJAr(rk(!U#4Xd++VpPac`11>c)gUT zZhmOXJztCrT3Md)spJZ3C^)cyuJiXrWiv03BY~S+vR?xrPMK9#(RjcTVc^y__tq0b`B!hiiesmjr=a7y(JTa<mm9 zV<%OVd_fYUbKD;BRs9sA@yKMnQg&IWc`wLF^CVzdhKkpFB`zBOgj;<)^zf$_9%j`; zlAJV7^xa-Pxcv!r?=)@gBykFQ2rsvh7$U>oN#wWX=ErQ`#Yx-?;GE`Yvo$JYD~XE= zWgl#TIL3sJw6dzAyIGAj0qh_O$!QIU7xI_tcv>}%gx@MwGHN{Z6fAv0pFn9 zDarAoI!PbnZ)-P{RP}{VI#rV<&av)}hC9jXz5GJUb5|gLgh5(=8~Nw*SRXUVt6O!n zA(N`km>g@#!}+_~B__9+4d3HMUcvCTHECJiR?18Hw5rDUSziXwJ}f1>E8$)ospkjI z4*qba|1bGMh?#aL%W?Mgdqg8}#&fdAdHx-2EbAW23AW0>rw@HZW26{SCCDN3Bx$mf ziDVpq=)JcEN1E7mW6X&32VF9PWeeUc|G97#92LT;xWVeO-4Wp48kezbZ)gn2}v}GLbXK* zsi~yX+Dd92x6)a)tyZn9+NpLveE0eN{)D~X?>%3)*Yo*!-tTdjy%{29(R1!|eu$8Z z03N=pCAOkJas@H7l_Q4ZdH79d>5VZg$I6OJlIuw$2@BrS$C+K2`_wYmxNGy^J6&|# zLX#O8_sxgK(b+_QNaxqo zSqQGUt`h$MKU#FnlNSK)FwMqG($D)H5Tmwe0R7f%cZQ>}v=#c5)jOt3%fzcMq3_9m zJ=w1lUhuldIQb;m`GJL&>Czz|-IDYi-C5$*)DHU+z4#28qAnmT6v+uS2j~gWalcy| zh%H-xe)M=M_#gUcVN=y~9fJ^^I_bSo|CDWjee|kDf)o zM>g_z+y=xW_wZG!k#2ROl0^*bed zH;cH&X%_vY!Xf^5=quds*J|U0$zmtSyp0Ut5~%vAe$TQL=kc!f&2vVY3pthYBRZHj z>ELdaV+?e_JqH$-6?xH-C$Ad67=m_C>=HLj+tUn44vdyp&+yDgE@l0(zBW>TW6<@P zRA^ShQbIjw%E=;##0s7RA0_0hRivvGUen~cLUlBs-RMu;gc_E6&J_A!6XA#Q?lBj0 
zF3|&*bb=L{?4K(v?SZSzhe6@*1s2j94fXP6#X!xCGtIt!;T<`I(O^eb<%AEX>c&Dc z?xbpMOn-SP)taO}-fEQy<@dHMwpiAC)XKbP4^HPC`mGy;Yfd2U6MtD$f0do*Z>VkP zHe;bL7JKp!H0Yrs+GLprJYPC07Mo$bKlyaxp3 z5g?*N;_DdFcAQ@cDiU4=t`~l2Y{MegkRw^N3BTFOjDZ}*2DaloK%4^G$%1g~m>x;( z*T3N=V0wu8*U~OAixhbn?lI!PfP#7Mq*ZG4h4{SJ==i@DkHf<%VuQ7tXfmxyna5X8 zE`D&2_(DjjNLuUpH!&}EIr%=KuUsUqa^{?&4WZKEEtm;(9`&6+1;j~CInN!p=!3*E z-|skwOhKE^TB({pSyf!(>v8{rUB~==r|ajz_jw5AV?tnRpOXgDB;%-EWF{^N)(dA; z60hl`2>eImR1L^cpX^b0R@np1O}+gpyfyn^WjKW0U3)EMzw49~ejO^v#(Xr+q~c>4mkz06H)*vo%P^pk)FR(i8uHXVl=k<{TTP#i=$hTM{$1SCe`Xa{R{Ku-xSaA) z9{DD2(+JU4b6`5RNoq2cFbu2W%Me!qV@!y5DKc-P9Lf9H>4+O?Xt3{X$|*oWx7HCj zpr;;hlvLo|Qf%o?1m+7u4#Iknt`pzz1WHNl4s4T9QxbS3e(@@^l59|CNnNe>Ox090 z<4CXNC00)N0&w`XljI@XK;~aYm8aAnpPb01YPTrfT!ID>`G#ZKH)7RjRD42Sx%g@s zgrQI9@)`Hh$1ov0mrGnYb$T&R8a30@CM_m?g+U~Q(igfe=riPO?*i*5xP>5Iyi%Nn zKG9$4L|>-q)NE;bKVh8|ux#AaM^0`pE!gE1+S>PsVY^fAle=oz6^+`>zd>p-Cv zD@jpVBjDy9#kSAH@s#}VF@`@pCE-!-WorM~N5(f}HEds1CL8$#DeaaoDa1_?e!zpw zXSaoT;1WJfJb+xc90YQ5()Lr~_ponMw7wI_S!1G^pTrR1Ny#t+x#62IRfMaF zKV~(aPv>wWUd>0e%o3vv^=D#385?f8>TJ7n3i>*%Jx0El>rJ@z$LE2q z`9^g$-xcd2gB^(-w3>c2(^{4RCL_L-d)udBe7}_8M+#s+=LNj!_fry3X9%=X$9T96 zn^yTu`#1RX+=G+=1FT3zQ~-mpsm?ThC->H$9a>A}+d^rGB_y!{woSIGsz7R9Xzpa|Ir>1MUKxbdf!1r&vU|4~_KFrN!BiEBu2 zhlbM;9sGJ@ZVJwMVgHmg%=aEH) z;(H+6>)5G}v+jsI@wCkOBJvi-l5@Hj+i0~7{0pem$|w4+YEig@GgL(dQ0g zsM0i^DAZND(EeaY6BU?wHt+Rf`J02TWO=M2kUXbr$;?7x-eud7g!RW2!}|;*w>>J8 z#j&DC(b-G_7J1h7jwEPbLu*g977*l5;s%71GO}$Mx_v`S?I19dS}$C0ZF4CdmtYQh znr03OOIWE$I{AYtYqHmj%R!jl19He)cs)E(VaVoYAKmM~f>g9xces9cxq|Hpr`4;O z;%pqaNPGhvg00dpaaNwg*v7PzkQ3PT&BaYQ##)Yi4ZNiWyW6>F=2Xzk5>=%dUy^=R zXOF^&?BCheOdgn6Hc*k2uB%{U3jbdHTL7T0NxPL#uy%-Z<7z6t@X`%^ZDi|kt`n}) zPF#KFGVl=OMdSl;?s`fq3gkx}_Z|iI;905IQo@m21D_`tUvt}`YH#$|Wl;uz64nWJ zu#z-Lb5Kk0wQckkmwJ1>yFUItKNkdZ9YKL}?m=VqQJb9>;&o(-o3)^!V3;*IG+sU^ znRGF$iz8?&CxHtq*th8VH~|ch?vDuDJ=yw2IG5ngS`C{2n>FuZ&)pL?yK!js*MVjJ z2jcbS`$!viE0uf~>Su?v)Qs{A5HKmJ3H$`U4aWZl!AjHQ? zr^cp>o$s{Azu`&jk$+&^1ip_A!#I9#zx!xILK!UKfzel-;J(R|KXJJ#xvS`RwlZ72 zm$fg6H98(Exv$a;O*cr=SbqsPF0__WXv%rZuLm`JQ)-^JWbtV(P*uQ@=kllckAPQU zWALEd?%+4s)?7jvE^xmVyc${I@eZN9ILH?*jMN5Iz@~Ei@HyD|3uU=Jm7F#d%UV;A zMU(7l+Ufvmi7<~c(*8)c1OAz|dC!V%7sKs{mE}lwYEbqbS_QwtqH0I@F z>@akSfwT#r^DAHZ{Q4F(e2{zR<7_Y`R@N8J0DErU8#+bJkYvYKGzZlKQA-8o!vWk? 
z*u(rra*K7c1trxZ5zjxHUU^b>FLILlum6WzudD=iVVT~(nOtd$V`6Mb#4O~b>@90} z2taR%)9cYX`hL44SbRDWuew-V*qa(azSgzyRK>+TSG-R_XupOLtqXPiPb|BHC}chB89?>dqIM)L6@| z@&~te5gl0@b4FJ_eG7_KgV7IRle!4>4I@pq!lLFz9jjspF?)RA9P zZa|l!%A^lPL&97Ekg*71MPkipc8XlqT7t9uWkjC%HDp%DtpdLN-}hnAkkWg_>CbTS z_dN5X6mqDju`nVDX8;?moYm#q;u~@X;U5+u+JN>*m^EkI>UiDi@dTnF=sQzrhvHuv zTEPwq1yc5?pL|rQE&hZDQQ<9Bf+OIynhMw+*=zpJM?|Nv+to%34RaIQ#^36WiRxBk zCr_ngsNWD6RsRYgqzS?Y9s7-$6!~lKG}Se#0c}{QjGqCkd$(oi`Pp}CHt4Po%~xW- zU*hh-98of=>sd#sQnUVG5~&`5-B1FdaL*U_$Nx$j*hRy@$-zucuE3HAI@;m-`lXr; zEw!cVLG!bMn9(1HDjATo5@#p$O)pqPcSL~fQzVs**Q&Z)o%=h-t=qoH)3)h{b`cY7j3GPiy#LjTq#HSQww5I#`FPKcw z3c5~JPZS=`TQYmCT%<92ff{+4a9KtE!pb>{%*seQ=B(amgz zNQ=5lE$&6EhHeD)2SZw7Wf@Ny?+h4o{oS^RwVDlCg{79iA=2zrX(>$5yaAWr7!Sv34All zwQ=-etKqL0O$BY?zvZTnTgmy_5yt8b(rK4dvN8W>Vu@=3OqPz?fP>oMVpWdZgmbe~ z6>;CJ;SiGnK8zo5f)63Q^SyOMl{_EdIRrOIVdiE_~F7SsdQoB8E(TR{3uT!6|#XZwAQ zf12>KMmC>5s~2~b-__`Ijyi;ed00bx<1qt#J96K=l`4@FB*=_pA_UlC4eJwXr-t*m z4XaFOZ>=|$X9tB~a;@S+uvV;W^{rM&vj3+nBtGUsWjc4vO5BI9;q|AwuHniZm) zNeAUbFrKwjV$}SqIG$VIX}`lg3HH$t0CHo6r|MA8U|60e7g(1;6RpC=VvllM`4Hud zVrBvsi2;dPBf~fm$U%T0U7ChPPWxh6I?q%|qq5D3%>M#*n5>)0>xJ}+HkU&suY@1H zrWFpdp>Tp%r8Tuy962eLw`Oyzignl0A|b@(AL+yQXFyJzr!6 z*%7fH{p&i)fKct@R*s*F6%K$)lnlqwbu}L5D)$FjynBRPl9~ojl7BC-YFSYcUr^%F zi6xlgTtxPMdumkG%b${h)b;A!d}dqWdV#%KFqv77`Pt% zwduKc8dLdl0yP+WO-`8f7F}qx%BQ{TEk~EMu5PD+Q3m0E{ctP#Gg6QI8R+PFC5kQe zYG|Y?raekYTgW6^7 z&;8ptz}AG^;c3BU?n>A`yjstl-Hn*Pghe`bU}YYAwB5Ih+fl4y6Fq*yC>uq~Sy~7u zTu85lYa{adwH!VTQxDWfO@bT1h36!nPEbpUm$gaS1G%lL>XImM(Ppp*Az>)V?S^ENwK^{ zZxKCS_@+`2JG>0yjIa+cs~#BDkvdW`Oj%R=E!BZ%cPsxI1_D03@eFCNvVUQ{=gwo@nS)9(#<{Arj+_58h_92YR)6*bh+|xUi%(&m<~54@#2u9ABdsqcSZuz; z*NQJns(jAr6D41c)nU*r?^^$HIs;ddWf?9zDV_0=f85@>A7@7RRnJX+2VSw4D3W-W z0uG3@W$C;hB}n$icOI=lIbyFEjrp%AZ@crEV%rs{OTmjL zf47=;J-IbTYLzOk9jAY4+F^#Th8jm#e*`aAt%aBV{OX->bN1|aL;-1N$DIcjjRJ;h z5MMpYMRn4mD|GL)NrHF*Ye5hNO<_5cbp>z1H?6W?V&B40E)0$sq{uuNXmWrkIHpgm zYPhnsT|ZgT3Pn=(#4mhX-|C&pqi6Ip}`uDTygAn7`sq1 z0V@9POwCm=r1(o57h7y6a^r`RK4}#~l>0<*# z#U)2#({@%kevFFvmpq97#kkelV;;+pPNaMu^O|C)-B@>Bm#denXE(x9cS7x{jg`hh zdmZA!uc=%P^{epS0iI&vL|+6n?9UG1t26Wh3-h{{s6j1H`m6GBzz6gVj0di$?>BSK z<>nLZx%m}}4XTT|z{!LcE-U6H9#+1EO+b&jpE7wkz$VP@A}{qHg2_pPCjH68A&WNn z3Ke2~B3IQ_Zefj*uNt+QIynOOg6-rpLNuTMKd==g4-8Y2=2svn1FgjyDVcHEuhp+z z(o0|fFxh?D%US!0^DedltnzroxLYzOZi*mp7Pj_ef~VX^jmM*f!?KZ$lVtlf{`^_2 z?tyVnTkM&Z?UIAP0w>`1{*-;oiiMe5C|3MNNT#+u`t0{d4NGer_E2$t*qv_T+=Yts zj(<_aiEAenuj9vGF;qgmJ9$6+e+hTDdH~GfD@SqG=mQc{jc1(;bMuR|YP!`W8ztUf zhmL~X>nuZ%CWjU=Li;lJKWsHgBq)yzT}8k1$92>wp`LrE8@~v+i>C>)^uT~AOf-QKA z>R=Z$Qr|hz23fTFe7~NC1&wLmAa=R>rc8_6rLCV5nOyouW?Z*!aUO+NSAxFep@sFm z5vu0-b8iACR?B0sH0)jT(Aq8BU%mHJ{Ud*T^=^j-3>T^h7=5E0r;%5&S}6)rGm-v3 zrYp7Qf7Aa#0Nc2O=tVWcmYyiY?fuX<4}z|6wU=C&Zf9ri_rz+$vF^x+L_O6K)#Js> zbF&r|i-&PvS>!iPvjVT5TgSmyDZytum2U<6z%%D5NF3gfk|E_Vazv2RF56-@OlK{2 zPG^*4pd$wUOghM3|9RsI8{eWL6%D?d4+w zOVhcH&`|+mvPS4O4jp}Lo|_z=XUa|eJd@T*vKPFH1{bL=k5IR^B_A81k-qO@fx*ij zFrT}acT$41F)LUj&K4TfaGBQhuFvdqH14Eq4=wP2HAdNIgvG!f+HIjn1@|YbS{+QT zgNLYv6Rc+WF!Lw>I|2mz(iiYl*a}>Z1zy78)jgWj2|ac!H^~xvkL3yzWfQl!+H)dU zCi=Rtv3n5gsbLE+vPNW0wp87BHK_TEjNBz({6S91TS7ITH; zM$8o$g@4!?9fQ&|UihT9+UcCKLy2@}>xvy3`DhB5TY!sT#!AM>?5*=X-*lF!OiX}p z6x+JgP7&aQ>wo@5TJlI8PZr#+oB%|jM+LIuYhz`%MR*zIZT3<4p)FKjgm;k#os3+e zIflC%0HEsBlrU}xj@5INNXR5^je97jY(b%f_5t>4m>KoS6I~kTytz8ux8nCsxJPpe zC7HCbec8@D9|}C6G7uP6fzmDPy^4vtd^zAwU7fnjsAu;)Ky{b1JrB{efW3(?V{IOQ z4BST;sEj4U&dZQp2xazCKndbj-7Sv&7Uz9;6!+D>a~--o&vmC0=sgP$RR$y6Cs9Tn zwNu0nmoHMsKfT_=0N2#%hl|Z!_g7=jq({a|lGIIfM{kr7beFslEzf%!``qR(%nCQp zwPTHD;>W3OiQfr?=tm@#hw3K5;Y}ZOonkSJ&Obi+WQS%%_{aSigRags#ifoiZdD8- 
zcP^vnAy;cs7t?3FO6*6Y6R>{rfM2eM)WoQZLt0l;K4-K|^VL1nT<3hGE5MON&_B5T z&D%^ekb2}!MFVoM52t-+X-ep zMcEW67ki(IlsnTSDI&}??P-0_o6X;U*Oey?7R2$Fjk)*V_n+_=d2;#c4JKFPQ(P`n z_bV9!&B?|~p`>~n0mhFS5n%kIQiN>k(8JzyK~^|m>Drfz?bwgbvEj{y>6=j5n~18Q zKoPmeX_94Oe_vx9U-|Km?|+qz(PPB5%sRiQriJhaB4dkfHAg{5M9xczKMTIXeI(Y@ z$h!Q|ZAgep_m_HOub1$gk6%e99mV9R(p7V^O#X?neY=-0bCt~88g8(NC>eVqAF+TU zrMfi)@1o~?;-`dBBj{MAQJWf(-|0Z_u@|O$hv57q?Lx?4=>Pl$jRMNlipe-Co_Sxf z)t;BIaMo=-Bn8J+1YWY?ViJ~vRKuM^($kIH>VN|Ai>+{h(p_gqgRfVU`a<%^*W{y> z*Gb#wA7Snx4})^QL^d<#VrJ_%V4147lf&&MHr(q8rF^6`H)Rn(#~<`U&pc?A_Vr>; z;<#Y}x{-%hn%@(PsDj6UkuHX7V0+N){jTHvv6-U76nD}V$7hOy@5<4guzy&*X2d%6 z^9ja)2uvF-ULObBZxiuUeXeVG)E4I|LvUkLh<^pt^Cm*K!?tLKI>ZmZ05Q=|h`imH z71G<%XTH))m8MpTx=qz#5NFc_?mta4kt<`Rsuwo6;2K8j3uPvp}| zn6jX1<*mxJ!H=i3i5p*_&&-Lg7q~~5&+Za3bYj>M z3pDoVb#miU78C!WBu;_bMUC_%E$$-1H;lg1F8-97XPH3lt8yXSCYS7gXwyimNu@~( zUeE6j+Eqg^1!Ma0IxGM`)xL=|+g-o-=7-Mg>+%gfKXiGr)BtYesh!r&wD8J6A3haq zs6rfFYOv2qr zTx{X2?`hBtdCJv+91Dfmcj#bcHAO{;Ct<1OccN8(1Vi~quo8$SZlPQ$?%<_WU#7&S zbJ(CE@E~squ~#NcG1*O19+K;cVp$N`Ntyk;GxyM8;lGlUyTD6(=K^Pt^q^ao zEH(byRt>=pP1jnO7`blOtj6t~8gn^8x4%yTIKSd;b>dg>_BE1npFd%ST`2Ommzmdz zP@>r1cG^*IoI~UhDgfX)$2d0P zxB^h(M@vB!z@iB*bUg-{GkBtf70mV6$& zIDlLVGFpdFI!|J72Q|A!JlN0q*_tzs%x!JD6bRdDZrw9qys_W!uzGQ>AVq-q;B0umVVWSZ7+cMg z*89o|Y<9@A)S9>jN4jXkpj>2L`YMcY2C*5jN?1 z8bep2x4bB0pzu?keBi~*q1jlnO zHRW#_;Ujn>}G97J?*KgE(;%{N{qnN)7%@~x4GYeL!fW_ zBEk@ipHB<(82$3$*9zN<5L8k(&94akSMoJoN+%NJGP@?^!F&Xv1hB&VXyqq}7=NlG zI{HLAy-i+Fcnv*Jo`j!r9nKbnl(Nz%y(RjN%;<^7$a=e;#*{m@vB9iF9D|p)VzQ>i zn2eV+Ek2O+#Y<8x^j-8-`9Xdzl1;LE_r)Lq&2>JTHQb%_0rXsk;W|cPH}$*n8s$(i z2fGDZ9j1qH`#RVJxLMrYN-YTbUuKNQ@RjEK?Gp#Qqh zHtQ3;5YqK|#tRnY+4$o6wPGqHYL#Naeg-3eCmQ*0LetfU`3`H)yPf2%t2)$&k#^%x zz1v-rD1>a~eez*6y>SM&VTAmf>yPQ>ge#~{mrt{-aB=y28HsfxviWr*>EHbHx}3DMwcT5q;ky`5+# zEc}^w*Xlx0fiw&8r)8Rz=rxgA(OBt7q~*`+F7|P|FhD=%RwxMi0Q-tx_Zu(~Oz%+? 
zynoJbRoTW|Eunz_;A?Ly?gxU^e1Kli7)bo6PB z9vuLkSmi2K6Uq`2VNW-E|H3TA+wPIe@<5ZZ6=K!(^`CQx9pqL~S=wc+VsmV9d0bYF z0r7OM4pOaZR}TdU-ycNZblw*3ANw!-5dx&VpI3sPjrm#q3$dC|`=?ID_^#`v%Nd-) zREsMHaa$Q9jo}HEzZbhb9#MWEUBI$%%o?5vzJR`4@KXMRv6mVn{5`n4HL|U4+Wte| zZ{*_dQ|yjS=IQ^s`_#krnU#b2xqL4jp`MqR{rzukI>mr|D)+WAqUs+=Bj{vul80PtQ~P{xbFZlxRE)1>iRaL=#Q);)DN#_fhdxD|+r3$mwx zNuSH8zZ^dHJt>&qj$N8-sI0jysr$qu7+%Kay4bnavxdMT{fcNJvT|jAb`bszEvGRfK=Ad6v=&roU0y8Z%Z-Dd-IhZZXOr~Ovs+fPu+)M*r(XE{1Z#L;hq^x+(P1H^Hh$pdX8@{#6uQ(+?j zoF#mkJgvPpq@T+d*7Xrqs@ z5{tU#t-k>F@}M4D*L**FPJ`8W?30nJ(84U7eq8}qok$;2^j#Dt?;SxxpnA&HqICjZ z^d)NTI>of$U^H*g%ZnS}J5=XqXC5&xVm zKotOX1T5D)iKdvMAEMQF1jd?<3GEeF@Z#?ae6zda*CP)`{ww$DQ$2>}MgIkEX(~^a z4F>(`)`ni2%E7iGI4bJk)_TT-UapOU9thlaf=x8iOj{)!wxRCeE+L>JQX>fB_bmH& zRySaUM!UF8@^V}5n;m?kxG`}}p#<=QG?DntV>$nV<{0*6N!4oAGDJWPnt!k}S#4$$ zFOCsWs|YP`HjoEAOGuNz;=b5pTAa^BjL0y0BNUzl`ony3rta-oz;j+BovotM+HEK0 z@pYCHG`UWDCU?fGjDx;iqG>llU}i^v=ydG|ah>fR%*L1~ZpKyYX^ZW0{Q~6RNljgV z*77$%bYm!e$^rbxqr^swpf@fKzvKcx@x3qNIToOz`eJog45%-7&g2PrzxZegm(u$O z@CqAg)SuZWVyp{F(#2Hd8hLExE?b>Z)OykNdZ;U=O&7O7{@$fYQ7^=1`I`GI8O0rtmp25u7$~ zhCag%4KN2PE{RNV0gCs)_f34Y2k!-|vb<;wqxA87Pt6--daN0wW*LF}N=NKNG{#mq zoS6r*8n|yZ1?5Ss6Y+`2uP~Q*|Aj9LA-FHY*W9Vw`Jsle7d1QU^Mtoylb9idOviA$ zh`<7<`$(>WB6@rc&vhzc;K!tb^0MkYO-K1phz{@)_Wa`~2y@I{Z5=MFAmh7mf)<4G zx!688lTT2O^JT_w5tsuy{&PWOu7h%)KYCg8(l^%aEhYEO6G6CEwy$7>E`n9U+I}Rr z32R7x=_%?Ab;53Jcdm1U9it)v<{(YN4DOPxh3rn(FzyJ-#aV%PJ zPfWwUE7OL6dW2*FkOzVtnpn&3(cErX+ut)x=V2tMYGRL=z`=Y1%CAiycs|7A0p zhB9S!_PW0M6lXlYk7viyC+rAo)3E#EZ_F?jucC3c8M;`sNepKCwfN&5BJ$KIkWN!M zp%+gkAyp&6sx`bZ!ZTo!=EnWfdyJ*HM0hT^F@BZk_9lc_Ln>!pqD)!6zy*DtZ+cs* z^#W`lH&B3H7_))yEyzQ%dM+<6di=`pOxnX_vyb9%Kj0lYYOU3z%!;;^LgORC$0YbW zSWnr+8+E&8HL=3MNy5#|1>TA4`8CN(N=D^rS5{wr_a|u$_X)at+V@)F7UX}$U{(1v zZ6S;-C|s;=Y52MOLICclP+u0?`=`1x4Q^VD_AhZu#zlDXw_`&+-r_o0jsc1ruxSQ*Pqq^w@&k{Nx#d%zl6ni)Gb3S(VoCwCc?JMk?%xz%b0=2 zU>^CpNoqj8iB33fl-UD`lbd#@n&sxR7Bob0lXq?DRG_?7c(coD`Vi-ti3Rk5%UN*0 z%OIbwKxIm=Hh=X_!-Njzm%&77nO2<5%Uaxi{-IaF?y)Ez8nM}xGs$;HIkq7X;+q2gN*&Rm-<_dc+k|)T)>23-6 z2zlMym3|Htp=wb_k(KNYfEp&KnBlg`VWxNYUQ+Cqb)Da6t2_>h1l^PcPU+X}WNlOB za5A{d`EO}gRFjdE%%yNy(hv9*T6I9&M9WI%f}YYS@_FmRl8kW(7;*?Dt|GY zfIp)4LmnO?;Y zS=hsERQa=z@Aca>xyaS3J|8KO+{Ml*eoGQ*s%;VeyE9OXH2PqhE~ zDMQWcs<agZbSWjk?PH>Q2vWst(6=ZhP5zmp_a(&fFxxoEO}OD8N)I&{o!S44+Mw-MZPsq;98hnS=90GbIkU_ZxZlRz9Sa*+MsC>y-p{Z(Rsoq2|>6H!2pQd#u)VUonSgdnq z{MENU-+3c1`MP<^gwEmH*(k}StN9PNA3b_>-PWVqu0Gs7b>igD_2OW{-ZEeLi@;;?m~_{U6rrLPK-5$2+M~*AhP0DSa_x5_^?zFVWO7#EkfP zC8gwGF3*qa#O=BBQD^~Mo%eAjDLC3!knKiK)>MW+myfIf3qxcP6m6brn|(+8CwqNR z@9d_=b+Ok~M_{@UQ=*ERYtpIo9Hsw82^MJ_!=5}9;B(l-a1GvX*vdG z!^ZGJplApmgZn@ z;85{WtM(@?(hmM2Wl~v!XjkRhfA$7uvK+Gsc(wbiPY}kD`zw0Wr|bs#e%KuL!!3}+ z_d5E|!?(127_T}M63P5?3)3znJaI|QBSbs!O%~cBjuk28$2Ve9s(pJDVrU=T(TxRn+?7%HOTAHBnQGdA<186-R+3Pb`07-#Q1F$SqHlD7$%PzCzA^oKyJ( zkk;xL5c5TZ8bo}|9K8(gt9Sri%R0L`Ryy4}@bR$+T0GMl#BmxPe1W8Es5xA}0k)eE z+~qtI_%I}Dv_DJRuGEWSakMqlL2W`Ze?h4?xdQ6KK@ilZOz9xjME&E5a8Pvw~(H3SH+)Vj#?+<=$X?Roy&2R z@*y>c6CNCH1#aWK*r?)BRdV+D{ zxw-&(IhS)cyS&p!C*_nc56jS=wV?}JS6tUl>}HE!vMYsj_1rsb;Xkl6i}8a4{uvqr zYJ}J)ODhBiF9wa72zxBS#nKl}W?shtD)UA6Y3&>Kq9nEGpeA&-Cg-Air|tJ^Q-;Z$Tw)yB@(-Jc7%2ecI)s1vk^)0Yz0G4{XAK{GTf%dB9 zYX)4-@jf-oxMH{#AODbaID3$6ubpjsAtlZ7I_4~W#te_?4(ySwiv`;OM ze#ZJ#KBdA=%-ld!TaD?(8+WgP{a36u}FIxU=oz zxte!mcl8>jeXr=?D}oo87EYo|morOk{{%bEe?Ngl@!{pTg#`Z>*|v{hSSx6oDpQ^U zy~KJ$mT7PDXZ*gm8&T^3+_1Px)CuW98~|LimGBekhAN(Pv5AuK9ZwVE7$YDlf4UK{d_; zC*Y@DZAI4g*^Y!HWyEm9q{|%9PwWGG{}#9dnIfL)7bSu=l1GC|%CyB_b+k=E%VOW| z=WUJ`Vey2`V~AGdHpQ=22;XN&P}B>=I>+uJ64kI%m8L)?k>(cWRJGJf<~SS512ezP 
z4l~ws@f2X3&E9-nWYm<{q8!QT9sM|C4bG{XO;H?b&Wue4+W5D{3xkf5P7`Ks8QR-$ z8@QwB^_tm%RK#YzTwM;fPF!=ea8nR+ER{zY1a7XgmchxpgxAs$-&ARSm`Kq~uGuk7 z0EM**^Y4_~$TInhRmdsrtD&~&NDy*O-hJ>L7mEDKcoXSY9BD%S!u`{U|9>) z&19fUqqELtPnqYZ+`;_hMpJIAaD5~%s<7{8k1dV?9*)eDtG>G-UKlQ;?&DTo_Wy|x zsk1rADfB7vbw4&LIAGIO2mf`>)~NNtF^8rSG8gt9bPUy>IJsy@p3kkE1;=%+OK8e( zb4+lPxz%}2Y@jc8ezvziw7@-<7ecodWpN5NY0r$EKCKHkT!T%uFZ2AAYbbabIT2^p zj`%Vht{QCGBuy!Jy%E#4v5eRE=kH3%hOvyve-}aqT_x*O!_kkBBd1g=RNEF-V<5bp znilT06P(Y8w2^(cE>s)ITxDN87wG8gMLx^xruV=uIuA@0XI4Yah8|~{Y`A>@Zqj@#q+gZ(b$H_f$~bK-1?Ii8deg4=B0)#4$b;_ zWxCyXLWYIVhv}s$Vee%oJ!00l)mmBK!iPLIO&v-hxVVS9-i!KrQ`PF+RHrip`?T=A z+mllRGF{FH8fvcx4DR;Mpq{dUuUx=H{keL<)7MG;9|X&p70X4INY0-sVvn}kW?SE>*$t>yl$iy9x@-Ni=MaLk61{5{1lsDiA>Y9n$Y|S(;v)O@Tv6U=#Q-GLNxbb2lh`K3n-z&o`;|Ws{CWLITu{!|WjQ+|dnEJa%l8 zD6*Z2sI{j*GWT+}YJmvWDsO<55 zwbU;`U-O^E&utB%!RHKGvnvvAJeQ=2N8zG@`DfA$h-RB>V_GV2-iE@E>$QaZ*Dlrv zxz+|aGG4MJADRe=TJM2OST$+kPwMhu672PjeH?8XyUDkD5m$lWZUEBTg z+xPb;d>)VO^Lbxh@8?rmn*0FEYG3$e6#3orU%>NgnjMZ$opxi2uR_CAL!Aom)OS$g zh;|E=cLQUo*CTC^?^cJ7)7BDdj~VdJ1!0U}@oE_F_|z@;A_uwzN#8g<`s-NNLJTwSKU-PKnN+eEr&=I+|A;dH!4CzyL`}~ z9XJ{PJ(}qS4aIVoOV3HOkaHi!CDhm9DDjW3W|`4qlW}kTT1}9+>`Va$v7! zqr9{PAE+6H`906@b`34ZZqcOUqHc4T4^ywxe9DqvVe-}LegPuP;?s2z zj{DLu&jjLxUt7OXtZ^k(VuyGC_W6-oFJ2KVFnyltd0(^7%|C|C0S^=8!(j%U+HOEIya{j} z=W|>eP;p+4*86DRC$kxD03&Q7bCdML`SHr%<25rJZe4QS_di+AdFi;XSF`GGyM_D5 zcXGIm6-EX}kMQ%XZBg?&KwSbLD$OGK1X%3tN1ObBF!vN1Cd1D#vK?xI(%{22D--fH zE-dmrCjPwIMXQRL^G~HYjq}MTXKM)!(sCA;9R{{D-Fz>3&R?8QGrI zsSXx?Z@C1>WLQ=kV&_2LhRW>fmNNxDum5m3Nm&b+bB$?RsOn+_>F;m7nv?5^2RVM7 zPv>iV9vLV=8XYcL}-l{6G+77399wA28Y_E0d2FqO{X% zACvmSU}pc3xMsdFPtk8P<5!d0y)DTxH%-am7lr`hDaJ|M3iLI5$$GR@^IIH8cAMZI zjJ*yEu)ys#zIATFM}2eefN7!`CWrj{2 z#;$2J<<4cNn>r8iof;P%`!DG;MJ$tG;dO>J*DC6T&_2Sl)T>EXN|e8PO?qqmuW3pB zQwQ|a2@c2QqCCYVRHbIuU8Br6>H^r-_jOBz^V1{M_Em%-^2gs)=h7ewC-DKa1nL~Q zv?k*I)YaBz0JdsYa+2R>?S{7yw@gwRDS_A~adq}%>#1MxvjNgatCYXkJQ=y8g#|AC@ zjbQW0l`b`ZExxZNWDZ~=UjYKJ@0{(l4eAsPFIa{?2-%3y?pJ5^5@u>J5_S8+ zI>+X97W3AxbRA*bOsgvEn4#*RYAAUtsVoyvxS1Y;Nvhk}b8zAn(AvM)t@=>s4z6ws z_N+Ky!78p14B5^>2hDiq>K2nzT&?}LDsJb@S~mWR|*{fc>S|6~X9QU)?}|NT1A?gbR$r?NjBvKMSKg{+Nj^mKw+M#aOPYnQ}n zs^-qxk^X@0o6wVJPimKh2tX|{jthd^F1qk5_hOH!xBp=R0=+111W48o#vkvKQ;D{e zmhd`WFEist-P)_64jFI@OV)>7~(Th@dat=(h!;i_R=c7rK;(zBdK%A%ZGI6-^l8xtUH3?JR#_&6(Y8P(<%6f*Ti zZy6hze`jVGSzn;3nHJ=pf|CF_dv8Qs}tkHiT^sgvX+Q=Uy zXgV?NF&CTTMdW|8lJIvkB2vhRPWyF-R2)*Zhm6|pWvEi?yTRzcSoQwXiqwfEl#85m zNJFk{I~$3dYWbw85>bQ%#BlCE*55MJVR51tq;mIa$jG<@dDJG=U=Oaj1jBYhE{x6@ zhH7uIj)P>9VRscsH1HSU3FAwKIsu#-ACNvh;9k(}k$qQp?Fq+j8* zuHzf&R{GVw&es#Q9QSsK#g;5L<;3g<#q34pcmAGd0zH$ULyZA zUBJgV8ZseEEGQG{UW;Pn)u2y)*3!M)rm+sUvsw=clcoDGwVQjoKzm#!p=;iW{FUaD z@IOg|SH6=Wblpb3{6j9{M^I~0uNZBuIYj!;kx%TpNS&;^$mSDQTXs#3M7*L2wQ1GX zsl$c7vH?eH%}=7=Tg>L)j!Cfl_)tqPU?}&cZZWBk&;nSFT!m>@gbaF;$lxbo(yXW( zf%ywFo=GrrmkJyKyP^9sE-X9Aj(e76So}8?7x6%iIwj>7vG&Qjv{OV6vR}XKlI(r2 z2F-aVieXKoPLDG6LUr~<$Yv@OD?0N%@7 zkmu=(G7WJAs;!-X3eJ~9?rL6TZ;o^DT$n68mF1}Od!9X)r(8c;*%I%BvlAhTjDxyC zMx?uwv{#dUue9Sd<3DwrtafBjMaXwtD;$SahEiZ_!SJ8$Wv#Y^bzll}A8G4iB} zxMqm~>Kzd`l_2{8oTA5{R-4FjsY)-?^A%QLGytT z6ZgY01u>6nw|i)I&rpC}HNSrJ&F=F%peVaE7BBs{h|3}crYo(fz`9Syf2gEG>QvM@ zmyOC?VHX7IJGIH1xeU5cyBBIByK`6x4zmS))BNka`DU5^>D_EZabhPH<_KdZ8xd( zk!Kd@8kLv{63kD*9hM?JV=2f2h^ib(ew3UOtm+RViMTYI64rX_@`;717G{ylA*JixSEkSL;53>fLd;F;N^t| z*z6mk-TeOi|MiW)dxK?3Vb{y4ihclBHUAFhmXTO>S}d>U$`D8OFA! 
zsNrYSn8*4YA+7@49kmMXncMU~yP+y3(Gcko;0vnP6Wo|*eOKO&ZR7FL$8UJUOAAy;M6FpKT= z=PeW6o9sbm&_5_+rLaEi87$8wO|@EgoKYNv@GHq73!CLE^ zmH%3o${>V)nXjp2=4Zoo{M47%myp@93%|Gpo`x8cj;Om4rxf^^ligaKn_{cw7Tsz= zm-e5jp%U`znPVvH$mQ}2#(oahLRT<{mD%#_4-btKnB@uwS|NKA-ML)1F8VTco%|f* zuLXd_7n7=~cZ$4qR=iD6pHpHi{^^oAUY-x{i=PPhll!w7Fl_pFue@=w-s zs}TkOy`8q$T*JI0tdVv`prhn#)zA_HFNUOkS{>16K>TGl^@r-|ckHy!1!7!*ZuhO^ z<&5c=oR}pwOR$%+4a4`-vPp8q>cm>1Vcl8c3Gf;Em4pz&9yQZd>O^o>0mY13FZY-Cz)l77dle0^%?jlG zISlALPV{F#a2Mc}m{F4|JM%+7nsDNUllhb;7iSU zc!%a52Ax+207v@GUZoK-Rl=c&PQA!Db#Xd}MsPpCoJ9HURH|N2ocZh)Ar!G|w&3C) z$NE!8Y8#Wrz3n*xA5nW4sWr$#9wzqRSVv$l|DG&(*hi%89I-DFmpZ2|^z;OYd|$(M zTK?$%Q5(_QM9C)`q~?Is^Fw?au_nETjbeQDtS)A@F_&VOP3t-veiQ|ih4#MBB3EM- z&YTvRq3UX^+A5l-f=d`#D%q~C*in2y5V%X8GM z7Z)#qqISWCWqXR2n^_}CB^!V|Ivoklp?4Nz!JSd}LCs0nsh^BfBKo=6Pwe;wnzI#5 zFE~g$uidUr#8ef36z8J$!UhhoT`nL(UZDF?$7S^V^H~}JB%Jq0k*QpFN}RJ%Yl3{` zvSQ9!vLK7lGb@TmpbMe%KACTb{NOUYf%nZ?f*asGn`##X1f98+6EmF}tyw zbb4Dfr6PVK*#I34{_24|Ib&LN!ZXLZs8Cv}_u~baQ-+!gP7*h-)r4D;w=od=wE^+M zr>t={?(g&4R@8{#d%f5h8itIupM8Y2{(-;Z`16J{E$KDuN=7R-C5hmYt6y|I8 zfq?|`a)h!r%cDGpY)yMX?N%H6A=JinAm z2{NHr0~QIgaQm5J^q-o)rp3<|tZ9{ceHh{<_uqN-eR3Z&R69YwE!QjX6y2HoC^F5h+W3PD++SVU=#z&Wot0k!F)tZ z62xAkbjiyq#c~dQCq{tG_hJWgmuHa_2~Ht49Uj*@FFAN=ky2hXp0?sDNCm#c;eUKW z>W*Ba8|1S`ld`A)QJmaKyh*?#ywn&tHcD~14-ET0u7tK_CDjjTuA9zSsDSEY3KN-Q zsd=(|x?mSJM$|k!-}H%{Gn^WKTmB?qC-IcqJa#_z-h^ci?st^@Lgm6`7s}P4&x)Vc zTZ(f<(I*Lw^y9`Sq^8n<`wJ(Pnhe4B&AQN21B#c^?m0E%D>XYqG8UORs7&ZsQBb2y z%z2}YYd5ev5gjJVJprrY^mN7pR~i-<^X9}x%(!@U#WPvkrUqeqLsE@6|BQ7VvAphQ z&r<5oo&DQ@yaE;Go(M4~nq{8=RpUCBPz*Z9}@9K=J z^BC}1#PyBa(cYq`{69sTiho^ac3DF%r2vBX^JUqyKNOn>_%0g(o%qN2*AdC1=gBsE zFil~O=WlAJnkOuWLb(zS^G__+g8{XXo%P{N2mFrc4PY%9TgLe0lRFznm!_zfbL|tD zBR(%ROWiny@zkvDd@QjbjjFKjQGEHyM`q7v(7Hh z;`meEv;4e*`W@Q+^XGu{rjVV7ifq%^hZGz=mXM z^=+(BCjd{0d8+uKake_aS-h|naxb!VH`OFg^j2=BoH-phh*g7;R8jO(J(8I*=Mw>G ziw{tj5E3W%T`Q2`#Bu$UZ*GD-O}DJa7KZ~ar9EHGC?yXDUSJ~en&sF*Q+4s$7)l|X zs<1{QqASPl#f*$aSL(|DH{Ir&kCoYO!(vX)bqdh(tm-I-`%Om85Q;*+j z^#obnfZGcW6LY^WZ}2#(yQsrd`}eTzWpcCBZMMY6a%;MBt>J0@RNY=O4wCHhwZh1~ z9VOtsbtZUTLH!{{TE`xz=dfda_roGr2zeoJ_ zr{+71_$h#r_dR6d{GQc^sAlK@e1v}_p%DI=TCmswXwxP!0n&k?-7(=9!6gn@ed_=- zn<+)TN!I)otogPF?X>8dz`Q&cJ_D#9TPVz;SvUte8Y8*1)xK%xEubfKWKV|HIx(<8 z)^m}qB3}u1nc1Oeh&q2y_my3J!eb}2UL4~^iC zI6~Jq6(<}af~AusvXTTd;6D@dQ)TI6j?3HT0b1G*A%0lNm z8SqQoTp7-wrRa{!j8Knx_!{HQ$Pnh|oWP0V&y<^rm5RsllsHA;vTO3dIaR&FGL}XE z9FJc-s@;*Nl2_K5kqAy(X?HMG*eP<0jT7+ej6KRo8)kjSf}7M{`yXVI=}lXgW?T6Z zP0=2%=*_uU4Y-G^I?jn8sf|e%INuEI)fX;J^>GOU^;!A{Q`-ukO(Px`6e>P$fYRaN z4|xZ1Z9loHRIylHiu6zgLeJB5LtGg@-R0*TH~6Rd?y?jEAQvod|cOth?7 zI2yVWW8%;N8>Feu2=Cd|jqF1Hs|OkQoe1Bi>ee23Da<2RL^RS46JAANEi%HQP5fD= zb@G_(0pV#9{7{D?0TzilBVAGB1fNbC{i&Rz)6}>O%$Lfk+NHxWB5FNIDZQ**3QEi1 zw?PbO_@G~e6EOcy@GaH9-G~s9c_?ccDe{R;V(|qd*Cm+m#=K1x?T#hD!>WhCeAy_h=Wge+CfW_xV;)b`1z^ORsu(SKpk8iI;#9RPVLZdL4DXpEwYFUv6$OpS5b zA+N~0#PaIdETUyyB5@hwGq1dw1x_?D%*^+4Io4?G7!SU500stlkp^5s+MTgLzt*mY)! 
z`H_9v5^2my0zYn*CwoZ=05?@_R}DBCGMHvbNUQHwRunCRb-Grr0;K3Ca$@b>j=)xx z>}#=ssyk|~W)TDE98?O*rD>90S1&F|RV?jP4q3Xgq*ASm8@+Y%~%)a`?#W!)ff3mmeyR zPf?`)j%&w0)Z#c>n8Mqmt}=H0iss~HhLX$EMgz|+g;8vDjXhwQ{nSxz)Air9$UF8g zF$P+`(XWE**h$Ob@^I)@X2ZxAy{9qmoT}0B+V;r^NXeum)1~wh34UB=F1R0(k#!R% zek*;S;?C3T(xyi}TN@DjjN1&E><|)LwSUO#@Z$5B-u}cb+|{}E`d8F0jL}KdxlXCQa9b9 z;vO`o*EYWsq2;GVw}jfLuQns?kVJe=+;;K?OwsOYvs&x8M>a2k9PV5%tOe-tRY3O# za^xF+8BKRqixqG7k^v*Y2Ka;oh3KmI=|bJo8s5u{E%7Aw4YXc;GX;)e`?T#xmX@|o|M)RF19u;6y)CT9cb>H+6#dhnTiL+VBR zztr9-gKX(z_YR~bfp#fMoBB_^0;0d^3WZE~<6oJ$8h-ZdM6c4!W=GLB*F5&W^IH8% z*qlVbn>RED4tw}G8wRpTx5z94{|Wdd%8Y)S#NcC^3Yw$*(KLlRKY79zyS)bd7pFm^ z8Wl5&I8KP&iHtJ`R3=2=^ z9}_gJ`4Zh09a+of_uima9Yb|q9X1u!idF3!%#G7Xh6?_OrUThOwUVo42n$_o;SO8fGf!P z9yknI66;7D|LoYo9D)xtYAYAdiOj=Cye5+T+xeYT*)N{c&QM1yeum*AO(4W)pKeE- z+0^}*ojAB@y%DCTT01P;Gb3JCZLLc`ZPRD+J*V`R%{_v{N(xQ47VZSyQJj z^oWf`LRRert?G^=4)kQqddI8M{H~+%jB!mzgno z-6;vEPeWx(-}Pnt0#vWmj2E2_gr~cXED_k5x2qB?&9Q~ayUB1m5e2Nb4)c@-CxpaY z(OYX8t8<*!oaK@ge-1P%9YB4yUU%|D8gKluR$!Jkx=RsJe_XgguCO3$ggBRoZ=EC!Dc{Ta7Fv$7>|onr-M)UA;pia6J*K^TyFWI@bU zLwiVDb<|b;9bb z>UA7|`KN-z;rV>0b90@*o^7pZdNsejr_-6q51}x&c*-f z1AgGMnY@V{{1&X!l;?<%y??3NLcPUCHyOd!-PKiW=5s)tUsqI*G`wmozuT##_ihfS z!n-OcrWNWLzCtwJcYzhk&bQb61fm$AOwD~@Fom{6e&GdXgSy2d1lm`4lhc4ls5XS{ zj>)tcw~WI=KeX=MrCN`k@?S zWd}O)?39Nfm{i>MFY9ZWi6EFAv{At$5!cn$488sY~@Am zHK34=xL-`>yZA~Im#e1eJb3Ru@w%Sgg}NABOogp}!t0?neQ(nn$+yfDukdG*&MBOQ z+uPPj36VOvYY+3XLKa#~P+S;7xLY zBg+kerCw*K@C({)5}mpseDv3^?4@)j&6{e1t@yaR;JXJ;C%|&>6_&=1UEJn;Q$Zw=;X)QU$Rv^%AL<9}dZX#U!qLS7*(wcLUhE zaO%4lyV#+S)8?DxQ=IWj0wefQv4agkSa8zvJz?EZ9RE?{@D))X^QZ0w3vD~TK}I*d z0(iDqoGV`3F6Fiv6bBD>Xl4Ql4R7Q$O7bhQST5tJ$OpYKSY;_YRvzYndnEQUgL>H; z&K_&W%1d4Ct34ga**C#$_okk6P);NSnEjMhmrnaPVs4q42bK{qK^KygYz$<(+%5Ob zD-+n^BOG1#DCHBARj)G8b-NvSt08M7pC0;qtjRsC=N3&pPbxmCKO13&nWZ8o)v^9! 
zcJ+DHD}FQfFzr=(av$o{bIa%sS&E;!_}f#ptsNSQdEG`Kr|FqL%^;rV8_GkX&2RY3oI*Wv6 zJ*odzsQx#h4Mx52x9FMYQ;(9r;Vm&UCZV(q}8zFQ(2qWcNAEUAH&kEHBfGZwYm2=}3^ zZUrVsZS|NLyN?&)nv8m8~EeQ6o3QQ%>Aj(RB^@n7POk4V5G<^joE;m zce*iUIC7Yi5U)<+WoM%HP!S2EPCi_-(D2dbx!=Hr49zm!3qlTI1@gwL?F&!ZR*y$` zteY4Hy^>W@Yn10h&ujY!nA15&C z;B%AUv)i%Ix16?1K~PuvQ~wjnbl2ZUV#8M7Z;Ken?j_78=0AF=v3#dF18~P=(tqMC zKO)1KM=Vh;(O5O$KHY5OrMS#*8EKgcEQuXTbC1_wPm?K;jFFnDG=xw56+F{Fa!j2n zzU&KxFfc5$^B&`5tSRa8lS0c$zxcbTL+vPWpuYRVn5Tlzxt z`Rbb==B5q~+GWZ*FKPnKFpSdX!^%|{iS8OxQhR>P5q}zWC!>>?CqQf^hGn6L9yUj& zXxwl&G+qTl-{keNZfKJ&(oE_nOZW)$=JN@(sFY_!ER_1 z3x41wXF4}P$P&)RHNV&dKK(LI!~)Ccfj$QovlzDMu{#sc>G}gW#<8Ii{K4iyTpf$!cz4 z+s5QS(Dz5P)UT!b#ggh~9q$y;i>xSJVVK2?z+2KIMk~AG$&Q`KXwith-+h0`0qD9; z$Ei!m?>nIx1;5n>k(rt^%nrehef7>bU^zH1NVGFj=4rALqXlhkxhNKji43XJP-=(D!-t$#%sqBF0xJ z>h}C+IkF0yW>~xtlZYL;Np{nJQv#kSmof}mK+O7}A>w!|+6_uDy9_}%u5eh)Dfb1& zotFbxf^fA3Aej&dCGIz+`8A7I0cZd49YF|#dA|RcL+21 zh}lIY8)5&#EV_%qsF`n#SosVw=O0q?*wo&7O7%{igOZgcvJ4;b@5qjHAJFL2=dxTi zaUhHNfgLIdZ;6_Ft8IrSYj;m`En`ASzqQ+i4}sqe&VjCVW-p-Zim|K6tMCd8N9_uy zsWH>J@unKQn4<6x_ia6umpoYXqA(|!At4~4yEJx@t7_DaGvZv_l&3)_UpRt6a6Uyr zwsH&}A{x@R=p3eQV4!;+BZrQ%QS=UZXE3G(Ax5nW`Bcbyqv==&ojcou5d8{clI-CM z?Y$qbCi7r#(&Y=#+rR~OPCE3R+YpVIZ`u?xuGvo85Pfa}eMh-v&ZMQNgkNFvkNs~& zLRw&P+avQ#K4=YL0o5XRjPMWKt<0u+XWXRjV}=4eN!NgO{RG#P>!mp{es(2RXbN zyHDZ@L_Xj?BtLoXvhKgNNT5gowLid&&?mSgb+0fWQ=LmZBZ5TFx@5?qa$U2kOM}u( zN}3&L+eA*{;k+aW_vKLM;)MN#%ftftky|`Rn+*D_I1<75LQiJ?TL^4(Xu#DZa}C=H zS`w zhmL$u4Fj2s9rkT1t_*zxmK(zB3naVklxwty0qWWwZ1}?M+S(x8Y9=7(0Fi~5Rrh6m zyOi3=@A+brNFPMN-xTC{c5=?)1(1N4ATfs>QBd<4v{z?789|U`S*|Bxidqb*2Nd}* zNy_LXnW{YM`xL-M@3}0ooyy&ki`~ORk64xVVvo#gkqC@pJ`|L*3p!&!@+qe2eNUYa zZQ=5h>YXfuH`_Jo#1`ZqrG`C#Fi!1iSP9Vy`%OIAxA4W_VC`jH8Y>q1POjHnIGnWwS^qA zm$Aruhr^t+N`9Hk{@re6W@LSmh+F;0Yni-65R1K5zDHHfYp*uJLcG$0lW~`AM ze+%T`ss^=h0b#v}JO1UGq9VFHVte#l;oD5{GtxlV32sns)~i^1)ea#fG9YrQW4^hd z38E zdt~1SnjjJ0W9~#zybL*C$&zoK9S~%e`VOZ<+muxV-^%1*(_43^D&M2X7rdvIr~~o> z=J>ur!ICxLgY&Ab$?=iMr0Nl}Q*rFnsJuLK(9KePnu!h5@3vCXlf zyl@utF!gz-;}$b*3nmx7#)1WD0Q>=6A-FNq6_46tAYF$Hrgi}x=4~MPf$|CuMdCNp zWMZQ6-#WU$>j*!LkGAF9-Jw`PJy@M8Lv+wVIh>IP3nL;&kX@-EQ7T#ve-1+=7G`g<56&1{gYgRI;|}n$BBn z@?YZ~(eVYu{j@PyMQxxyufDTGft(uv#sEwJo|x8Ns9kuJ+oX<}Rr<1o+Z-k=+l zBlM-*yev->BC!Okx0^AK>Ldfb2M;O+l>X{sKTh!?>^Ery#9r2DqW!s;pQGzmSog$( z@h&bE@UhLvY7Mdnd5S1<$vel~n^-owTlae0B%Y0Z$Q?>nUM>4j^bfw*;1?7r+60xUPo9(W&|&^oT;a@bTkJVP4nOb5oLOYAoGY^| z>@tZH*DaV%p0uv#(+p9PnhUgFPkk`W)FXD$eaz>F=s!^&>0~1{ES@SJlI@cy4*qO* zghi~gG}})bU+q;X91YvkRrm=Jpmn}wrGe;mNiq8km?#^Zoseh|M#G!h0{+hKgy$*WEQ7i&H-iTlMDanEWgZP zYJ=TJrgJ?pRqix|H{A+~R@uuuKP#KxD$PMjYk$jz#B3&A8h+$4P%pZby__a#I!uikmNX z+A9Cy&f@dn=R1He(a~cmrf0Ic=#k~A+qJuhY|CW}Q$LQ@?_{o#-218?gVO?e{5)8F zI;T0b6}F6AwY58-B*;nqjk{KpLyZ3!zFcjE#Cy?pPtbB>nxoO8b8l#FI-WY6WhGrZ z;kfVudq5ugw`l8}U8Hi-fZ+46;G4Mix1qTywAUj{#d4HM_KcrTo>r--{|#%FQLa3M zk~l(x8ZaR(rlMcLaE}N{M*MLL|oMtD;Aks+Dt1b|Chw6$BRw;aAp1@;#7ak_iR2=~~Rg>-Q zKB#%FNMn@U+qK~N4C-?N2_DwBS z!4oQ{!p3v_t6Y1Ftw~vRQi?UELBd(xxIu~XliP^uEF8ZCq4j3F+ZU(>ntb8;WdNX& zx()4O$Anj_4H+6i-X|$#e4Q_#(iVRYs_O#Y&8EnMsqLzBU zl&y@lMCJP*6maUTcW3P+3Ub&tRIVoUDm)dW_d87XO}YIsaXtDC10I5$~SrtGDLV3Q4z!K&8{XNfIE;^ zE*`o|n^2tj015*e&#oF!C$C(soeS(cGB4}#VJ6$(92*>ht;^jB*@#R+HYe*&#BHAL zZ(msDD}Ch~`9i&;#LOAAA=Jx{Z;PGoj$nF1jHXkVo3x0)X>q}Ox4}!{%T%K10X~|e z{Wb)5W=A-EOs}k-$7X$Ele|`w*<&5DJ=5kJU`LOG*4rP=mful7)m>6){2{8GyJddW z#RX*mmzYMp9{C+1tg^$z{%3rY!%tvw>{XeQY4-1;G56>N#mkT`gEVj7wiJDOe1)jD zVer=+tx0QP<7|aFmI@qkjZK9PMk%$7$QA|KU2Djl%(Itb%|YOXtKuy2acv@zxfx4C z28^-TTH%k0QBdh!N9fdxfEL42V9#}qVnUrpAJ%TWhd(E>n6kw@v#XyPf14%#+w!gH 
[... base85-encoded binary-patch payload elided (unreadable data) ...]
z_&I*)Z-3j-9Q;`oME_nHeya*U4R7}u+lHHpzPe_zF>Ek8kZt+#b3BVOVSL?T_@BCt zUuqCv-{0!czwx%S-#Pe;H#0`o+u>~|wg0b<1b=a~Y`JTvqdybq|JKWPV%{m)x0|$| z<5^U4F7r+=|8(;Hqn2$aR@?4&ipcFg;OBT2_2~I8?*7BB;CK5wrOB^c2+I4#{$CQN zJH2f)Z)d&RmODSkv#99&|IynoNc}hBXQ#96Q@1m}ZkN!XgK_E#esT6EAN~?TJ41Lo jS^pf*q8=3e;$|nm|3gnKtlQT1Fb-aZbVQXf-T?mv1N+(# diff --git a/test/test_cost.py b/test/test_cost.py index 6153e1ae712..12c2a6b6d6f 100644 --- a/test/test_cost.py +++ b/test/test_cost.py @@ -5985,8 +5985,6 @@ def test_ppo_shared_seq(self, loss_class, device, advantage, separate_losses): @pytest.mark.parametrize("advantage", ("gae", "vtrace", "td", "td_lambda", None)) @pytest.mark.parametrize("device", get_default_devices()) def test_ppo_diff(self, loss_class, device, gradient_mode, advantage): - if pack_version.parse(torch.__version__) > pack_version.parse("1.14"): - raise pytest.skip("make_functional_with_buffers needs to be changed") torch.manual_seed(self.seed) td = self._create_seq_mock_data_ppo(device=device) @@ -6018,21 +6016,31 @@ def test_ppo_diff(self, loss_class, device, gradient_mode, advantage): loss_fn = loss_class(actor, value, gamma=0.9, loss_critic_type="l2") - floss_fn, params, buffers = make_functional_with_buffers(loss_fn) + params = TensorDict.from_module(loss_fn, as_module=True) + # fill params with zero - for p in params: - p.data.zero_() + def zero_param(p): + if isinstance(p, nn.Parameter): + p.data.zero_() + + params.apply(zero_param) + # assert len(list(floss_fn.parameters())) == 0 - if advantage is not None: - advantage(td) - loss = floss_fn(params, buffers, td) + with params.to_module(loss_fn): + if advantage is not None: + advantage(td) + loss = loss_fn(td) loss_critic = loss["loss_critic"] loss_objective = loss["loss_objective"] + loss.get("loss_entropy", 0.0) loss_critic.backward(retain_graph=True) # check that grads are independent and non null named_parameters = loss_fn.named_parameters() - for (name, _), p in zip(named_parameters, params): + for name, p in params.items(True, True): + if isinstance(name, tuple): + name = "-".join(name) + if not isinstance(p, nn.Parameter): + continue if p.grad is not None and p.grad.norm() > 0.0: assert "actor" not in name assert "critic" in name @@ -6040,12 +6048,12 @@ def test_ppo_diff(self, loss_class, device, gradient_mode, advantage): assert "actor" in name assert "critic" not in name - for param in params: - param.grad = None + for p in params.values(True, True): + p.grad = None loss_objective.backward() named_parameters = loss_fn.named_parameters() - - for (name, other_p), p in zip(named_parameters, params): + for (name, other_p) in named_parameters: + p = params.get(tuple(name.split("."))) assert other_p.shape == p.shape assert other_p.dtype == p.dtype assert other_p.device == p.device @@ -6055,7 +6063,7 @@ def test_ppo_diff(self, loss_class, device, gradient_mode, advantage): if p.grad is None: assert "actor" not in name assert "critic" in name - for param in params: + for param in params.values(True, True): param.grad = None @pytest.mark.parametrize("loss_class", (PPOLoss, ClipPPOLoss, KLPENPPOLoss)) diff --git a/torchrl/data/rlhf/dataset.py b/torchrl/data/rlhf/dataset.py index aa8f02d98cb..09086bfad65 100644 --- a/torchrl/data/rlhf/dataset.py +++ b/torchrl/data/rlhf/dataset.py @@ -137,7 +137,7 @@ def load(self): data_dir = root_dir / str(Path(self.dataset_name).name).split("-")[0] data_dir_total = data_dir / split / str(max_length) # search for data - print(data_dir_total) + print("Looking for data in", data_dir_total) if os.path.exists(data_dir_total): 
dataset = TensorDict.load_memmap(data_dir_total) return dataset diff --git a/torchrl/objectives/common.py b/torchrl/objectives/common.py index 367882a5bca..1a99ffca108 100644 --- a/torchrl/objectives/common.py +++ b/torchrl/objectives/common.py @@ -10,6 +10,7 @@ from dataclasses import dataclass from typing import Iterator, List, Optional, Tuple +import torch from tensordict import TensorDict, TensorDictBase from tensordict.nn import TensorDictModule, TensorDictModuleBase, TensorDictParams @@ -288,11 +289,11 @@ def _compare_and_expand(param): # set the functional module: we need to convert the params to non-differentiable params # otherwise they will appear twice in parameters - p = TensorDict.from_module(module) - with params.detach().to("meta").to_module(module): + with params.apply(_make_meta_params, device=torch.device("meta")).to_module( + module + ): # avoid buffers and params being exposed self.__dict__[module_name] = deepcopy(module) - assert (p == TensorDict.from_module(module)).all() name_params_target = "target_" + module_name if create_target_params: @@ -445,3 +446,13 @@ def __call__(self, x): x.data.clone() if self.clone else x.data, requires_grad=False ) return x.data.clone() if self.clone else x.data + + +def _make_meta_params(param): + is_param = isinstance(param, nn.Parameter) + + pd = param.detach().to("meta") + + if is_param: + pd = nn.Parameter(pd, requires_grad=False) + return pd diff --git a/torchrl/objectives/sac.py b/torchrl/objectives/sac.py index 0752acb3be8..d0617dedc74 100644 --- a/torchrl/objectives/sac.py +++ b/torchrl/objectives/sac.py @@ -370,7 +370,7 @@ def __init__( self._target_entropy = target_entropy self._action_spec = action_spec if self._version == 1: - self.actor_critic = ActorCriticWrapper( + self.__dict__["actor_critic"] = ActorCriticWrapper( self.actor_network, self.value_network ) if gamma is not None: diff --git a/torchrl/objectives/value/advantages.py b/torchrl/objectives/value/advantages.py index fee53b5f4d4..f3aff0da1d2 100644 --- a/torchrl/objectives/value/advantages.py +++ b/torchrl/objectives/value/advantages.py @@ -294,7 +294,7 @@ def __init__( self._tensor_keys = None self.differentiable = differentiable self.skip_existing = skip_existing - self.value_network = value_network + self.__dict__["value_network"] = value_network self.dep_keys = {} self.shifted = shifted @@ -471,6 +471,7 @@ class TD0Estimator(ValueEstimatorBase): of the advantage entry. Defaults to ``"value_target"``. value_key (str or tuple of str, optional): [Deprecated] the value key to read from the input tensordict. Defaults to ``"state_value"``. + device (torch.device, optional): device of the module. """ @@ -486,6 +487,7 @@ def __init__( value_target_key: NestedKey = None, value_key: NestedKey = None, skip_existing: Optional[bool] = None, + device: Optional[torch.device] = None, ): super().__init__( value_network=value_network, @@ -496,10 +498,6 @@ def __init__( value_key=value_key, skip_existing=skip_existing, ) - try: - device = next(value_network.parameters()).device - except (AttributeError, StopIteration): - device = torch.device("cpu") self.register_buffer("gamma", torch.tensor(gamma, device=device)) self.average_rewards = average_rewards @@ -675,6 +673,7 @@ class TD1Estimator(ValueEstimatorBase): estimation, for instance) and (2) when the parameters used at time ``t`` and ``t+1`` are identical (which is not the case when target parameters are to be used). Defaults to ``False``. + device (torch.device, optional): device of the module. 
""" @@ -690,6 +689,7 @@ def __init__( value_target_key: NestedKey = None, value_key: NestedKey = None, shifted: bool = False, + device: Optional[torch.device] = None, ): super().__init__( value_network=value_network, @@ -700,10 +700,6 @@ def __init__( shifted=shifted, skip_existing=skip_existing, ) - try: - device = next(value_network.parameters()).device - except (AttributeError, StopIteration): - device = torch.device("cpu") self.register_buffer("gamma", torch.tensor(gamma, device=device)) self.average_rewards = average_rewards @@ -883,6 +879,7 @@ class TDLambdaEstimator(ValueEstimatorBase): estimation, for instance) and (2) when the parameters used at time ``t`` and ``t+1`` are identical (which is not the case when target parameters are to be used). Defaults to ``False``. + device (torch.device, optional): device of the module. """ @@ -900,6 +897,7 @@ def __init__( value_target_key: NestedKey = None, value_key: NestedKey = None, shifted: bool = False, + device: Optional[torch.device] = None, ): super().__init__( value_network=value_network, @@ -910,10 +908,6 @@ def __init__( skip_existing=skip_existing, shifted=shifted, ) - try: - device = next(value_network.parameters()).device - except (AttributeError, StopIteration): - device = torch.device("cpu") self.register_buffer("gamma", torch.tensor(gamma, device=device)) self.register_buffer("lmbda", torch.tensor(lmbda, device=device)) self.average_rewards = average_rewards @@ -1113,6 +1107,7 @@ class GAE(ValueEstimatorBase): estimation, for instance) and (2) when the parameters used at time ``t`` and ``t+1`` are identical (which is not the case when target parameters are to be used). Defaults to ``False``. + device (torch.device, optional): device of the module. GAE will return an :obj:`"advantage"` entry containing the advange value. It will also return a :obj:`"value_target"` entry with the return value that is to be used @@ -1142,6 +1137,7 @@ def __init__( value_target_key: NestedKey = None, value_key: NestedKey = None, shifted: bool = False, + device: Optional[torch.device] = None, ): super().__init__( shifted=shifted, @@ -1152,10 +1148,6 @@ def __init__( value_key=value_key, skip_existing=skip_existing, ) - try: - device = next(value_network.parameters()).device - except (AttributeError, StopIteration): - device = torch.device("cpu") self.register_buffer("gamma", torch.tensor(gamma, device=device)) self.register_buffer("lmbda", torch.tensor(lmbda, device=device)) self.average_gae = average_gae @@ -1403,6 +1395,7 @@ class VTrace(ValueEstimatorBase): estimation, for instance) and (2) when the parameters used at time ``t`` and ``t+1`` are identical (which is not the case when target parameters are to be used). Defaults to ``False``. + device (torch.device, optional): device of the module. VTrace will return an :obj:`"advantage"` entry containing the advantage value. It will also return a :obj:`"value_target"` entry with the V-Trace target value. 
@@ -1429,6 +1422,7 @@ def __init__( value_target_key: Optional[NestedKey] = None, value_key: Optional[NestedKey] = None, shifted: bool = False, + device: Optional[torch.device] = None, ): super().__init__( shifted=shifted, @@ -1439,11 +1433,6 @@ def __init__( value_key=value_key, skip_existing=skip_existing, ) - try: - device = next(value_network.parameters()).device - except (AttributeError, StopIteration): - device = torch.device("cpu") - if not isinstance(gamma, torch.Tensor): gamma = torch.tensor(gamma, device=device) if not isinstance(rho_thresh, torch.Tensor): diff --git a/tutorials/sphinx-tutorials/rb_tutorial.py b/tutorials/sphinx-tutorials/rb_tutorial.py index 5237b344e56..be6e607c1b5 100644 --- a/tutorials/sphinx-tutorials/rb_tutorial.py +++ b/tutorials/sphinx-tutorials/rb_tutorial.py @@ -46,6 +46,7 @@ # replay buffer is a straightforward process, as shown in the following # example: # +import tempfile from torchrl.data import ReplayBuffer @@ -175,9 +176,8 @@ ###################################################################### # We can also customize the storage location on disk: # -buffer_lazymemmap = ReplayBuffer( - storage=LazyMemmapStorage(size, scratch_dir="/tmp/memmap/") -) +tempdir = tempfile.TemporaryDirectory() +buffer_lazymemmap = ReplayBuffer(storage=LazyMemmapStorage(size, scratch_dir=tempdir)) buffer_lazymemmap.extend(data) print(f"The buffer has {len(buffer_lazymemmap)} elements") print("the 'a' tensor is stored in", buffer_lazymemmap._storage._storage["a"].filename) @@ -207,8 +207,9 @@ from torchrl.data import TensorDictReplayBuffer +tempdir = tempfile.TemporaryDirectory() buffer_lazymemmap = TensorDictReplayBuffer( - storage=LazyMemmapStorage(size, scratch_dir="/tmp/memmap/"), batch_size=12 + storage=LazyMemmapStorage(size, scratch_dir=tempdir), batch_size=12 ) buffer_lazymemmap.extend(data) print(f"The buffer has {len(buffer_lazymemmap)} elements") @@ -248,8 +249,9 @@ class MyData: batch_size=[1000], ) +tempdir = tempfile.TemporaryDirectory() buffer_lazymemmap = TensorDictReplayBuffer( - storage=LazyMemmapStorage(size, scratch_dir="/tmp/memmap/"), batch_size=12 + storage=LazyMemmapStorage(size, scratch_dir=tempdir), batch_size=12 ) buffer_lazymemmap.extend(data) print(f"The buffer has {len(buffer_lazymemmap)} elements") diff --git a/tutorials/sphinx-tutorials/torchrl_envs.py b/tutorials/sphinx-tutorials/torchrl_envs.py index ccb1c9d4ea7..ef995030c9d 100644 --- a/tutorials/sphinx-tutorials/torchrl_envs.py +++ b/tutorials/sphinx-tutorials/torchrl_envs.py @@ -25,6 +25,7 @@ # will pass the arguments and keyword arguments to the root library builder. 
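For reference, the temporary scratch-directory pattern introduced in the replay-buffer tutorial hunks above can be exercised as follows. This is a minimal sketch and not part of the patch: the buffer size and data are illustrative, and the temporary directory's ``name`` attribute is used here as one way to hand a plain path to ``LazyMemmapStorage``.

    >>> # Sketch of the scratch_dir pattern from the rb_tutorial change above.
    >>> # Assumes torchrl and tensordict are installed; sizes/data are illustrative.
    >>> import tempfile
    >>> import torch
    >>> from tensordict import TensorDict
    >>> from torchrl.data import LazyMemmapStorage, ReplayBuffer
    >>> size = 100
    >>> data = TensorDict({"a": torch.zeros(10, 3)}, batch_size=[10])
    >>> tmpdir = tempfile.TemporaryDirectory()  # cleaned up when garbage-collected
    >>> buffer = ReplayBuffer(storage=LazyMemmapStorage(size, scratch_dir=tmpdir.name))
    >>> indices = buffer.extend(data)           # memmap files land under tmpdir
    >>> print(len(buffer))
    10
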
# # With gym, it means that building an environment is as easy as: + # sphinx_gallery_start_ignore import warnings From 38d9cb7c73f9382ed6e9d916c7609a222aa9dcab Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Mon, 27 Nov 2023 13:30:46 +0000 Subject: [PATCH 16/21] [BugFix] Make casting to 'meta' device uniform across cost modules (#1715) --- torchrl/envs/transforms/rlhf.py | 2 +- torchrl/objectives/common.py | 26 ++++++++++++------------- torchrl/objectives/ddpg.py | 2 +- torchrl/objectives/multiagent/qmixer.py | 4 +++- 4 files changed, 18 insertions(+), 16 deletions(-) diff --git a/torchrl/envs/transforms/rlhf.py b/torchrl/envs/transforms/rlhf.py index 48464d9f9c4..79ee94318cb 100644 --- a/torchrl/envs/transforms/rlhf.py +++ b/torchrl/envs/transforms/rlhf.py @@ -112,7 +112,7 @@ def __init__( # check that the model has parameters params = TensorDict.from_module(actor) - with params.apply(_stateless_param).to_module(actor): + with params.apply(_stateless_param, device="meta").to_module(actor): # copy a stateless actor self.__dict__["functional_actor"] = deepcopy(actor) # we need to register these params as buffer to have `to` and similar diff --git a/torchrl/objectives/common.py b/torchrl/objectives/common.py index 1a99ffca108..76e5ef10900 100644 --- a/torchrl/objectives/common.py +++ b/torchrl/objectives/common.py @@ -289,9 +289,9 @@ def _compare_and_expand(param): # set the functional module: we need to convert the params to non-differentiable params # otherwise they will appear twice in parameters - with params.apply(_make_meta_params, device=torch.device("meta")).to_module( - module - ): + with params.apply( + self._make_meta_params, device=torch.device("meta") + ).to_module(module): # avoid buffers and params being exposed self.__dict__[module_name] = deepcopy(module) @@ -435,6 +435,16 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams return self + @staticmethod + def _make_meta_params(param): + is_param = isinstance(param, nn.Parameter) + + pd = param.detach().to("meta") + + if is_param: + pd = nn.Parameter(pd, requires_grad=False) + return pd + class _make_target_param: def __init__(self, clone): @@ -446,13 +456,3 @@ def __call__(self, x): x.data.clone() if self.clone else x.data, requires_grad=False ) return x.data.clone() if self.clone else x.data - - -def _make_meta_params(param): - is_param = isinstance(param, nn.Parameter) - - pd = param.detach().to("meta") - - if is_param: - pd = nn.Parameter(pd, requires_grad=False) - return pd diff --git a/torchrl/objectives/ddpg.py b/torchrl/objectives/ddpg.py index 7d94a5eb07b..a72b84f69e4 100644 --- a/torchrl/objectives/ddpg.py +++ b/torchrl/objectives/ddpg.py @@ -198,7 +198,7 @@ def __init__( actor_critic = ActorCriticWrapper(actor_network, value_network) params = TensorDict.from_module(actor_critic) - params_meta = params.detach().to("meta") + params_meta = params.apply(self._make_meta_params, device=torch.device("meta")) with params_meta.to_module(actor_critic): self.actor_critic = deepcopy(actor_critic) diff --git a/torchrl/objectives/multiagent/qmixer.py b/torchrl/objectives/multiagent/qmixer.py index 61abab6216f..35e03d35744 100644 --- a/torchrl/objectives/multiagent/qmixer.py +++ b/torchrl/objectives/multiagent/qmixer.py @@ -213,7 +213,9 @@ def __init__( global_value_network = SafeSequential(local_value_network, mixer_network) params = TensorDict.from_module(global_value_network) - with params.detach().to("meta").to_module(global_value_network): + with params.apply( + self._make_meta_params, 
device=torch.device("meta") + ).to_module(global_value_network): self.global_value_network = deepcopy(global_value_network) self.convert_to_functional( From aedcf297fadca2108885ddcb4a10210af44bf086 Mon Sep 17 00:00:00 2001 From: Albert Bou Date: Mon, 27 Nov 2023 14:31:57 +0100 Subject: [PATCH 17/21] [BugFix] Change ppo mujoco example to match paper results (#1714) --- examples/a2c/a2c_mujoco.py | 4 ++-- examples/ppo/config_mujoco.yaml | 2 +- examples/ppo/ppo_mujoco.py | 14 ++++++++------ examples/ppo/utils_mujoco.py | 8 +++++--- 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/examples/a2c/a2c_mujoco.py b/examples/a2c/a2c_mujoco.py index 48844dee6b6..7f9e588bbf6 100644 --- a/examples/a2c/a2c_mujoco.py +++ b/examples/a2c/a2c_mujoco.py @@ -101,9 +101,9 @@ def main(cfg: "DictConfig"): # noqa: F821 pbar.update(data.numel()) # Get training rewards and lengths - episode_rewards = data["next", "episode_reward"][data["next", "terminated"]] + episode_rewards = data["next", "episode_reward"][data["next", "done"]] if len(episode_rewards) > 0: - episode_length = data["next", "step_count"][data["next", "terminated"]] + episode_length = data["next", "step_count"][data["next", "done"]] log_info.update( { "train/reward": episode_rewards.mean().item(), diff --git a/examples/ppo/config_mujoco.yaml b/examples/ppo/config_mujoco.yaml index 1272c1f4bff..0322526e7b1 100644 --- a/examples/ppo/config_mujoco.yaml +++ b/examples/ppo/config_mujoco.yaml @@ -18,7 +18,7 @@ logger: optim: lr: 3e-4 weight_decay: 0.0 - anneal_lr: False + anneal_lr: True # loss loss: diff --git a/examples/ppo/ppo_mujoco.py b/examples/ppo/ppo_mujoco.py index 988bc5300bf..52b12f688e1 100644 --- a/examples/ppo/ppo_mujoco.py +++ b/examples/ppo/ppo_mujoco.py @@ -28,7 +28,6 @@ def main(cfg: "DictConfig"): # noqa: F821 from torchrl.record.loggers import generate_exp_name, get_logger from utils_mujoco import eval_model, make_env, make_ppo_models - # Define paper hyperparameters device = "cpu" if not torch.cuda.device_count() else "cuda" num_mini_batches = cfg.collector.frames_per_batch // cfg.loss.mini_batch_size total_network_updates = ( @@ -67,6 +66,7 @@ def main(cfg: "DictConfig"): # noqa: F821 value_network=critic, average_gae=False, ) + loss_module = ClipPPOLoss( actor=actor, critic=critic, @@ -78,8 +78,8 @@ def main(cfg: "DictConfig"): # noqa: F821 ) # Create optimizers - actor_optim = torch.optim.Adam(actor.parameters(), lr=cfg.optim.lr) - critic_optim = torch.optim.Adam(critic.parameters(), lr=cfg.optim.lr) + actor_optim = torch.optim.Adam(actor.parameters(), lr=cfg.optim.lr, eps=1e-5) + critic_optim = torch.optim.Adam(critic.parameters(), lr=cfg.optim.lr, eps=1e-5) # Create logger logger = None @@ -120,9 +120,9 @@ def main(cfg: "DictConfig"): # noqa: F821 pbar.update(data.numel()) # Get training rewards and episode lengths - episode_rewards = data["next", "episode_reward"][data["next", "terminated"]] + episode_rewards = data["next", "episode_reward"][data["next", "done"]] if len(episode_rewards) > 0: - episode_length = data["next", "step_count"][data["next", "terminated"]] + episode_length = data["next", "step_count"][data["next", "done"]] log_info.update( { "train/reward": episode_rewards.mean().item(), @@ -187,7 +187,9 @@ def main(cfg: "DictConfig"): # noqa: F821 "train/lr": alpha * cfg_optim_lr, "train/sampling_time": sampling_time, "train/training_time": training_time, - "train/clip_epsilon": alpha * cfg_loss_clip_epsilon, + "train/clip_epsilon": alpha * cfg_loss_clip_epsilon + if cfg_loss_anneal_clip_eps + else 
cfg_loss_clip_epsilon, } ) diff --git a/examples/ppo/utils_mujoco.py b/examples/ppo/utils_mujoco.py index 8fa2a53fd92..7be234b322d 100644 --- a/examples/ppo/utils_mujoco.py +++ b/examples/ppo/utils_mujoco.py @@ -28,10 +28,10 @@ def make_env(env_name="HalfCheetah-v4", device="cpu"): env = GymEnv(env_name, device=device) env = TransformedEnv(env) + env.append_transform(VecNorm(in_keys=["observation"], decay=0.99999, eps=1e-2)) + env.append_transform(ClipTransform(in_keys=["observation"], low=-10, high=10)) env.append_transform(RewardSum()) env.append_transform(StepCounter()) - env.append_transform(VecNorm(in_keys=["observation"])) - env.append_transform(ClipTransform(in_keys=["observation"], low=-10, high=10)) env.append_transform(DoubleToFloat(in_keys=["observation"])) return env @@ -72,7 +72,9 @@ def make_ppo_models_state(proof_environment): # Add state-independent normal scale policy_mlp = torch.nn.Sequential( policy_mlp, - AddStateIndependentNormalScale(proof_environment.action_spec.shape[-1]), + AddStateIndependentNormalScale( + proof_environment.action_spec.shape[-1], scale_lb=1e-8 + ), ) # Add probabilistic sampling of the actions From 07fcfb1cff2e897fff406ef45c2e5ff7ea57a14f Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Mon, 27 Nov 2023 15:12:44 +0000 Subject: [PATCH 18/21] [Minor] Hide params in ddpg actor-critic (#1716) --- torchrl/objectives/common.py | 12 +++--------- torchrl/objectives/ddpg.py | 2 +- torchrl/objectives/decision_transformer.py | 7 ++++--- torchrl/objectives/multiagent/qmixer.py | 13 +++++++------ 4 files changed, 15 insertions(+), 19 deletions(-) diff --git a/torchrl/objectives/common.py b/torchrl/objectives/common.py index 76e5ef10900..00ba8cf456a 100644 --- a/torchrl/objectives/common.py +++ b/torchrl/objectives/common.py @@ -179,15 +179,9 @@ def convert_to_functional( Args: module (TensorDictModule or compatible): a stateful tensordict module. - This module will be made functional, yet still stateful, meaning - that it will be callable with the following alternative signatures: - - >>> module(tensordict) - >>> module(tensordict, params=params) - - ``params`` is a :class:`tensordict.TensorDict` instance with parameters - stuctured as the output of :func:`tensordict.TensorDict.from_module` - is. + Parameters from this module will be isolated in the `_params` + attribute and a stateless version of the module will be registed + under the `module_name` attribute. module_name (str): name where the module will be found. The parameters of the module will be found under ``loss_module._params`` whereas the module will be found under ``loss_module.``. 
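The ``self.__dict__[...] = deepcopy(module)`` assignments in the hunks above rely on the fact that writing directly to ``__dict__`` bypasses ``nn.Module.__setattr__``, so the copied wrapper is not registered as a submodule and its parameters do not show up a second time in ``parameters()``; the meta-device swap applied just before the deepcopy additionally strips the stateful parameters from the copy. A minimal, self-contained illustration of the registration mechanism (independent of torchrl, class name is made up):

    >>> import torch
    >>> from torch import nn
    >>> class Holder(nn.Module):
    ...     def __init__(self, net):
    ...         super().__init__()
    ...         self.registered = net            # registered submodule: params exposed
    ...         self.__dict__["hidden"] = net    # plain attribute: not registered
    ...
    >>> holder = Holder(nn.Linear(2, 2))
    >>> len(list(holder.parameters()))           # weight + bias, counted once
    2
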
diff --git a/torchrl/objectives/ddpg.py b/torchrl/objectives/ddpg.py index a72b84f69e4..3b4debe6259 100644 --- a/torchrl/objectives/ddpg.py +++ b/torchrl/objectives/ddpg.py @@ -200,7 +200,7 @@ def __init__( params = TensorDict.from_module(actor_critic) params_meta = params.apply(self._make_meta_params, device=torch.device("meta")) with params_meta.to_module(actor_critic): - self.actor_critic = deepcopy(actor_critic) + self.__dict__["actor_critic"] = deepcopy(actor_critic) self.convert_to_functional( actor_network, diff --git a/torchrl/objectives/decision_transformer.py b/torchrl/objectives/decision_transformer.py index ba7e2d4ba3f..52339d583dd 100644 --- a/torchrl/objectives/decision_transformer.py +++ b/torchrl/objectives/decision_transformer.py @@ -317,9 +317,10 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase: tensordict = tensordict.clone(False) target_actions = tensordict.get(self.tensor_keys.action_target).detach() - pred_actions = self.actor_network( - tensordict, params=self.actor_network_params - ).get(self.tensor_keys.action_pred) + with self.actor_network_params.to_module(self.actor_network): + pred_actions = self.actor_network(tensordict).get( + self.tensor_keys.action_pred + ) loss = distance_loss( pred_actions, target_actions, diff --git a/torchrl/objectives/multiagent/qmixer.py b/torchrl/objectives/multiagent/qmixer.py index 35e03d35744..23947696c9f 100644 --- a/torchrl/objectives/multiagent/qmixer.py +++ b/torchrl/objectives/multiagent/qmixer.py @@ -216,7 +216,7 @@ def __init__( with params.apply( self._make_meta_params, device=torch.device("meta") ).to_module(global_value_network): - self.global_value_network = deepcopy(global_value_network) + self.__dict__["global_value_network"] = deepcopy(global_value_network) self.convert_to_functional( local_value_network, @@ -327,10 +327,10 @@ def make_value_estimator(self, value_type: ValueEstimators = None, **hyperparams @dispatch def forward(self, tensordict: TensorDictBase) -> TensorDict: td_copy = tensordict.clone(False) - self.local_value_network( - td_copy, - params=self.local_value_network_params, - ) + with self.local_value_network_params.to_module(self.local_value_network): + self.local_value_network( + td_copy, + ) action = tensordict.get(self.tensor_keys.action) pred_val = td_copy.get( @@ -347,7 +347,8 @@ def forward(self, tensordict: TensorDictBase) -> TensorDict: pred_val_index = (pred_val * action).sum(-1, keepdim=True) td_copy.set(self.tensor_keys.local_value, pred_val_index) # [*B, n_agents, 1] - self.mixer_network(td_copy, params=self.mixer_network_params) + with self.mixer_network_params.to_module(self.mixer_network): + self.mixer_network(td_copy) pred_val_index = td_copy.get(self.tensor_keys.global_value).squeeze(-1) # [*B] this is global and shared among the agents as will be the target From 2a72e6d07f5bcd7e02f4f3e0073fda91b96d155d Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Tue, 28 Nov 2023 16:46:28 +0000 Subject: [PATCH 19/21] [BugFix] Fix hold_out_net (#1719) --- torchrl/objectives/utils.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/torchrl/objectives/utils.py b/torchrl/objectives/utils.py index 1f1fc04e58d..c3e7dbc68ce 100644 --- a/torchrl/objectives/utils.py +++ b/torchrl/objectives/utils.py @@ -363,13 +363,19 @@ class hold_out_net(_context_manager): def __init__(self, network: nn.Module) -> None: self.network = network + for p in network.parameters(): + self.mode = p.requires_grad + break + else: + self.mode = True def __enter__(self) -> None: - 
self.params = TensorDict.from_module(self.network) - self.params.detach().to_module(self.network, return_swap=False) + if self.mode: + self.network.requires_grad_(False) def __exit__(self, exc_type, exc_val, exc_tb) -> None: - self.params.to_module(self.network, return_swap=False) + if self.mode: + self.network.requires_grad_() class hold_out_params(_context_manager): From 2e7f574529fd4e6bd2f661b0d59bd22623e4fb49 Mon Sep 17 00:00:00 2001 From: Matteo Bettini <55539777+matteobettini@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:41:54 +0000 Subject: [PATCH 20/21] [BugFix] `RewardSum` key check (#1718) --- test/test_transforms.py | 34 +++++++++++++++++++++++++++ torchrl/envs/transforms/transforms.py | 13 +++++++--- 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index da8bc12c126..cff1d33b34a 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -9,6 +9,7 @@ import itertools import pickle +import re import sys from copy import copy from functools import partial @@ -4878,6 +4879,39 @@ def test_sum_reward(self, keys, device): def test_transform_inverse(self): raise pytest.skip("No inverse for RewardSum") + @pytest.mark.parametrize("in_keys", [["reward"], ["reward_1", "reward_2"]]) + @pytest.mark.parametrize( + "out_keys", [["episode_reward"], ["episode_reward_1", "episode_reward_2"]] + ) + @pytest.mark.parametrize("reset_keys", [["_reset"], ["_reset1", "_reset2"]]) + def test_keys_length_errors(self, in_keys, reset_keys, out_keys, batch=10): + reset_dict = { + reset_key: torch.zeros(batch, dtype=torch.bool) for reset_key in reset_keys + } + reward_sum_dict = {out_key: torch.randn(batch) for out_key in out_keys} + reset_dict.update(reward_sum_dict) + td = TensorDict(reset_dict, []) + + if len(in_keys) != len(out_keys): + with pytest.raises( + ValueError, + match="RewardSum expects the same number of input and output keys", + ): + RewardSum(in_keys=in_keys, reset_keys=reset_keys, out_keys=out_keys) + else: + t = RewardSum(in_keys=in_keys, reset_keys=reset_keys, out_keys=out_keys) + + if len(in_keys) != len(reset_keys): + with pytest.raises( + ValueError, + match=re.escape( + f"Could not match the env reset_keys {reset_keys} with the in_keys {in_keys}" + ), + ): + t.reset(td) + else: + t.reset(td) + class TestReward2Go(TransformBase): @pytest.mark.parametrize("device", get_default_devices()) diff --git a/torchrl/envs/transforms/transforms.py b/torchrl/envs/transforms/transforms.py index 3e6d597dffd..de8baf2e403 100644 --- a/torchrl/envs/transforms/transforms.py +++ b/torchrl/envs/transforms/transforms.py @@ -4692,6 +4692,7 @@ def __init__( """Initialises the transform. Filters out non-reward input keys and defines output keys.""" super().__init__(in_keys=in_keys, out_keys=out_keys) self._reset_keys = reset_keys + self._keys_checked = False @property def in_keys(self): @@ -4770,9 +4771,7 @@ def _check_match(reset_keys, in_keys): return False return True - if len(reset_keys) != len(self.in_keys) or not _check_match( - reset_keys, self.in_keys - ): + if not _check_match(reset_keys, self.in_keys): raise ValueError( f"Could not match the env reset_keys {reset_keys} with the {type(self)} in_keys {self.in_keys}. " f"Please provide the reset_keys manually. 
Reset entries can be " @@ -4781,6 +4780,14 @@ def _check_match(reset_keys, in_keys): ) reset_keys = copy(reset_keys) self._reset_keys = reset_keys + + if not self._keys_checked and len(reset_keys) != len(self.in_keys): + raise ValueError( + f"Could not match the env reset_keys {reset_keys} with the in_keys {self.in_keys}. " + "Please make sure that these have the same length." + ) + self._keys_checked = True + return reset_keys @reset_keys.setter From 6c27bdb048694cc90a0382f0faff67af7fa19ab1 Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Thu, 30 Nov 2023 08:02:44 +0000 Subject: [PATCH 21/21] [Feature] Allow usage of a different device on main and sub-envs in ParallelEnv and SerialEnv (#1626) --- benchmarks/ecosystem/gym_env_throughput.py | 38 ++-- examples/dreamer/dreamer_utils.py | 25 ++- test/test_env.py | 42 ++++ torchrl/envs/batched_envs.py | 234 ++++++++++++++------- torchrl/envs/utils.py | 12 +- 5 files changed, 237 insertions(+), 114 deletions(-) diff --git a/benchmarks/ecosystem/gym_env_throughput.py b/benchmarks/ecosystem/gym_env_throughput.py index 71b7a481ce0..246c5ee15f0 100644 --- a/benchmarks/ecosystem/gym_env_throughput.py +++ b/benchmarks/ecosystem/gym_env_throughput.py @@ -76,12 +76,12 @@ def make(envname=envname, gym_backend=gym_backend): # regular parallel env for device in avail_devices: - def make(envname=envname, gym_backend=gym_backend, device=device): + def make(envname=envname, gym_backend=gym_backend): with set_gym_backend(gym_backend): - return GymEnv(envname, device=device) + return GymEnv(envname, device="cpu") # env_make = EnvCreator(make) - penv = ParallelEnv(num_workers, EnvCreator(make)) + penv = ParallelEnv(num_workers, EnvCreator(make), device=device) with torch.inference_mode(): # warmup penv.rollout(2) @@ -103,13 +103,13 @@ def make(envname=envname, gym_backend=gym_backend, device=device): for device in avail_devices: - def make(envname=envname, gym_backend=gym_backend, device=device): + def make(envname=envname, gym_backend=gym_backend): with set_gym_backend(gym_backend): - return GymEnv(envname, device=device) + return GymEnv(envname, device="cpu") env_make = EnvCreator(make) # penv = SerialEnv(num_workers, env_make) - penv = ParallelEnv(num_workers, env_make) + penv = ParallelEnv(num_workers, env_make, device=device) collector = SyncDataCollector( penv, RandomPolicy(penv.action_spec), @@ -164,14 +164,14 @@ def make_env( for device in avail_devices: # async collector # + torchrl parallel env - def make_env( - envname=envname, gym_backend=gym_backend, device=device - ): + def make_env(envname=envname, gym_backend=gym_backend): with set_gym_backend(gym_backend): - return GymEnv(envname, device=device) + return GymEnv(envname, device="cpu") penv = ParallelEnv( - num_workers // num_collectors, EnvCreator(make_env) + num_workers // num_collectors, + EnvCreator(make_env), + device=device, ) collector = MultiaSyncDataCollector( [penv] * num_collectors, @@ -206,10 +206,9 @@ def make_env( envname=envname, num_workers=num_workers, gym_backend=gym_backend, - device=device, ): with set_gym_backend(gym_backend): - penv = GymEnv(envname, num_envs=num_workers, device=device) + penv = GymEnv(envname, num_envs=num_workers, device="cpu") return penv penv = EnvCreator( @@ -247,14 +246,14 @@ def make_env( for device in avail_devices: # sync collector # + torchrl parallel env - def make_env( - envname=envname, gym_backend=gym_backend, device=device - ): + def make_env(envname=envname, gym_backend=gym_backend): with set_gym_backend(gym_backend): - return 
GymEnv(envname, device=device) + return GymEnv(envname, device="cpu") penv = ParallelEnv( - num_workers // num_collectors, EnvCreator(make_env) + num_workers // num_collectors, + EnvCreator(make_env), + device=device, ) collector = MultiSyncDataCollector( [penv] * num_collectors, @@ -289,10 +288,9 @@ def make_env( envname=envname, num_workers=num_workers, gym_backend=gym_backend, - device=device, ): with set_gym_backend(gym_backend): - penv = GymEnv(envname, num_envs=num_workers, device=device) + penv = GymEnv(envname, num_envs=num_workers, device="cpu") return penv penv = EnvCreator( diff --git a/examples/dreamer/dreamer_utils.py b/examples/dreamer/dreamer_utils.py index fba4247e2a7..385e4a53aab 100644 --- a/examples/dreamer/dreamer_utils.py +++ b/examples/dreamer/dreamer_utils.py @@ -147,6 +147,7 @@ def transformed_env_constructor( state_dim_gsde: Optional[int] = None, batch_dims: Optional[int] = 0, obs_norm_state_dict: Optional[dict] = None, + ignore_device: bool = False, ) -> Union[Callable, EnvCreator]: """ Returns an environment creator from an argparse.Namespace built with the appropriate parser constructor. @@ -179,6 +180,7 @@ def transformed_env_constructor( it should be set to 1 (or the number of dims of the batch). obs_norm_state_dict (dict, optional): the state_dict of the ObservationNorm transform to be loaded into the environment + ignore_device (bool, optional): if True, the device is ignored. """ def make_transformed_env(**kwargs) -> TransformedEnv: @@ -189,14 +191,17 @@ def make_transformed_env(**kwargs) -> TransformedEnv: from_pixels = cfg.from_pixels if custom_env is None and custom_env_maker is None: - if isinstance(cfg.collector_device, str): - device = cfg.collector_device - elif isinstance(cfg.collector_device, Sequence): - device = cfg.collector_device[0] + if not ignore_device: + if isinstance(cfg.collector_device, str): + device = cfg.collector_device + elif isinstance(cfg.collector_device, Sequence): + device = cfg.collector_device[0] + else: + raise ValueError( + "collector_device must be either a string or a sequence of strings" + ) else: - raise ValueError( - "collector_device must be either a string or a sequence of strings" - ) + device = None env_kwargs = { "env_name": env_name, "device": device, @@ -252,19 +257,19 @@ def parallel_env_constructor( kwargs: keyword arguments for the `transformed_env_constructor` method. """ batch_transform = cfg.batch_transform + kwargs.update({"cfg": cfg, "use_env_creator": True}) if cfg.env_per_collector == 1: - kwargs.update({"cfg": cfg, "use_env_creator": True}) make_transformed_env = transformed_env_constructor(**kwargs) return make_transformed_env - kwargs.update({"cfg": cfg, "use_env_creator": True}) make_transformed_env = transformed_env_constructor( - return_transformed_envs=not batch_transform, **kwargs + return_transformed_envs=not batch_transform, ignore_device=True, **kwargs ) parallel_env = ParallelEnv( num_workers=cfg.env_per_collector, create_env_fn=make_transformed_env, create_env_kwargs=None, pin_memory=cfg.pin_memory, + device=cfg.collector_device, ) if batch_transform: kwargs.update( diff --git a/test/test_env.py b/test/test_env.py index 6cee7f545d7..aed4e07b0b7 100644 --- a/test/test_env.py +++ b/test/test_env.py @@ -354,6 +354,48 @@ def test_mb_env_batch_lock(self, device, seed=0): class TestParallel: + @pytest.mark.skipif( + not torch.cuda.device_count(), reason="No cuda device detected." 
+ ) + @pytest.mark.parametrize("parallel", [True, False]) + @pytest.mark.parametrize("hetero", [True, False]) + @pytest.mark.parametrize("pdevice", [None, "cpu", "cuda"]) + @pytest.mark.parametrize("edevice", ["cpu", "cuda"]) + @pytest.mark.parametrize("bwad", [True, False]) + def test_parallel_devices(self, parallel, hetero, pdevice, edevice, bwad): + if parallel: + cls = ParallelEnv + else: + cls = SerialEnv + if not hetero: + env = cls( + 2, lambda: ContinuousActionVecMockEnv(device=edevice), device=pdevice + ) + else: + env1 = lambda: ContinuousActionVecMockEnv(device=edevice) + env2 = lambda: TransformedEnv(ContinuousActionVecMockEnv(device=edevice)) + env = cls(2, [env1, env2], device=pdevice) + + r = env.rollout(2, break_when_any_done=bwad) + if pdevice is not None: + assert env.device.type == torch.device(pdevice).type + assert r.device.type == torch.device(pdevice).type + assert all( + item.device.type == torch.device(pdevice).type + for item in r.values(True, True) + ) + else: + assert env.device.type == torch.device(edevice).type + assert r.device.type == torch.device(edevice).type + assert all( + item.device.type == torch.device(edevice).type + for item in r.values(True, True) + ) + if parallel: + assert ( + env.shared_tensordict_parent.device.type == torch.device(edevice).type + ) + @pytest.mark.parametrize("num_parallel_env", [1, 10]) @pytest.mark.parametrize("env_batch_size", [[], (32,), (32, 1), (32, 0)]) def test_env_with_batch_size(self, num_parallel_env, env_batch_size): diff --git a/torchrl/envs/batched_envs.py b/torchrl/envs/batched_envs.py index f0e132eb092..ac0a136c7f9 100644 --- a/torchrl/envs/batched_envs.py +++ b/torchrl/envs/batched_envs.py @@ -122,11 +122,16 @@ class _BatchedEnv(EnvBase): memmap (bool): whether or not the returned tensordict will be placed in memory map. policy_proof (callable, optional): if provided, it'll be used to get the list of tensors to return through the :obj:`step()` and :obj:`reset()` methods, such as :obj:`"hidden"` etc. - device (str, int, torch.device): for consistency, this argument is kept. However this - argument should not be passed, as the device will be inferred from the environments. - It is assumed that all environments will run on the same device as a common shared - tensordict will be used to pass data from process to process. The device can be - changed after instantiation using :obj:`env.to(device)`. + device (str, int, torch.device): The device of the batched environment can be passed. + If not, it is inferred from the env. In this case, it is assumed that + the device of all environments match. If it is provided, it can differ + from the sub-environment device(s). In that case, the data will be + automatically cast to the appropriate device during collection. + This can be used to speed up collection in case casting to device + introduces an overhead (eg, numpy-based environents etc.): by using + a ``"cuda"`` device for the batched environment but a ``"cpu"`` + device for the nested environments, one can keep the overhead to a + minimum. num_threads (int, optional): number of threads for this process. Defaults to the number of workers. This parameter has no effect for the :class:`~SerialEnv` class. @@ -162,14 +167,7 @@ def __init__( num_threads: int = None, num_sub_threads: int = 1, ): - if device is not None: - raise ValueError( - "Device setting for batched environment can't be done at initialization. " - "The device will be inferred from the constructed environment. 
" - "It can be set through the `to(device)` method." - ) - - super().__init__(device=None) + super().__init__(device=device) self.is_closed = True if num_threads is None: num_threads = num_workers + 1 # 1 more thread for this proc @@ -218,7 +216,7 @@ def __init__( "memmap and shared memory are mutually exclusive features." ) self._batch_size = None - self._device = None + self._device = torch.device(device) if device is not None else device self._dummy_env_str = None self._seeds = None self.__dict__["_input_spec"] = None @@ -273,7 +271,9 @@ def _set_properties(self): self._properties_set = True if self._single_task: self._batch_size = meta_data.batch_size - device = self._device = meta_data.device + device = meta_data.device + if self._device is None: + self._device = device input_spec = meta_data.specs["input_spec"].to(device) output_spec = meta_data.specs["output_spec"].to(device) @@ -289,8 +289,18 @@ def _set_properties(self): self._batch_locked = meta_data.batch_locked else: self._batch_size = torch.Size([self.num_workers, *meta_data[0].batch_size]) - device = self._device = meta_data[0].device - # TODO: check that all action_spec and reward spec match (issue #351) + devices = set() + for _meta_data in meta_data: + device = _meta_data.device + devices.add(device) + if self._device is None: + if len(devices) > 1: + raise ValueError( + f"The device wasn't passed to {type(self)}, but more than one device was found in the sub-environments. " + f"Please indicate a device to be used for collection." + ) + device = list(devices)[0] + self._device = device input_spec = [] for md in meta_data: @@ -413,7 +423,7 @@ def _create_td(self) -> None: *(unravel_key(("next", key)) for key in self._env_output_keys), strict=False, ) - self.shared_tensordict_parent = shared_tensordict_parent.to(self.device) + self.shared_tensordict_parent = shared_tensordict_parent else: # Multi-task: we share tensordict that *may* have different keys shared_tensordict_parent = [ @@ -421,7 +431,7 @@ def _create_td(self) -> None: *self._selected_keys, *(unravel_key(("next", key)) for key in self._env_output_keys), strict=False, - ).to(self.device) + ) for tensordict in shared_tensordict_parent ] shared_tensordict_parent = torch.stack( @@ -440,13 +450,11 @@ def _create_td(self) -> None: # Multi-task: we share tensordict that *may* have different keys # LazyStacked already stores this so we don't need to do anything self.shared_tensordicts = self.shared_tensordict_parent - if self.device.type == "cpu": + if self.shared_tensordict_parent.device.type == "cpu": if self._share_memory: - for td in self.shared_tensordicts: - td.share_memory_() + self.shared_tensordict_parent.share_memory_() elif self._memmap: - for td in self.shared_tensordicts: - td.memmap_() + self.shared_tensordict_parent.memmap_() else: if self._share_memory: self.shared_tensordict_parent.share_memory_() @@ -483,7 +491,6 @@ def close(self) -> None: self.__dict__["_input_spec"] = None self.__dict__["_output_spec"] = None self._properties_set = False - self.event = None self._shutdown_workers() self.is_closed = True @@ -507,11 +514,6 @@ def to(self, device: DEVICE_TYPING): if device == self.device: return self self._device = device - self.meta_data = ( - self.meta_data.to(device) - if self._single_task - else [meta_data.to(device) for meta_data in self.meta_data] - ) if not self.is_closed: warn( "Casting an open environment to another device requires closing and re-opening it. 
" @@ -543,7 +545,7 @@ def _start_workers(self) -> None: for idx in range(_num_workers): env = self.create_env_fn[idx](**self.create_env_kwargs[idx]) - self._envs.append(env.to(self.device)) + self._envs.append(env) self.is_closed = False @_check_start @@ -603,29 +605,39 @@ def _reset(self, tensordict: TensorDictBase, **kwargs) -> TensorDictBase: if tensordict_.is_empty(): tensordict_ = None else: - # reset will do modifications in-place. We want the original - # tensorict to be unchaned, so we clone it - tensordict_ = tensordict_.clone(False) + env_device = _env.device + if env_device != self.device: + tensordict_ = tensordict_.to(env_device) + else: + tensordict_ = tensordict_.clone(False) else: tensordict_ = None + _td = _env.reset(tensordict=tensordict_, **kwargs) self.shared_tensordicts[i].update_( _td.select(*self._selected_reset_keys_filt, strict=False) ) selected_output_keys = self._selected_reset_keys_filt + device = self.device if self._single_task: # select + clone creates 2 tds, but we can create one only out = TensorDict( - {}, batch_size=self.shared_tensordict_parent.shape, device=self.device + {}, batch_size=self.shared_tensordict_parent.shape, device=device ) for key in selected_output_keys: - _set_single_key(self.shared_tensordict_parent, out, key, clone=True) - return out + _set_single_key( + self.shared_tensordict_parent, out, key, clone=True, device=device + ) else: - return self.shared_tensordict_parent.select( + out = self.shared_tensordict_parent.select( *selected_output_keys, strict=False, - ).clone() + ) + if out.device == device: + out = out.clone() + else: + out = out.to(device, non_blocking=True) + return out def _reset_proc_data(self, tensordict, tensordict_reset): # since we call `reset` directly, all the postproc has been completed @@ -643,19 +655,29 @@ def _step( for i in range(self.num_workers): # shared_tensordicts are locked, and we need to select the keys since we update in-place. # There may be unexpected keys, such as "_reset", that we should comfortably ignore here. - out_td = self._envs[i]._step(tensordict_in[i]) + env_device = self._envs[i].device + if env_device != self.device: + data_in = tensordict_in[i].to(env_device, non_blocking=True) + else: + data_in = tensordict_in[i] + out_td = self._envs[i]._step(data_in) next_td[i].update_(out_td.select(*self._env_output_keys, strict=False)) # We must pass a clone of the tensordict, as the values of this tensordict # will be modified in-place at further steps + device = self.device if self._single_task: out = TensorDict( - {}, batch_size=self.shared_tensordict_parent.shape, device=self.device + {}, batch_size=self.shared_tensordict_parent.shape, device=device ) for key in self._selected_step_keys: - _set_single_key(next_td, out, key, clone=True) + _set_single_key(next_td, out, key, clone=True, device=device) else: # strict=False ensures that non-homogeneous keys are still there - out = next_td.select(*self._selected_step_keys, strict=False).clone() + out = next_td.select(*self._selected_step_keys, strict=False) + if out.device == device: + out = out.clone() + else: + out = out.to(device, non_blocking=True) return out def __getattr__(self, attr: str) -> Any: @@ -710,6 +732,32 @@ class ParallelEnv(_BatchedEnv): """ __doc__ += _BatchedEnv.__doc__ + __doc__ += """ + + .. note:: + The choice of the devices where ParallelEnv needs to be executed can + drastically influence its performance. 
The rule of thumbs is: + + - If the base environment (backend, e.g., Gym) is executed on CPU, the + sub-environments should be executed on CPU and the data should be + passed via shared physical memory. + - If the base environment is (or can be) executed on CUDA, the sub-environments + should be placed on CUDA too. + - If a CUDA device is available and the policy is to be executed on CUDA, + the ParallelEnv device should be set to CUDA. + + Therefore, supposing a CUDA device is available, we have the following scenarios: + + >>> # The sub-envs are executed on CPU, but the policy is on GPU + >>> env = ParallelEnv(N, MyEnv(..., device="cpu"), device="cuda") + >>> # The sub-envs are executed on CUDA + >>> env = ParallelEnv(N, MyEnv(..., device="cuda"), device="cuda") + >>> # this will create the exact same environment + >>> env = ParallelEnv(N, MyEnv(..., device="cuda")) + >>> # If no cuda device is available + >>> env = ParallelEnv(N, MyEnv(..., device="cpu")) + + """ def _start_workers(self) -> None: from torchrl.envs.env_creator import EnvCreator @@ -722,39 +770,39 @@ def _start_workers(self) -> None: self.parent_channels = [] self._workers = [] - self._events = [] - if self.device.type == "cuda": + func = _run_worker_pipe_shared_mem + if self.shared_tensordict_parent.device.type == "cuda": self.event = torch.cuda.Event() else: self.event = None + self._events = [ctx.Event() for _ in range(_num_workers)] + kwargs = [{"mp_event": self._events[i]} for i in range(_num_workers)] with clear_mpi_env_vars(): for idx in range(_num_workers): if self._verbose: print(f"initiating worker {idx}") # No certainty which module multiprocessing_context is parent_pipe, child_pipe = ctx.Pipe() - event = ctx.Event() - self._events.append(event) env_fun = self.create_env_fn[idx] if not isinstance(env_fun, EnvCreator): env_fun = CloudpickleWrapper(env_fun) - + kwargs[idx].update( + { + "parent_pipe": parent_pipe, + "child_pipe": child_pipe, + "env_fun": env_fun, + "env_fun_kwargs": self.create_env_kwargs[idx], + "shared_tensordict": self.shared_tensordicts[idx], + "_selected_input_keys": self._selected_input_keys, + "_selected_reset_keys": self._selected_reset_keys, + "_selected_step_keys": self._selected_step_keys, + "has_lazy_inputs": self.has_lazy_inputs, + } + ) process = _ProcessNoWarn( - target=_run_worker_pipe_shared_mem, + target=func, num_threads=self.num_sub_threads, - args=( - parent_pipe, - child_pipe, - env_fun, - self.create_env_kwargs[idx], - self.device, - event, - self.shared_tensordicts[idx], - self._selected_input_keys, - self._selected_reset_keys, - self._selected_step_keys, - self.has_lazy_inputs, - ), + kwargs=kwargs[idx], ) process.daemon = True process.start() @@ -834,10 +882,16 @@ def step_and_maybe_reset( # We must pass a clone of the tensordict, as the values of this tensordict # will be modified in-place at further steps - tensordict.set("next", self.shared_tensordict_parent.get("next").clone()) - tensordict_ = self.shared_tensordict_parent.exclude( - "next", *self.reset_keys - ).clone() + next_td = self.shared_tensordict_parent.get("next") + tensordict_ = self.shared_tensordict_parent.exclude("next", *self.reset_keys) + device = self.device + if self.shared_tensordict_parent.device == device: + next_td = next_td.clone() + tensordict_ = tensordict_.clone() + else: + next_td = next_td.to(device, non_blocking=True) + tensordict_ = tensordict_.to(device, non_blocking=True) + tensordict.set("next", next_td) return tensordict, tensordict_ @_check_start @@ -880,15 +934,20 @@ def _step(self, 
tensordict: TensorDictBase) -> TensorDictBase: # We must pass a clone of the tensordict, as the values of this tensordict # will be modified in-place at further steps next_td = self.shared_tensordict_parent.get("next") + device = self.device if self._single_task: out = TensorDict( - {}, batch_size=self.shared_tensordict_parent.shape, device=self.device + {}, batch_size=self.shared_tensordict_parent.shape, device=device ) for key in self._selected_step_keys: - _set_single_key(next_td, out, key, clone=True) + _set_single_key(next_td, out, key, clone=True, device=device) else: # strict=False ensures that non-homogeneous keys are still there - out = next_td.select(*self._selected_step_keys, strict=False).clone() + out = next_td.select(*self._selected_step_keys, strict=False) + if out.device == device: + out = out.clone() + else: + out = out.to(device, non_blocking=True) return out @_check_start @@ -944,19 +1003,26 @@ def _reset(self, tensordict: TensorDictBase, **kwargs) -> TensorDictBase: event.clear() selected_output_keys = self._selected_reset_keys_filt + device = self.device if self._single_task: # select + clone creates 2 tds, but we can create one only out = TensorDict( - {}, batch_size=self.shared_tensordict_parent.shape, device=self.device + {}, batch_size=self.shared_tensordict_parent.shape, device=device ) for key in selected_output_keys: - _set_single_key(self.shared_tensordict_parent, out, key, clone=True) - return out + _set_single_key( + self.shared_tensordict_parent, out, key, clone=True, device=device + ) else: - return self.shared_tensordict_parent.select( + out = self.shared_tensordict_parent.select( *selected_output_keys, strict=False, - ).clone() + ) + if out.device == device: + out = out.clone() + else: + out = out.to(device, non_blocking=True) + return out @_check_start def _shutdown_workers(self) -> None: @@ -981,6 +1047,7 @@ def _shutdown_workers(self) -> None: del self.parent_channels self._cuda_events = None self._events = None + self.event = None @_check_start def set_seed( @@ -1063,7 +1130,6 @@ def _run_worker_pipe_shared_mem( child_pipe: connection.Connection, env_fun: Union[EnvBase, Callable], env_fun_kwargs: Dict[str, Any], - device: DEVICE_TYPING = None, mp_event: mp.Event = None, shared_tensordict: TensorDictBase = None, _selected_input_keys=None, @@ -1072,13 +1138,11 @@ def _run_worker_pipe_shared_mem( has_lazy_inputs: bool = False, verbose: bool = False, ) -> None: - if device is None: - device = torch.device("cpu") + device = shared_tensordict.device if device.type == "cuda": event = torch.cuda.Event() else: event = None - parent_pipe.close() pid = os.getpid() if not isinstance(env_fun, EnvBase): @@ -1089,7 +1153,6 @@ def _run_worker_pipe_shared_mem( "env_fun_kwargs must be empty if an environment is passed to a process." 
) env = env_fun - env = env.to(device) del env_fun i = -1 @@ -1144,7 +1207,8 @@ def _run_worker_pipe_shared_mem( if not initialized: raise RuntimeError("called 'init' before step") i += 1 - next_td = env._step(shared_tensordict) + env_input = shared_tensordict + next_td = env._step(env_input) next_shared_tensordict.update_(next_td) if event is not None: event.record() @@ -1155,7 +1219,8 @@ def _run_worker_pipe_shared_mem( if not initialized: raise RuntimeError("called 'init' before step") i += 1 - td, root_next_td = env.step_and_maybe_reset(shared_tensordict.clone(False)) + env_input = shared_tensordict + td, root_next_td = env.step_and_maybe_reset(env_input) next_shared_tensordict.update_(td.get("next")) root_shared_tensordict.update_(root_next_td) if event is not None: @@ -1208,3 +1273,10 @@ def _run_worker_pipe_shared_mem( else: # don't send env through pipe child_pipe.send(("_".join([cmd, "done"]), None)) + + +def _update_cuda(t_dest, t_source): + if t_source is None: + return + t_dest.copy_(t_source.pin_memory(), non_blocking=True) + return diff --git a/torchrl/envs/utils.py b/torchrl/envs/utils.py index 06eec73be97..9a2a71f24bd 100644 --- a/torchrl/envs/utils.py +++ b/torchrl/envs/utils.py @@ -237,7 +237,11 @@ def step_mdp( def _set_single_key( - source: TensorDictBase, dest: TensorDictBase, key: str | tuple, clone: bool = False + source: TensorDictBase, + dest: TensorDictBase, + key: str | tuple, + clone: bool = False, + device=None, ): # key should be already unraveled if isinstance(key, str): @@ -253,7 +257,9 @@ def _set_single_key( source = val dest = new_val else: - if clone: + if device is not None and val.device != device: + val = val.to(device, non_blocking=True) + elif clone: val = val.clone() dest._set_str(k, val, inplace=False, validated=True) # This is a temporary solution to understand if a key is heterogeneous @@ -262,7 +268,7 @@ def _set_single_key( if re.match(r"Found more than one unique shape in the tensors", str(err)): # this is a het key for s_td, d_td in zip(source.tensordicts, dest.tensordicts): - _set_single_key(s_td, d_td, k, clone) + _set_single_key(s_td, d_td, k, clone=clone, device=device) break else: raise err
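
Taken together, the last patch lets the batched-environment device differ from the sub-environment device, with collected data cast automatically. A minimal usage sketch, assuming a Gym backend is installed (the environment name is illustrative) and falling back to CPU when no CUDA device is present:

    >>> import torch
    >>> from torchrl.envs import ParallelEnv
    >>> from torchrl.envs.libs.gym import GymEnv
    >>> device = "cuda" if torch.cuda.is_available() else "cpu"
    >>> # sub-envs run on CPU; the batched env casts outputs to `device`
    >>> env = ParallelEnv(2, lambda: GymEnv("Pendulum-v1", device="cpu"), device=device)
    >>> rollout = env.rollout(3)
    >>> assert rollout.device.type == torch.device(device).type
    >>> env.close()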