From a5c04dc6b44f410aeb605f7c505deff9fd83ddc5 Mon Sep 17 00:00:00 2001 From: Alexander Blatzheim Date: Fri, 4 Oct 2024 19:22:41 +0200 Subject: [PATCH] Initial Transfer Commit --- .github/workflows/python-publish.yml | 41 ++ .gitignore | 9 +- Evaluate_Description-Embedding_Body.ipynb | 475 +++++++++++++++ FusionSent_visualization.png | Bin 0 -> 117277 bytes LICENSE.txt | 201 +++++++ README.md | 131 ++++- fusionsent/__init__.py | 4 + fusionsent/merging_methods.py | 138 +++++ fusionsent/modeling.py | 274 +++++++++ fusionsent/trainer.py | 676 ++++++++++++++++++++++ fusionsent/training_args.py | 259 +++++++++ pyproject.toml | 3 + setup.py | 78 +++ 13 files changed, 2284 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/python-publish.yml create mode 100644 Evaluate_Description-Embedding_Body.ipynb create mode 100644 FusionSent_visualization.png create mode 100644 LICENSE.txt create mode 100644 fusionsent/__init__.py create mode 100644 fusionsent/merging_methods.py create mode 100644 fusionsent/modeling.py create mode 100644 fusionsent/trainer.py create mode 100644 fusionsent/training_args.py create mode 100644 pyproject.toml create mode 100644 setup.py diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml new file mode 100644 index 0000000..f57fca0 --- /dev/null +++ b/.github/workflows/python-publish.yml @@ -0,0 +1,41 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +name: Upload Python Package + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + deploy: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: '3.10' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build + - name: Install build dependencies + run: pip install numpy + - name: Build package + run: python -m build + - name: Publish package + uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} \ No newline at end of file diff --git a/.gitignore b/.gitignore index 82f9275..9941588 100644 --- a/.gitignore +++ b/.gitignore @@ -106,10 +106,8 @@ ipython_config.py #pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. -# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +# https://pdm.fming.dev/#use-with-ide .pdm.toml -.pdm-python -.pdm-build/ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm __pypackages__/ @@ -159,4 +157,7 @@ cython_debug/ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
-#.idea/ +.idea/ + +# data directory +data/ \ No newline at end of file diff --git a/Evaluate_Description-Embedding_Body.ipynb b/Evaluate_Description-Embedding_Body.ipynb new file mode 100644 index 0000000..1da16cb --- /dev/null +++ b/Evaluate_Description-Embedding_Body.ipynb @@ -0,0 +1,475 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "ed004c90", + "metadata": { + "ExecuteTime": { + "start_time": "2024-01-17T12:55:35.436360Z" + }, + "is_executing": true + }, + "outputs": [], + "source": [ + "# This script seeks a better alternative for the current labels used in the FuisionBody.label_embedding_model_body. \n", + "# For this purpose, it evaluattes an alternative embeddings of class descriptions, against the currently implemented default, that embeds label-descriptions.\n", + "\n", + "from fusionsent import FusionSentModel, Trainer, TrainingArguments\n", + "from sklearn.preprocessing import LabelEncoder, MultiLabelBinarizer\n", + "from datasets import load_dataset, Dataset\n", + "from transformers import AutoTokenizer\n", + "import numpy as np\n", + "import openai #Please note that openai is not listed in our requirements.txt file. Run $'pip install openai', to install the package.\n", + "import torch\n", + "import json\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "9ef91033", + "metadata": {}, + "outputs": [], + "source": [ + "# Setting environment variables\n", + "cwd = os.path.abspath(os.getcwd())\n", + "os.environ['WORLD_SIZE'] = str(torch.cuda.device_count())\n", + "os.environ['MASTER_ADDR'] = 'localhost'\n", + "os.environ['MASTER_PORT'] = '29500'" + ] + }, + { + "cell_type": "markdown", + "id": "3351d25b", + "metadata": {}, + "source": [ + "# Load and Prepare All Datasets" + ] + }, + { + "cell_type": "markdown", + "id": "cb6f76dd", + "metadata": {}, + "source": [ + "*1. Download original data.*" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16a4ced9", + "metadata": {}, + "outputs": [], + "source": [ + "# The below are the exact datasets used for training in the original setfit paper.\n", + "# If not existent already, we will load them all, and store them locally in order to add label descriptions.\n", + "dataset_ids_binary_label: list[str] = [\"CR\", \"emotion\", \"enron_spam\"]\n", + "dataset_ids_nonbinary_label: list[str] = [\"sst5\", \"amazon_counterfactual\", \"emotion\", \"ag_news\"]\n", + "dataset_ids = dataset_ids_binary_label + dataset_ids_nonbinary_label\n", + "data_dir_original = \"./data/original\"\n", + "datasets_original = {} \n", + "\n", + "for dataset_id in dataset_ids:\n", + " print(f\"Loading dataset: '{dataset_id}'\")\n", + " datasets_original[dataset_id] = {}\n", + " for split in [\"train\", \"test\"]:\n", + " try:\n", + " dataset_split = load_dataset(f\"SetFit/{dataset_id}\", split=split)\n", + " datasets_original[dataset_id][split] = dataset_split\n", + " except ValueError as e:\n", + " print(f\"Could not load dataset '{dataset_id}'. An error occurred: {e}\")\n", + " datasets_original.pop(dataset_id)\n", + " break\n", + "print(\"-- Done --\")" + ] + }, + { + "cell_type": "markdown", + "id": "a0560b42", + "metadata": {}, + "source": [ + "*2. 
Generate label descriptions via OpenAI and save them to files.*" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c2ff0ad8", + "metadata": {}, + "outputs": [], + "source": [ + "# ToDo: Fix generation for datasets 'enron_spam', and 'ag_news'.\n", + "data_dir_label_descriptions = \"./data/label_descriptions\"\n", + "label_description_file_template = \"{}_label_descriptions.json\"\n", + "os.makedirs(data_dir_label_descriptions, exist_ok=True)\n", + "\n", + "openai_api_key = \"your-openai-key\"\n", + "open_ai_model =\"gpt-4-0125-preview\"\n", + "regenerate = False\n", + "\n", + "def get_label_description(dataset_name: str, label: str, label_text: str, examples: list[str]) -> str:\n", + " try:\n", + " client = openai.OpenAI(api_key=openai_api_key)\n", + " completion = client.chat.completions.create(\n", + " model=open_ai_model,\n", + " messages= [\n", + " {\n", + " \"role\": \"system\", \n", + " \"content\": \"\"\"\n", + " You are a scientific research assistant, in the area of Natrual Language Processing.\n", + " Your purpose is to write comprhesnive, concise, and short descriptions for a given label of a dataset.\n", + " For each label, you will be provided some examples of data samples that are annoted with the resp. label.\n", + " Rules:\n", + " 1. Be consise in your descriptions.\n", + " 2. Each decitpion should be exactly one sentence long.\n", + " Not complying with the rules will result in termination. \n", + " \"\"\"\n", + " },\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": f\"\"\"\n", + " Dataset name: '{dataset_name}'\\n\n", + " Label key: '{label}'\\n\n", + " Label name: '{label_text}'\\n\n", + " ---\\n\\n\n", + " Example Samples annotated with '{label_text}':\\n\\n\n", + " {examples}\\n\\n\n", + " ---\\n\\n\n", + " Please describe the essence of the label '{label}': '{label_text}' in one sentence:\n", + " \"\"\"\n", + " }\n", + " ]\n", + " )\n", + " if completion.choices and completion.choices[0].message and completion.choices[0].message.content:\n", + " response = completion.choices[0].message.content\n", + " print(f\"Obtained description for {dataset_id}/{label_text}: {response}\")\n", + " return response\n", + " else:\n", + " raise Exception(\"Invalid response from OpenAI: No content in the response.\")\n", + " except Exception as e:\n", + " raise Exception(f\"Unexpected error with the response from OpenAI: {str(e)}\")\n", + "\n", + "for dataset_id in dataset_ids:\n", + " description_file_path = os.path.join(data_dir_label_descriptions, label_description_file_template.format(dataset_id))\n", + " if (not regenerate) and os.path.exists(description_file_path):\n", + " print(f\"Skipped label generation for '{dataset_id}' dataset (File already exists).\")\n", + " continue\n", + " # Samples from SetFit/enron_spam are too large.\n", + " if dataset_id == \"enron_spam\":\n", + " continue\n", + "\n", + " # Process the dataset to get label-to-data mapping\n", + " label_to_data = {}\n", + " label_to_label_text = {}\n", + " for item in datasets_original[dataset_id][\"train\"]:\n", + " label = item['label']\n", + " text = item['text']\n", + " if label not in label_to_data:\n", + " label_to_data[label] = []\n", + " if label not in label_to_label_text:\n", + " label_to_label_text[label] = item[\"label_text\"]\n", + " label_to_data[label].append(text)\n", + "\n", + " # Sample the 5 examples or less (because of open ai token rate limits) per label and generate label descriptions\n", + " label_to_description = {}\n", + " hasEncounteredError = False\n", + " for 
label, examples in label_to_data.items():\n", + " sampled_examples: list[str] = np.random.choice(examples, size=5, replace=False).tolist()\n", + " while sum([len(t) for t in sampled_examples]) > 100:\n", + " sampled_examples = sampled_examples[:-1]\n", + " #print(sum([len(t) for t in sampled_examples]))\n", + " try:\n", + " description = get_label_description(dataset_id, label, label_to_label_text[label], examples)\n", + " except Exception as e:\n", + " hasEncounteredError=True\n", + " break\n", + " label_to_description[label] = description\n", + "\n", + " if hasEncounteredError:\n", + " print(f\"An error occurred during label description generation for datatset '{dataset_id}'. Skipping...\")\n", + " continue\n", + "\n", + " # Save the label-to-description mappings\n", + " with open(description_file_path, 'w') as f:\n", + " json.dump(label_to_description, f, indent=2, ensure_ascii=False)\n", + " \n", + " print(f\"Saved label descriptions for '{dataset_id}' dataset.\")" + ] + }, + { + "cell_type": "markdown", + "id": "e10bc9d3", + "metadata": {}, + "source": [ + "*3. Format the datasets in order to pass them into the DualSen model*" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "6519f3c6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Warning: Limiting dataset size to 250 elements for testing!\n", + "1\n", + "[\"The label '1', denoted as 'positive', applies to data samples expressing favorable, satisfactory, or beneficial opinions, experiences, or outcomes.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "1\n", + "[\"The label '1', denoted as 'positive', applies to data samples expressing favorable, satisfactory, or beneficial opinions, experiences, or outcomes.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "1\n", + "['positive']\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "1\n", + "['positive']\n", + "Sucessfully formatted dataset 'CR'.\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[1 0 0 0 0 0]\n", + "[\"The essence of the label '0': 'sadness' is characterized by feelings of hopelessness, disappointment, melancholy, and vulnerability, often accompanied by a sense of isolation or being overwhelmed.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[1 0 0 0 0 0]\n", + "[\"The essence of the label '0': 'sadness' is characterized by feelings of hopelessness, disappointment, melancholy, and vulnerability, often accompanied by a sense of isolation or being overwhelmed.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[1 0 0 0 0 0]\n", + "['sadness']\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[1 0 0 0 0 0]\n", + "['sadness']\n", + "Sucessfully formatted dataset 'emotion'.\n", + "Skipping formatting dataset 'enron_spam': Description file not found.\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "1\n", + "[\"The label 'joy' encompasses examples demonstrating feelings of happiness, satisfaction, gladness, or positive emotional states experienced by individuals.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "1\n", + "[\"The label 'joy' encompasses examples demonstrating feelings of happiness, satisfaction, gladness, or positive emotional states experienced by individuals.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "1\n", + "['spam']\n", + 
"Warning: Limiting dataset size to 250 elements for testing!\n", + "1\n", + "['spam']\n", + "Sucessfully formatted dataset 'enron_spam'.\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[0 0 0 0 1]\n", + "[\"The label '4': 'very positive' is used for data samples that express strong or intense positive sentiments, enthusiasm, or approval.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[0 1 0 0 0]\n", + "[\"The label '1', 'negative', is used for reviews or comments that express dissatisfaction, disapproval, or disappointment regarding a subject.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[0 0 0 0 1]\n", + "['very positive']\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[0 1 0 0 0]\n", + "['negative']\n", + "Sucessfully formatted dataset 'sst5'.\n", + "Skipping formatting dataset 'amazon_counterfactual': Key 'train' and/or 'test' not found.\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[1 0 0 0 0 0]\n", + "[\"The essence of the label '0': 'sadness' is characterized by feelings of hopelessness, disappointment, melancholy, and vulnerability, often accompanied by a sense of isolation or being overwhelmed.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[1 0 0 0 0 0]\n", + "[\"The essence of the label '0': 'sadness' is characterized by feelings of hopelessness, disappointment, melancholy, and vulnerability, often accompanied by a sense of isolation or being overwhelmed.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[1 0 0 0 0 0]\n", + "['sadness']\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[1 0 0 0 0 0]\n", + "['sadness']\n", + "Sucessfully formatted dataset 'emotion'.\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[0 0 1 0]\n", + "[\"The label '2': 'Business' encompasses news and information related to commerce, trade, financial markets, companies, and economic trends.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[0 0 1 0]\n", + "[\"The label '2': 'Business' encompasses news and information related to commerce, trade, financial markets, companies, and economic trends.\"]\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[0 0 1 0]\n", + "['Business']\n", + "Warning: Limiting dataset size to 250 elements for testing!\n", + "[0 0 1 0]\n", + "['Business']\n", + "Sucessfully formatted dataset 'ag_news'.\n" + ] + } + ], + "source": [ + "formatted_datasets = {}\n", + "def format_dataset(original_dataset, label_to_description=None) -> Dataset:\n", + " \"\"\"\n", + " Creates a Dataset object with label encoding and optional label descriptions.\n", + " \"\"\"\n", + " input_texts = [d['text'] for d in original_dataset]\n", + " raw_labels = [d['label'] for d in original_dataset]\n", + "\n", + " # Check if labels are binary (single value) or multi-class (list of labels)\n", + " if all(raw_label in [0,1] and not isinstance(raw_label, list) for raw_label in raw_labels):\n", + " # Binary case\n", + " label_encoder = LabelEncoder()\n", + " labels = label_encoder.fit_transform(raw_labels)\n", + " else:\n", + " # Multi-class case\n", + " label_encoder = MultiLabelBinarizer()\n", + " labels = label_encoder.fit_transform([raw_label] for raw_label in raw_labels)\n", + "\n", + " # Either select label text or label description for the 'label_description' text\n", + " if label_to_description is None:\n", + 
" label_descriptions = [[d['label_text']] for d in original_dataset]\n", + " else:\n", + " label_descriptions = [[label_to_description[str(d['label'])]] for d in original_dataset]\n", + "\n", + " # Limit to 250 elements for testing.\n", + " # TODO: Deal with error in setfit.\n", + " # Error occurrs in setfit.sampler, line 29: 'idxs = np.stack(np.triu_indices(n, k), axis=-1)'\n", + " # with n being the sample size, k=1 if sampled with replacedmed, 0 otherwise.\n", + " # Reason: Out-of memory. Latest numpy+setfit versions do not fix this.\n", + " input_texts = input_texts[:250]\n", + " labels = labels[:250]\n", + " label_descriptions = label_descriptions[:250]\n", + " print(\"Warning: Limiting dataset size to 250 elements for testing!\")\n", + " print(labels[0])\n", + " print(label_descriptions[0])\n", + " return Dataset.from_dict({\n", + " \"text\": input_texts,\n", + " \"label\": labels,\n", + " \"label_description\": label_descriptions\n", + " })\n", + "\n", + "for dataset_id in dataset_ids:\n", + " # Load label descriptions\n", + " description_file_path = os.path.join(data_dir_label_descriptions, label_description_file_template.format(dataset_id))\n", + " try:\n", + " with open(description_file_path, 'r') as f:\n", + " label_to_description = json.load(f)\n", + " except FileNotFoundError:\n", + " print(f\"Skipping formatting dataset '{dataset_id}': Description file not found.\")\n", + " \n", + " # Format train and validation datasets, one with the descriptions in \"label_description\", and one with the label texts instead.\n", + " try: \n", + " formatted_datasets[dataset_id] = {}\n", + " formatted_datasets[dataset_id][\"label_description\"] = {\n", + " \"train\": format_dataset(datasets_original[dataset_id][\"train\"], label_to_description),\n", + " \"test\": format_dataset(datasets_original[dataset_id][\"test\"], label_to_description)\n", + " }\n", + " formatted_datasets[dataset_id][\"label_text\"] = {\n", + " \"train\": format_dataset(datasets_original[dataset_id][\"train\"]),\n", + " \"test\": format_dataset(datasets_original[dataset_id][\"test\"])\n", + " }\n", + " print(f\"Sucessfully formatted dataset '{dataset_id}'.\")\n", + "\n", + " except KeyError as e:\n", + " print(f\"Skipping formatting dataset '{dataset_id}': Key 'train' and/or 'test' not found.\")\n", + " formatted_datasets.pop(dataset_id)" + ] + }, + { + "cell_type": "markdown", + "id": "852e0eef", + "metadata": {}, + "source": [ + "# Train & Evaluate FusionSent Model " + ] + }, + { + "cell_type": "markdown", + "id": "038dad2f", + "metadata": {}, + "source": [ + "*1. Set up the model, tokenizer, and training arguments.*" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "ffad2066", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "model_id = \"malteos/scincl\"\n", + "tokenizer = AutoTokenizer.from_pretrained(model_id)\n", + "training_args = TrainingArguments(\n", + " batch_sizes=(10,15),\n", + " num_epochs=(1,3),\n", + " sampling_strategies=\"undersampling\",\n", + " use_setfit_body=False #In this experiment, we only want to evaluate different lavel_embedding submodels, so we dont need the 'setfit' body.\n", + " )\n", + "\n", + "def getFreshModel()->FusionSentModel:\n", + " return FusionSentModel.from_pretrained(pretrained_model_name_or_path=model_id, multi_target_strategy=\"one-vs-rest\")" + ] + }, + { + "cell_type": "markdown", + "id": "b355d506", + "metadata": {}, + "source": [ + "*2. 
Train and evaluate one dataset after another.*\n",
+    "\n",
+    "*Please choose an appropriate subset of all the datasets in `target_datasets`.*"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f6b6e37a",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "target_datasets = dataset_ids[:1]  # Select applicable datasets (only the first one, for testing)\n",
+    "\n",
+    "for dataset_id in target_datasets:\n",
+    "    for dataset_key, dataset in formatted_datasets[dataset_id].items():\n",
+    "        # Define Trainer and start training\n",
+    "        trainer = Trainer(\n",
+    "            model=getFreshModel(),\n",
+    "            args=training_args,\n",
+    "            train_dataset=dataset[\"train\"],\n",
+    "            eval_dataset=dataset[\"test\"],\n",
+    "            eval_metrics={\n",
+    "                'metric_names': ['f1', 'precision', 'recall', 'accuracy'],\n",
+    "                'metric_args': {'average': 'micro'}\n",
+    "            }\n",
+    "        )\n",
+    "        print(f\"Training FusionSent on dataset '{dataset_id}' with {dataset_key}.\")\n",
+    "        trainer.train()\n",
+    "        # Evaluate the current model\n",
+    "        eval_scores = trainer.evaluate(\n",
+    "            x_eval=[item['text'] for item in dataset[\"test\"]],\n",
+    "            y_eval=[item['label'] for item in dataset[\"test\"]]\n",
+    "        )\n",
+    "        print(f\"Evaluation results for '{dataset_id}' with {dataset_key}: {eval_scores}\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.15"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/FusionSent_visualization.png b/FusionSent_visualization.png
new file mode 100644
index 0000000000000000000000000000000000000000..976e814dd31c038f2a33e78017909b95c76a3664
GIT binary patch
literal 117277
[base85-encoded binary data for FusionSent_visualization.png omitted]
zW@0G6w15o>ZMez$YPkV4XShR3HVTpYCs!(DDo-nMq}3^N9g1Hf^KE2!I8siq&kv;o zwVVrs1%hPSY#E^S#^+t1)lruZ^Cv~nPj>(R@W)q`usoU8UM9o3Fb=DoRsrxo&`EOT zurgneo`zuuWXQBcH;b%bvj+V2sS>8v&cr56=1akjMr;)PBPikkaUc3X&i25_LobA# z;sCdKmcXlo@|MXK-dCG9r0I_Tbd32-WzU(lMNZo)`t68kD~&TZDQ>+&@T~4MS^Of$ z<=H&4$=ow1(4tl(7z?XT>YOCNn9~lsp0NqYNVFb#(8u{50U+C^?^Zq$Nm4&my2-*+ zhe7|^i{?VKk0GHM(=1wNRt{UnpP?Kf2`Wg_eZ(PX9YO!DRa`1jc0GA&i!+*K)`MdHm zykLB70Xg`moOxjD2;qyOX7l|D)5*xK+dmTn1=YSDmybSs{CeAlf9mw*JBV}H0KhMs zmJzp`0H9@7pva$)=8q!Ostt4wLW>QRv}kXQN|g{@M-56lB4%X>>MWpm<(_kvWr}TL z$U@oh3)tC9J;QB_NwqwhQvHjZ6lVN-`FRy+oL*LY5)r1b@%UC>z1W%cg0vLII0NZN ze9X2cZmbOM*BK(u{EAW%8^rp@;YWHC9MN5H*7W2YuT%3qk8vyxynZSaQ6Z|a_VTgE z1)gTp%%>XLI`+2eJA)cqtGSz9Y^t&~ezsnHhOajOlfk+MOj?^l5jN>-yj^3fl9O#B z!NXutb^FhxgYB9jQcFAq2ol-7OkV(o|Mswg%uKa@*fjgy-NhDCO(K@>fV=o;JH|q|i-@5wwFV$Yq1NZhONrG>Ezx>qm9$;kGQ1(xkXrFW;NlfU{ z7gvkFlrUZ-2SSLDijhtqL{Op$T+YKmk6HwZ4smh`8dyHKGAkk|w|;~?4hk7sHs96L zvo;5*fDqPta%ZTF)~O{lfQHQt-;KFtH3)!DhJ}9}YUa`kxFa!ajmwPp!<#S|e9p1)m^-5*f&tgj)3VH$O2%pa#sRry8IRids=7vZ4H7V9U!m8sHF?)d|HY#-C0bJD)+l{iA)8Kie zRz(G-S%YRtA;C=PrD|>zv~fs@fae-98h#Y7JRuQUVH?CRAj7m$_$0bPSWui0X(~Aw z9C7(7Zh&PADk3fG4AnjcJLO5ctM>P?H~-9aqn1qTi{`RB29rZ#9T&hA8!XW`QPtiU zgvhYOF9*+57QKzXa$ei~$CtVsO|^n544Hwql!>%sVsD|kCN|aJh-i#Hb1Q(40(8*f z+>L;Ee=fo%B^1munwCN$8E8&c=VawS7nRT0L#QAoY;N^LE;?-2#KPME-WRVF;b0BY zQtDCOfIcD-Vlt+{D5!)45OR{rgM98TEq??&jJd~;?H0b1?Yf2HnUR@Wh$-AW>4w!4 z9_)I{h*k=Bg7=E*cPlf94ywgcn(0;e9`4F>!h69^R=D53{IY*B63q?O2=>^;ASr~$ z2q&&_o&()TP}kISeMu+SmweidT9%-Z)~<$y*PN3 z-$<&<$mo``JwamMrqpr!gb?}RcS@hXLY!)qTw11t$IfJy8;c6onOPxAENqNTIp~AQ z_^1|x`+KG`K$&7Gl^2{v(~Ecr==w@X>4KLWNJc$HLNNrEzZ_1$(ZlfLu2p5d5>OcE ze@@Ec6L7k{0brhCQt)jMR`Ix$*)n~!MZ`f?n0q&QG9q7;)U{hh_7JiD=*b&KGN5de z=`Y$`#n#V&!A424VAA{aG?0vAT_Aa;JT2$pF()p~zUOKuCFn=|4Xc>~8zVVqz}6M4 zcj;un$zPVJmgtNq%h7rPh;`BCZFpIb>HX*ax58;f6;WY}*T=(ToEq1*BaS%RuGD;e zbQTA|Q^vEC=N|ohxc0OQ2_O>AgKl{ONw2cv?6onr>vmC4_KBAB4WZ8lMA)q8Q z1(1M<0|q>bK}xa}R?Bx5B{S!br)XRm%^BP{YS>bE$N=2d=hj(J5j7+(SPpnhDH6B5 zy~i&WCVnWdSP!$X+mewOfg*s`0YDTw(4Kd3@{v^|bi@a^AWT+g~IlO;lYqu zpq2FbJw@8}*zv4bi0OmZs!(E_C>i@c>HhXo@x!(}ti!{tiEzz^a?1o+O%(cc>6xta z*eHu|Dy+V&sx?v&+c?b9Mp(7HO6u)fqFvb$oB!U4jybw_pZFrx*LEoHH0To6-dJ@= zu9&;Wa2oIC@42!3&RYLj)RRp*TLgrs)Z2g5-Dm!9-TOym@7 ztGegpIDjy*gurPE5V()dtfWMrLN~V|)+<6HGl4oAM(x;cgqy~rKO*T!o_NuQ47ueM zZbLj_YWmCby;0wVl2xjjqXM6)zKGebk8?{F1)tnZOj~HaOBRNeAf%u%HIPiIy86s3!Ksuzj z9+tqramAYf_`v6#?l<<(U7Fgb?{C@Uhq;BRpM1hEwfEoNwiQu`q?7=(cc(Efa3H0O z6}Xzyi2nji`dbzJY^t^wnpvx)dOOP5O_cS~{z*4xd2 zVg{_Wz1-F#BnSWCa9A#vGAgSB-w|`1CFD!^tDG1asjJU8d;agD@U5dP`sq}ulmbwn z;lq5IS{cAXOEn#Jr;mOUlxX$H%ALPxgV;Y{-Bp<1o}|ZT3`Y}nesBn zq~+FPH)0A%U*-qg`2u_5db-=tNm@ty?axz6k$tdT3_838lR63kf=#{e4eEAY?Upuo~*Iaeuv}fV4)p-XFILPm;eMy2i8| zQ{LyU?x`zFUVe!vJA^oR4+RqRBP{m=(4Qsb{oGNmTHywuCFNp>Q7G_q7 z*`%`hTixO|*lTWbasp$lCFDGzElMNLY6e=^I=*yxhDjx<}r%y8L1Lrl{2LuGTF%e$G0LtT||0{HaSvS78oV zD~+=xw{_N-to9~~Qv>MSKDg0@>iCQ*{63kqCQ_&Bdi~kz=qZM*w>G622i(L>;LUSx z1rPxf0cKY=OzuRrs_Mt}o?9F&Ols|b@1ArEYd zLm97>?R}fI3gsx&b7FHjv}2xVG2S-y0s`+w3QMygq+zirY%5e7M&|23o26CprXshM zx}cl))|1Sl>ug=ArZPmUEq7d+NX8s}|BV;cH$sdTQ@F97ZwTy4Oh~enm`U!wKQ`)| z0)~baGFRm6u)(JRfoLc}B_K+ZC9hIaG=6i|R$3U$@Pj`RNm_6AzVqO{iQ-6q5!Bj=2)nCOMf zOLKW3jVwhiAqRG!{OpHS?|P8q;6}&e8~bLkwvil8o2rQoC1cMyi#Q~Kz=bB1I}5u( zSV*o>B*2V#zCWnKykBR==o17IlsNu5&^9}qrxj2(-8Bp&TQ0CCuJ0)!xa<~kBgzxcrkA?7YC(C$vB)*okpMe50;k(fDSbLY z*qk%r%{_Hr?ffv!R;)c-hRHqx5FNOxD|DET~##NnKTYbY1FW8@RJN6oCGe z^?tsM7aK7MboUcah((zPvRWmKLU#&yxX@r<)WYz_Dl`Fru2T*I*8l#C{h5Pjvf5%b zBY&XE%@>9bX%lvb9=H+#sI`!F zTVpB!l$n6t7m5hk*ZA?zv7kH(E*LH17GSkf2#{^wcYqi9422IbTQ+|Pq&WS#MMv5p>L 
zB~Qud8!xfw)%-+fz?>){07Cxgx6UBp5+{Td3@9rhXsIK_m3e~;zyT%-MU<8TX2Mt) zcB61QtmLS3|M6f;C^S)CCJ=R2Vcd}@En~KGu=wP{K~IuTd?T~rbJ%q{#n9D>D6j~E$AUPWJ;=+bYruWTuh0F^`7yCJ)OJc? zI(^0{Xngzw*~_*%fO|w#JD-Q2MeHxw-y;}(K|f(>Fml^bC((DyjQjNe(*nQ| zKgf*hCbgSl0n#h|+Ls{dWZ2Tq5xB!7!*J~5!{&+_{ucr$w_AQ@M}ak>7byNqbBF^| zJTTpY1BpTapo|RY{kQL_fEC%wkso|V;BOD=C9ob7$1f&?DiihC{v<{?3Wjm&8cwiC(o>&S%}=2SScdD8VbKl2yzU zTQaLyHF(b}JZGH_5l8^b>~Lm9{uEn}x`=0I`pj*~b8ew)71{|F_*P{0`L|Zn=Lfg; z3Y(#8HQ>EevK`!s9OV=>S+tauGx}(F$Q-zrb@Rpey~=azGYkNd1$T7w7?I2Pp1^>O zTDGDKa5UKc_S;q_M`C&Y<+oHY%Iqz!GDw%!7c%y!Q5Uv|75lFZq_dg;5O*Ij4XldT zcI`o9lpHiG_R~2J>oqOuBGhmc2~TTTns0iXG%YBt80J}`HOR67!T06 z-5rEjYbZ@UOmI)|r%ezeQKaX|kC(9q-LCMz)QE5Zpg#c#-QQ6JfmfJP5l{YC*7g|k zvJ9FMkJi1p>XbjwUrqaeLu$PEd8f=;Y?r@V*i9+lg8^O3iAB4=L?DF9Y4v09ipKlG zRRhTP!eh`GoqU1^f*&%X%=K((b1#7;>6qor)#-I$ZG5BhA4N)me>dPM+fJBNst_}daR ze5NywGeMc@)H zidcqJ9C}|2bal$O_#ZJ%sG|LbkPgc!%agazk^T>u>n{sI|H&4*Mn}(mYG>BwzZMEk zrG1<9+y&^PMw=^^<`Q8~Fsi*kv|cTEg@)(1hzm_`PLIJ{Vov9B#7=o+G_%l?kP7Ro zNgiLa<8lBH(hpW-wl(hd19&vr$B1Wg1Z=(7c9{Ex8Mg}nFqv_Y33<3aMV1L!c`O96$!W(tR86ytMzijsToIO!d9`m?AY|XMBjHU zl~j5p&AwW8eUIGFbe##2JX-u}GIPK9))Kbz`t1I`_28rQiO2Xu;$6RDb@#2*@zIYr z1=1&e!P^DRuxRStb?*J$ydzfP*Dr8BHwbyDWf&W z7&`?2MmUwCzo+UhzY{Uhp?5=Z40)0xECHQLET;{5jsu6d)t&>fOX3iD+_Quj%b-8n zWY7tPBUycPpEsq_IE@X8R4?^-eYb~2ULLu2!pM?17ibK)%0HB4Qinn1mX~!JBcUfu zz7L=_^3L5pX}jRG&@05?S-Fw=Se|Bfef$=(|10Y8uM+aMHHxRj-7IwuPBX58l#biM zpqQalWT18&N85ojGZHd$DzQy87_i?wVZ?uX?dyteD%wPj;)kIbE=&zkxbi$ce}k>% zyXLDkwaiaLKcyn^?69>niZ{r}Xe?6apNxkFo@U|c4>l>f_~Syn^u!n(g8UTLL0tB# zWnzKxkEt&1es)K*CN=^pMEb_HXZWv+WrRNo7M?yCz8_qPW>vwnE>Y}o%G+r zz?NS%e!x!HMi%W-^qQRu zT8D;Y51#SZPbJPtx{Xtn@wEKd!BEvEE>ydvW$;8ib_l}84f(o8p6f;O=U2MJ-HHUspGX(GKQ(@&6gUMZ0`d%JO|9>a+psk7n!D<$!BB5WIK@xbDDQ4*1U1h zHp=KuM8b_poi}^gV$)F)c|HS$&)<0u>$%ObT#s=K*C13x{(e+j4-t{l7mmWTwF+qh z5(kpHyZy0?JA*)5gVs$C93pd$jbb2oCTA>7YoE;2H0$d`%^O zN~_l+In}QbLdJ9=&ovpJ``F^QG5h2=227F82UBvZzVLLf>5=lwFt1= zSIy7E3_n(_eo@yLYHOKGWtMcQB3ex`%2Atqm*ND(Q;cKTI{)xOSVo|l_rVuc`**V) zcYBf~$uBU!>IJ{ih<{?YoW%|~yJP!& z^XD(*p2jQ`&{rO7-QRU`Dc>%W4f^B5BCU3RmfUzeAZB{*m`ZI3=YwHlCOU3D4it7r zf|Dt*8j6H3mj|ShOh0x+WbpKY3y5#Xya+6Vj8S`%q!yCiao7vowq%xJlV6lArK(F^GWQNd=1V;n0MGfjpBSn6#y?v#+~ z;DNcgxzoV#TM6&hu*bP-+bjt~8A$*1+HhFTzQ;3)ki#J5+I!*E#6LVNwflED2^ywa zF0pTl{|I+~Lq`}UvfxOMJY7$9f4KU>hMe!OCuhPl3g?h>B>!po(`}9r)K^Y*EyM5q ztEG88>P5j_xG}!s4?(E6asm($S5FkA&}dZW-ua$nu;s?2BgG8#yQz{_D+w+y-n69c z7;sQXR0GNNCT)jFLG5Gc7gpOYh2Ix^aTiPP2Di&soZQCKSLS=OtjwFvElhcMk4?#G zODf#_6@<*=Qf*D!DuQ_&Qj73=!?HJt%@EX=z~#LA8E~($vH0%psGQ+X8K)o_3BPu{ zDI;TKUK9~KS0|9R#Q$GR8chm5IT;!G>)`hj({d?D;gH>Qy*nY-6X-){7x&N;sHSGR zpM}Rnn;jhK={c^Jirlh#68#79ufjO#hh%HKDEgQCliSZw5rLUAm5JKI6{E{9Dj_8X9-?xCbdxb4Y_OE{Ak=c&MSL6X!E)Atn zC19E+BAyS;$E|0_2t=(H6+R_+Q=V)T*R`evrF+GL)4(p3n49-9J59`mMZTiol${H7 zw~RFZx>)T!g1_Q(M@&$3#zL?8c`>miLrKPbD}Pq#i|GF!A!R>FHVW0!kOTuFl)U~r zLgOPC{M@N*Pzh-EkR4HFAu*|A66NVuDa3pfIOl8l)v$@X-^EE`mZMr4{FT;oE1qJZ zv|9J~+)3oMz|;B@DZ}~fm7LwT3t$&6i(X7D?^5!3=wfX;s&0s8x)`hTGBZR}zdB2B z!)AYqtNVYu9c1&ir?Cr^Fb=Ue6c@y z5{?*?758;y!`l2bu+N82cC9q*e{xbN`3Fo!CdQ1NQjBd^{!zFai~!)5JpW=uuxnq3 z(5OVneo`|8eH>{2)HQ=HT)O>RpcC*x*`s=m+RTWxGzjvH(Sh7_TuUp5M-knxItrZG zb2HgwN?FhV`v|&?_mRllPmR%GO=>QZn^(eP8#!t|4vNYN_VKI0cbZD42}GMjqX# zQaquatriaYisW>#wo2&(hdnRc&i_UKDwTZjK^cu;TvP`B(PO^+sj5%+ex&EzD{xj0*o#*h5i_eoslG_%#o2`ySm-<3+s`;*82ZfmqqA!P4o zc{;C7H~qf!yDclLlug9cm(s7q?MX&Gt%0IHJmCq@5AUOVdcW{#`zcfPlXG!nI-|fU zL-{+{r8KQ6n?+w!arj}@S0DC!bonU%?kdT~DqmcCXv4u*8xz6jlwfK?<)8218EQrb zi7yH`={k!hHL&z74UIF5;^dwiy@QG~ff}z6s*EhEA-8=8%L7Gcq)kgCTO#6js%?jp4Xl+Z^PWL&%`AP_~xZ6#R5By|NLoX<^F&d*?;tYKXnj=PaF!Zl_m^9Ugb0|QU~UD 
zr66S?>(+7`j#N3Lo`e)*-q0f4TGmbvT^@=T9jl>x$E~_}@SLE$GlDQ^qPXFBoWDR# zG}s)YY@MU-LqNc>8RSz>cUCTPsmnt!RcxN}-h2e=`|18ta)luc6%^dAeuUV{Q9%$qFbNEngK&&fk@GB)_GQnP{A~_N_*KKba5H{d@4^ z?Flj$?z(&{PC$qaEKDhfcEO>b{xdA7lM7V}&$C;`?*JG%g-19<9XG;d`K3!JpfY@@ zJn6rFCV3D;b}?_O-)$PBq%qC%ilVcn4JcGEu82^+3(6Jdzxjd zWvi(Vu09u|sSkyIf$MyHtY(Wkr@?kljXH^8s7+f3!=Txlig3$eu}Ds3w!&vcT=vvU z9U1fT>or+nN+j$DSBRActe%hGaMr09MT?0RtyjHG1PE+yx*JQr7ge9`Gq4ge=9Tnb zzp&|AeM1(1*w~tvd2O49{E=|aq4fSZ_%{4&@?*HcWsdljWCzFwrSO(XDMigMEhI0$ zC}F(O9selyvq84R@*?4vhWdu2Q^;T)7SimsYx6?w+mjIv+1B9g;{h%lxjU+8g1*(& ziexNTqI&szZa7R%(-0nIcECK$&?%%yzoSJKfnJ)s$Bly)hbx7nZsvV2YP`|x5h?Yq z(jt@oI}0jVB9^>L=;P=a4oO5D`WgXTvJ||^8hjk2zrPr-URilqS(MHLuGd{y9gNiw zh=vX7x%AcSOzccj3`57u!m2&xF z)hEns1v48_@pHtiy=9ZMu<}jE-I>DFfu!39LZU?04KDcLi^-Sv=D(D{I34Y&Z>L%4 zoa1FgKh93I;4E4rntX#7x86#+y>9b+x4T6N-~Ww0Iz0o;o}x+NQ72HiFP7>~#pB1} z%l0`d;?y+bj(Vda3AX2-^v7UsI=YSQ@yzQyXusCU6TAuvWC}+$NN~7GwH=KvPI@QL zT|pl{C$xolf)p!WY3Zd#{G)}Gsu5J<<93F(MxQhFl6)8b1R*MS?LWi;U2mrhK`E?c z4@0qG-Q|)lHeBCCl9Uweq&@QjgpcJepfd7HnC+18prwr_N`ho^t=}HhL(|)~`l$hA zT2|Az5hP84>DDQ2ind}FE*dT9#Ug)kPbtIs#;ef*-@lUCQrQw$KMo$-Da~JJ;jae? zy=B7dy1_u#eGe0D(S(h*LM$r35+^lq+sXtJC$Crc2u*xm&o?s@ zDkzWJyu8r9ums90Y0^Cgh+ceHhmV9nyI?e4<{kbF2i*6W3%C8$BRT!@I$I6A-VXkoY)P*H;+uvANC>-WaSoBl@r&0IUo)Y&Z{|lVvQsHz6DG;X9A~sKUMLc@8!3VSc5hpS9l7= zn5sp~ajkDxV^zUt=?Aq}vq8rQn<>=h*-DtHo$2`XMJhgY$7@}rXyTjjH~-?g%*MBm z4U)-^13~BBgr@%nsyV+)Uf@=_HY~SYjraKoziw-><{O%?MD zaJ0SYy-p;Al3Cc4*C^eMwpKX}hOcufp-e)d5b-A()!sWBDjW(oM@UAtwm`qJ&Bvyq z=Ouar=v#!0rW;iX{-oI-Y(u;AWjhx+sWll|C*GgWqia{~rhjDnD?2seqWZ~JK0&YF zdB*+$+ghcZVjwQtj_SWRc8k(eqJrLG(6x_>U^poIg;vYUXm;{U)!y(=L%xMZ;B90m zvpjzSgSn;@*T_^jp&fryTBq%Vy=d_pjqnj@RtW{in!5fi1!lzZ62}(xPYA}tlN5Yl z8GKX$1i*Eo^g=EeF5;^Me|*2L!2>HJRxqU(4atx}oB7PLO{;Vt=ZN&dHuV=24##Nn z?^ur1P8Pif4(Kp?Dx~qNHovp{3Yi^NAw|?vut0Q12`8F59KrX-0-jU;fUo$GXciBw z4m@Z>Kn&Fyr%6$Z-!Be7ns3>xpltWunH;Dnv>+Eni=7fs7Wz4D7`c&-nl-p6lb-{JoeeQ*RA zChB;)@xo^16+PuGHCVLxj0Bhg7~{=X4=IbkvjB+(Gu12;dxCylblityq0MpX-7&jC z^XdE&)9R?Sz@ihO46yygs z>1g{tMwYgw`+VGO^Ksnz<>ssL5A-rJkf$27X2}W!rp@vnX#LdrWD6#Fu<(*_(0^n0 zzZ(Z0g!%wtD*Z01jvCvOSr_^ZqnE$-DRXnR6HrgaDr_kIq?O~?E65^ilhZ~pY zUpvctKhSNGv7I;HL*N6gm1 zXUW83=QmnfpOl3NbKA^mI|kVtvo)>ZC>B{%G&zLRNiOC-T8@0t?7NM-d*6o&8JYKr zq!C+4PSU0OE!sp=!?%aj*lLR4D9wg`PQfl(L%%JHr|t=Ll8!D%sp^2_r|OU}0)~(| zeULA9HOytm+`F?nB@t8TzmX*QIwp^PUs5XeU3_y1Zg_F|8_;zGuj;~nLBDX?P_zX$ zE-?p9yOPh>AvOj)yN@>WxIdZ&%!igl?5#t{X-{qq;S$G>e56@Pq)As-N9~WwdvTE5 z>rD@1RzNRW#P%-d?EShHrsm3f^*bOXz=+7dpF!R`W)S*QG9>IE-7VuCkgU5DAZH-4a!GD)}{Ub~ZXHmvJSSk_vtZl#i>o>P% z;T4RGf9J79(aHk`j@DR)>;bfk3&5i|F&2j8i{sSO$b@Q++w;G?@L4(Q)RHnCUf)Gb zq%#jxP^t{ml<_D!FO`%Q`W<~NEtEg~>r_Vmdim_=}>&rErz}X_?a&wR%dB zSuF9s|Cazu@!!^dT2i8F8tmF=Rkula7?D2fy1uz)jxTSz$%P;9;r2jd%y%Q_12 zfYKO?!7e$UG6?!Qi}>tQv%PoKXppa^>zGs`I4ob^{WQ{J;~o86qtuOP8ii$MW)Agy zaz&pYd*_lRWu@N#Epu(2h)Oc{YODuxK%6!HfRi+A6nGhG5{=?Ce|6XN?RT!u32_aV z{@E|c+T0IbD{wu=QdsRexa0C#+NhM=Q3U0eswo_)3&*VGTAp!gy9aj}dzKo-jag2% zfa6TVC)gNY>@oSIX!lR5B^YxJSf@`g zS)own$!-wHvxiqg?DBhB?ax5Cpq;e^DBaiP@I>#&-BV2{<;RGKn4O>LrG8IHv(-}6 zwvr?Z{MToW%nJKxBWrn|;W>~ik1Idvo9EOtx0_D5t)6{%+xx(HR)dU-^v#F+K)7DN z2i0x?VP%-o-{3Wfl%cU@Db3AREh~?Yn|j&hC~Qr8cF}FJ(ctDw|2_kb-z$FYIBRyu zLJH6O0sSJi=XcFuHTs51(ZR(nu{GT^BtD>#kPg1F*iM8JVnX=|xio2Z6m)F&UDYe?)Lh#aB@&tNKSb=$Bwj*#oCgBr zNAx{mT;C>);%=5P-BES&?>pT2MqZFaeh&ydBgk(Cv&ye0{i1dUCM6Qz)wTBb_*jMQ zxTt8Qyi)p$*ctxtN$s&{4Yz#RLbkC#dGe+!`F@M?{_u;!)vm}^p!&}55?=bTvtpaqh_=DFE&Rb?@p#B!1B&zinHU)oaBEl6beW1hFvi$^aM)>|F-yE7S))u2U#UaA& z_wvMM#S*b$UCB`?YO!i3&?EE067_g>;5rzOi&-c9mUZ@~8BXUnFF94CVZ5dNYSe2! 
z9)27lUA=8C5#=G%EoNUWJi^3OC+Tv0AR22F_tL7?L!gEYTbJj_t`FLRVnYaqc@KPj zNK)v$)MCI}UOz<~EkJ_$zEhXQU^hDh#e&CKYRnNd|MsP%wY{2aI@GpGm3_FZ{nr#} zXc(ju)QNkqpl{!j6PmfP??{ELcFN3?Y~=e?v-OkgSRsmtEA!QT*!}PPI6;-G?;5c&HnlNAgZ z8!P_+fhHWSj}?^Y}xu<^i%nY6Ly=hOKUp(Nd=*IIe5T&wW} zT3A5Y47wui% zoTQ^!pt8fD^v39f(Q)Fgl5)?YWrRIZtOt4Pg_5kMVFpS$6Hh1oYv@T11fTA7uCw8p zF&NX@wO^9$T1_URXx)N06TD1oxM2KsHsa5a~sBQ7ZS@B$}^?C#-+nYa|=ILr^0O6DrBUFR)LRK`W(X-qN%?dCR%(f94? z{NnnY=$I=*^Mz}I<_MGn8xU&_Iu!kYiPTK#>YAhZWZY3%l(aHU_L$GH*T|I8Vr)+|i0mm3JR3-_wVKdF-um zUtw$PX`unQ--E$4?r^XMG@J0|bKW>j^%*Z`!XGfaPCht~N}EwsigDruc}SD7MUjG3 z)y-GxtWh3+S{6<6(vLg7(oRsBW3z!uqbsdJsJ3ZJwW%)eER~;U_aisH#gosMC5Qkp z&xIoYNMLA)1zZlg#TvlMA+3!jH_PrbG9dV)IJ9I?wYnriq|?ix1TQaK<6!h6y!*4y zp!~CYHh*NA<*C6_W$6)gSSDYo|96q`Ud-7k*JwUP=&Dmh=KkgXj6G-NHT=a81Gm=^DX( z{|H_Opt9t4Ox>g)v#|hP^go`!zB~vwqR~s>$YLH7zHz}$GYlo3#wN^3Y#M?d-y(8v z%qJRCGhUMR?v zbS*?|FvUBLkN{9UhaY8DS*e%t74Rt~L=%1rw^oGz?7H+TlYB%#SVg{-x4xsJXJhOY zZC&zcA)cFOvjLA^I}%5IGLIo3ATc^`(;9@;(+5^3I<6e4m2}xNvYl)c9{DZ6p0cIg zMKug1Sl5_5RD$ahopSa?fnRI=HNbu|?q>>&#`wz?3_%1G)bpg2w)FKVTPnO7d%fhkFXF#9ajjJ_lq|G*>T_rZ_NC6R0S z%=zihF=?&APg>Uzl9vHy;d4ATVc{=S(3s1SMZBq^Mc(BpHYE$(LCMMn5A^P@w^lldmfZKYFj2 zey4_0X(HTi6$d3VE9D^GMCgyx;Jip7+lKLL(;TVd<#9FxJ;*8KgU}l z)TH|z+m+hw$X}yW0=s3(zhX>2L7u{#lz&`f{3A3Rl(;?RxQrIHKd(Q4B>^LoniVS+ zI)Vw6;cperJ=vU!75jjhA{MGHDT1-uC9FCO1pPs1Q!d?8C6Nm#p{)&V_}WSMVdW*g z2Liq8z?(n8I4_^<4gH!ORimTjm^5?yKd#;~Dhlom`=z_VVdz$op_J|r zX$4eZ5NQ}vy1NmOmQWfLkO6_AyFrGM7?6?%B?Rg8?C1Z!=X^LHSc^}vX3wtszJAxW z&C;O^0rr+G>&W1^nfCo=>}WTA!98cFy0UQIiNb$3&1pfx1J_kpDom11@&Q0Ulrt1NJRRy%k zaLo*Yqi%+jM&etSdR$`m|6w2Ij2uICU#Z?pG0y+4xU9dpBI>DnRtsBfJ-(BU$_+Za zP)-CFk`M@9MKp(P;Nhb`t4NC{-~m@a78-XI%vcNCw%rOSVi5TvrJqp()K;>Gx0+)e z|6~*~8=jpoXO|?!o#78f@+EB_`}vvb9J}$Pj*{n$m!Adb?l@3oeC8y%JevChHlD7% zaO#En6q*1>ogw`fAC1kQ+^Lx~9T6a$uDNjEKw@=PZPi@Lyq6%Vf${F_oGn_0LQbKz zzaZ~9cG^3xzjh)&yvV*Nea@C78vfus%%{^TSE1JG8yk>`-5jZQYM2=Y;2K&<#Z`}I zu{Gan6DXvY=?+RaNqcC1%Naxeg$^D@eqHLz^duaDw|<^A%DeMv{%*6k@jA>Tj|VKh z?V#n((&cWtCdXR6wuPCm2HhZudCE01H)sNv1UT2y|YW-SHV(!rPq5a*rSv854)*4j z-QS8o^(@@SDc`8_d>7A9Q9pO@MuJ9T3RtMtzCMbx5jhL|kA2h9X+AP1T;j98E2DcM zbV)#9?Q`cXN+9kj4}?;OjJqV#Eqzh9Bs=y|Sf?3k7y}n`%iwnX| zI#0H5_hT~b7m`w}<1Y!swk?KMU2QZa43du5am6F$k|`8d%%2_oscI-3mb*dI6P7m+ z`f4}vMzEczO&NJV>Jnf7eot$JtXQt^zwzdios15JFo?R*znCW2b&d(je8e=6W?y`e z)~H}9KdlLdH_=&a41q7)ro{|4z4UHwV!Kmcqof*{@ziJRIxUtP$g!p? 
zGa(6U70v-#mQTCK@874z52*6osEFz}#P@&Ga!c#X9Q#rstLM+E5nh+GWoKJeR76oa zJ@Ho&Z4*!4emzGhF--!JjKL`P77NmV%t&EJS#^#5V)enOWpy>UQUO(?N)mx${3lol zhQ2`foQORxilE^4Q$5U@Py`&O)@PuLUd(u@_LqD>H$pDTP5(Zk&u}zs$?RaCo(xXX zfCDD@lwbhZ!GcyyHIRK0u|)Hm%`tsUF4vMHh`vA9uyhot*702$cWPMQ?|d%i#=Gm^ z^I7%vN&L_1Pq4WgrG8lPeMF*Sod9~78fDJ)WR$VR8}lu2ZSU-aeLrjqmD2zDF{9;K zl49}Ke~JucMAgPt6K52ec45CP?mAmBZPAgobs1%j&K>2Dd)A2vsX3qou&Ry38UhZw z^vI~>+B{A!J(E!Ew$zT_+clko-ah3^Pr;FZ7ip@}~b`pzqzU=lH}Q z7U~f`)-g4mihVzC|J&vW^5%aG)br8q&V1?jRt~DiK_(EF5kfdd&s^v{cUd7l*gA9e zkv-8mjMf#eSYhblRv{Q9o-gO#9YV`dGuCjfI5IeMT?8zTMh2f=#P?2IvTAJh%STt2 z+r@>EqPX@8*rB&~fhayS7W0D(Czp{@u>Z@lT)AKAG(WDtxJ1^sR8(uAPy$vpBc4^A zNo0+**We<-O&sJp@Bq8M%dpw+;_pDl*H@}ZeFV7StF0D{EWL?Sg8A1^FaP{`Xj|3< zBR2YaWK3T%%*n;RlPtL6NUMc7bXy%z>n(Iu)a&+H5#h7QzC9mCX}(II2+(gmCLkpx zHF$f;NS*n9`3Rmxic0oW7Nv=YG(j${j74w~?pr_W46Vi>U;Y z*ahXiYEgnf@H=V8^#E)6C-Zo?>wDBxSud;YvFih2_HRq$Z-t&IriZYCg5yId!`VF3 z(eI-gXw`iRh9071l>X};UODx>ZlcGU*b!-sP+KL>#gJj&Pr-2)Wx>LU=`6!3p z1@BvA0;YVvQmGr;Y}YUrY!6PXTCUApSra`1 z$gY+3(roGo_}dC`!|~L(n(RgBWKcnSJ5gZTyb?RU=3M>Y>J!?PhwHG8cuQqSI*qhN z=ARc9(LsrUy+zJu<0;)j56^MWuyeznENsBa*7rHUA>vUCBUZ;rAaZvcEPVdfX^*&r z&bl|@;5Jm*$Ed>@{iBka6H&1Taz{)cS$E2xuxSHc!YIjAR-cj~#m7*A1n>yaMvD)t zyu(wo;;R*#~tzWS9^$~95!!1E!Pp#nV3b2(klNx*&-3GzqK{&) zmktiI5}AyGSO+eUX|>dmaAJN0|Cc${z?CP@w7idB6yEv&&x?DAzs2c|H4`Z02QiaVs1PN2)8fUC`FNS^gcY_pC zJ<|1a)KS-EmVCY2Bvfrq8_u|v-Y5KU9XT`zwLkJrzFT8qUWkSh`g2ZK`*iE{bp;Aw z-2Ur->&<)BSJp#*|hHHQL(*odT#-I_%XthXp~ z#Ax;pmqwnTtLBosk#72W!x#krK7;C_xmPhPy-g`Fkoxj|L!<&pdaYA(-@CirpEi)r zO_`o?VCOvGZSuzr$MH5kAwi-oV00OT*Gyt%p^8Fkl6;~jJE+@`@7+{xIP7QzZiN(F zNsGmF9PY`Z(8x{z0)Zq-5}tjJ%+gNRZ~iw#ZeC6jf!Cx{bvk`s_^4sH5O-VTg@W*yd6ajrn%0 z?%3eBZw&lQ@(p$k?UmF`GKi-M`99lNjX>|cN0v|@Cn)l#j;~P+RQw91TyWqH9bu3`?#GU<^-4X>s1j;J3c; zc8i`uaC0mj!Qy(-SnM_)gOPjp7M}`R#0l49Zhh4VR}9~an=0q8bMVp zHHwSMkNNI$6hM0OW~sIL`1fXdQHeKp4eJ0KgBj%X)vBv3vC$usFxGrxBahx9x@w{( zpI62Jn;pVw1^sYRxXectjqn}YhjD$#0n?Djgzvj19R!@`Pc>$9zw@fFI( z$UlNR$`^z4_Y1NNv8$A`4~HUP!eUVQwyv?;&`@Y`Pysh{J=>?}Z-13!hOuiVza3zJ zz5ryR7Ru_rU$;A_O@fI`PXfmqH+^#r4Wy8R$IJD8^-M~CAIAdZ)#dUfRb+wy2!mP_ z346@H@6r{h@2R_Zj|at>3HUD-vTgE-CGEW1r5{GyWE10N2;~9nC$M2e;CM}i_1H!a z+MD0>S*c14{;(9@o`+s8`?Bvk=#>&fajbmT!YJio4I_P{P(1k8p-^QG>*R;mRbu5h;h_icnI0iX@%FIPXb^iXEQ3P$W7W+a33T?k@)HLn}=BkJQ0nMi zaeNnXN7X{yS!~eil*>nE9p=my|19_``N#DM0jTFJQPDznH|g_ zJrl8O*lnu|ub%5LaSt}P{`w*7U%%Jn<31ntC|X*5GE=Q5=TiP~ZblZ|liiE6ULCw6 z(tJ}?{^0V>c%Um7k7MNOi`NOCc1VXxiPBm>H4c1^_vn$n~fuWHdH!Tn2Qqncv9%`hf^RWQ5Ah z727SRB4kS3a4g3LmH$>P|AZ=$VqyYGpW93Z#>8oCI{hQP;L?JJO;IPZad(IHH@V(q z!Z+3?Qa?Y4j@2aROWMJwiCP>E<2HlqFl9?2owh~my)7Bo(F==|$$Vq;A)WS6Wf|oP zG|kw>rh7d6a25&bHp&F`4&T z?+mS<6JvfEa2ueBOD8mH8HyoP^)@y{ZEk-rlD6}OyoE=b1pM=zi(yICO>B(_Rh*;6 z^%qXgRJu;66AG6e1lCeMilV!JBpr9U5up~Y%xtG$G>lBSbkmQLVXF zYl9?&hPP{;EFleFhl$t?(@GRvGOcPWA;Vjy<`LQ@dcCxc?CO)5Dp=!0l%2qnQ1-3? 
z`cAL-h03vMAnb7v+ng<0buZS%I`^4h0~s9$=W9DyaEXdcow&QT0>V}+GWv?^ zeIcK8c07dz)aclm41p+*%J(7C-Fv%Ly;B(m9-Clf4DbRu{o@P~O)l)p8i-x_H=g7+nJ{-!OWfAZP&r^FJ=M}dcr-O_;W-2(=ynS?=q&iVUOcXZ@#VK{(;atD%_kgh!R6NR|k{ zRG9R2K>k@-GCP9bc<`OfC)Hh(k2G<-3nsvBQk*@pIqQKuAE2R>bVxJFoBk2S!2bkE zPEL*mOR0HNKh9v}AS6L#Eti5y55DFULj@AQGEf-D*%h+-ewaA>h(4}zza+HlUS4n1 z)7Fwnu4gzkE;^9=rr2hofVUz8Pd*6=%69Ar;qZ&;{Oy$Fe%4mSoK)xZh|1v=dZe6B zpSd%kjZdjfQSPnPaOUHsnLy`=&(&3pf0@0bE-WfGmZkg02jLWjc)LLbFS*p##&TkR zJDibrq_nhT{s@8L$fxwI3LwB)HM^xUwTz z~IEq5^Lk`9ua!>oi>$(i`$bEmLgnO%NUgP9+G zcN}7s+Z4kp9?Qv>%XA-&4KS&?25-l{@4iYJgpZbH|C?H2gDa&y8)_bSt+8u17ryTc z*FA%#jSrTnJ+$e&&bFu!S3817FxRI@E2k*%`=0y>GEZLgG$2zC!>ztbTmD0{Qxg-) z`cz(=V2LeUn6GOK^^kGELeXN#LipPY!Qx8ARYf*8eO<~O3G%-j$}}2GN})^#nAF{$DH99O&N*v_Ou#kUAvpS*;~R} zXswIyI`yzMc4a%~F^M9lO+kGSR8#`mAZ2LOh~T^t@e|&J7c25iU>Ty!}&F5WR2{Ce#NGjRtet zWZco7n!ZM`E0jIIU!c292S(#hw#6JFZp=z08q{smiP$`5XQXjm?`woIR1P1ox;v9j zd_AvGq?G|X$FP^{GFTPbhO_T$e=m^hOMw!O8CUyE4g;VuZAnSXgB*KX3n09x_=UTf zo^gR`5vt>$+#|>&Ch92NQL?7A$SCzBj$`@AT%yNJ+O^y; z^@s+=i8-&@eV4_RcPs-w+STXFAF4oh!vkXXzAP_y1_pZV?*ei0F;6lno~(`{c4BXEtEW@qXtyj)=`LOuIV8b?w!(XHqetiGoi+u072trzDzXl4qJ;sl` zG9#-gfF$xhxPu#B%kbncyXEt)H93x}E0UncL8g(;~@KyxzU=(!hJ z?Tj=k9#2wJRIg?Lqgpd^7252dH&$q~FwM`a=)1i0ux?aaf-wgj3#Diq{On+yKeHa8whK_R00X zwH2_T9$0_L?bL>-I7xQoeeatn_GG9h3wa}d}yLfOnz z_Mo4cMkRR?m7Z|k=K5~-Y2v~040qJ#aBP5&v9<-jfD7GjLFbBRAWhi=xF@+p8?_@hY0%Irjl8g2N1CCZ?He z1DRR(E4%H}NThHq|B_q!qv`cYXzSLFSZ=^i-LQsFNJ7AQn7bTH2Q4RY=rrZum1Mn+ z!O#u790kgz^A{lQh!DL3FpwIY#x~;qW4$yU8{ec)A$A=VKRJqp?ntYw+LGB1GE)16 z3)`>hiv$kAC;k2EjW}qI@5;GBD#i*gkl^aRB9pfGYHEBrtGMK9VIGVU*#j zf5<12gFhH1S^H*DIWsP-)T1MNf=0USkt24K3BHxN)`5BMb~iq5H23SazL=A_SI)91 z%dKY5@=(L_neTcV)6xcijDv}2;UE8d#7Z=S36pfGGBAB-KuUUEtbK1UnQ7+&_QVCg z!G(jO@sV>7lD&8KcuH_e0u3LfPHYHxP>p&4#SN#pdx7<>V6v=t(-`t}N{<;R^6@8d zMSx1?3vJ_NYwWI&hkkhG8%9WQ2ia9W{Ro;wR6<6Pm&U_NmXLfGLH_yXqxn@DD&^sW z{EE!;yGw5RPCbJ2K(|HLlXoB&UA)%%>mMO7#u7#in+I7vzz!!EX#I=tBL~9VhbpO# zG42byEq1+-l#hJE6!`S1rG2G+dXN!IVf*BhO0B^d0 zfwl&_K3zkdXsy%ixYy@BG98DWC{fiJ=B2vLgN+eQe5>TmWj?{md)D%5M#W#A7Gf+j za!a(`FC3|rDz@GSsEB1n=S2%1{N8K0U8!jJ2Y$Sa=M-I3EFd}BT2kR3P0b}qJFMm0T#7SvxVXZ$?vTXG-we?n$t0`M-GDAutX7YdXu>-m66WRiE%@L7qSWo26- zy&?#^Ir`r4FE!obKP3^|v)qwCaHIQu;VFqWJ;T*~Tf_W0N|Jnjf**oppR^1=4(?s% z1+ZKE4FP2m#*E*p!nD4w&dGM3XezAm;zQBw8|emh@<@Exx%I=FS$o8B!*Qc8TUdWSIcWu-?WqGg^UB@s`u;DX$s=dQIvl39 z4wWyvriFF=cP+4Ma)Hc=W#Eimo4ZtvD@89N)abW(={akmtdJgU_z}b0-??m8Y_`V4 z#LMHf5|G~DJ?SKsr{V_5!h)CAv<2b>7aSzHjF(nPF7Pd2n~Fdtd*9NQRAis#d%o>tAA2;)i1 zc^Xx3T~Tzjtlpn+aF#s+JD&Vs zvi$$P_$%K~L&iFyn)YLwVi+08>g60E%(}va90czjp3-w$vsg%yzG_y+4?mXsO;ocq z>jTXYn_wc_LeT`U#VdN!N5UhF_UCAE*!#_s534vk!`*7zx7t=WE7r*eH>zku` zZPidiJfrBJSSHCoVou-@@Rb04&OhQlapZG0Gnp!-JoLVr5OPBWR>A;i#--26_W%ibqhX4B{GeHp9NL|2m~k z;I?)ZvMG!U7KnrPvZ16(Xi}eYP?Ecm^YJNnp zVE>W}!Sf=li0AHquv^<#e}|nB3~n8iNoK;goq`d{G%q%lQ#P;DP=P;x-wDS;Rx|VD zaD)D-VDPmFHs;dvxM6=H?rYoWoHTv)-`}4QCjMI$Ggyx;-UXAjYYVnLz%VK0~d?x z7J@E5X5vqM5!a1dl0EJ=M`N?8uosG*?x20(yb$1wtw`kFJ;<$PO;1`=ba<`b*;!DX zmWfxlG%X02z@MG@4Ikh{e&9eYS#AlYn>m3SjTU;san#nQlWSOC?$pQ#AlSU>_J?}7 znI_nkvo4u>H*Co?B(?9YdbeM`li1y$k+dJh^L`;#U0sY(Mp?FBsn5v5}9ocbfUh*%?|?s$~;*CbBaVp#s#rgo-+B`>LF z50;ULOrtB85Z@FN_f#|2I_$&DoPBKBbfVV_wIHj&a`v5f@uu+TV=4yeJkz zaJa~0tl=?g;;dH03)i7Vovven!MCQNaN(abufLAzS%w!=tJ$t_q>#e-U>ihLQd{Of zN@&90Xu@%lL|Bv24FZ~}k~~*zH_ipw)1*?vv5$ZIbKJw>1(W1HxOtzxxm@?3<)qK_ zJav4HKq$X@{d$q06BAW0THAJ6uMMs8_o8KYz&&*-WQ!vBWtDOQ7Ez+CFqRR$Y0%yU zBDekgAyr(u1MzTSXqULF288cU`#V{JpFvIo6s)uXWTky1pkYjf%1$S$E}OS<^7Xr=PjqeXys%&^ zmShZXfBtrFQTSX0Mvrpj@kTDVLHDn)E%mQYyL8+j>bfMt(0aLPbXgzXeE{1*ZA3Pk 
z7$ap|26Z*fjy2)>IBG=)0{t+54ffJg>q#NGXD`C5<(}ic_Tl(|Cw7yHn&bNGxZXRW z9`L`Uh~4CQ9sl*tYAz>$GYBg*Vuyz%)nUf7xI2@z`8Dl+8bu*{l!n8KBj1)zkF}nX z4NnSo5RCini2`bJg|o*3GW95TE47~8I+^BLb8NgBnNyB$QDKs(O<-r{@<m%2*DqWP0Zeq zH||ZZ0b*p%Q4MH-N0CT>b5JaynQx&}+8QUQAPhdh0tS*XUauKY-&M+OIL;oAe2opn z-1+ifujqSh=(qF&pR#IV!~snZdH z?PC6dsv9C^RXC}_{@C1~>>BvkB49YMd<@qZvc1;=l1OG@NTmeyW?CEmzGftvYD3Jq z`|;d!9SE{qdd>w~^`7%?uUELI)0jUP3puE34~$TlX2r`v)(O8%?xXpD@#yLW40MBS z$%gC%>OsK~mFj;9ID5bvrWRBkqgM~gztP@|*?}+~kfa%z)M@a_N5?I^iwbzTy!uU2 z+Se21`PMmxqP}mj;|QiFk^k;HPE?sI)XDyHLm|!V!~oRr;bhzJFyttoN)#`Ei)e~)jP_goKxNLn9pdY zr?@pPi*?rzgTNKN1Lr0#18#Rv5l8>i4>2%$G7Gglv0qwSt=8NTXam9gwVcAF4;V8r z>eHmTLq^Y1;nBJJm-L-T?T>(WI-lwn&+VS567{5)s*>G*;uxvxUB90tk^4N}L~tYq zL=yeHL_WsiHD#=aV0^w>yY;G!UVd;ctBQg_fcFe0be=&o|LEnWmzMqYGfJ|*w8o0h zU{6>{BsCS&?`@uQpqV7BSiN6EoEz37qxuVQ)%>R4Kjuy;M81C=8~)3KIv=Vk$#*DR z?kvg2TZZJ-M@*F&m^tnN?LY<~Ur5}{aU*Qcvpa-Uluw+TUOO>8<&&e7j8ows1lh;4 zFz*XFzsHVNyLvt)Lo$D#??f#dLg-eI406+Dy(QT`l|OH50MXfUUM`>nLx(gA2Dg z*a?`9S5z!~vdcLb!VWdq(>sNZ?1>p-=dF?oMDxnTw_jxdJ}C54FNtmO-mvwG4D1z| z|2Py{?5fz2Ha)*jf1PyY|q99uYP0jl-3of+gHR$(V64oK6Jq5r0(y(*)$PROG0 z@;Z~@-OI-`daeV@`6765n6+XbODen*`3R9Kg)2!)Fp#M5BX{}B5!9?)`71BQ7HYq^ zx~sPKb4oQ%0NtCu068=v9Hc*GmsoR6$liLwDLx1O{6H3-w()q`o=i&fZGla29@X)x zAZT(x6r+QAt0V7K#uuk8$lgVm>3q`(6HwUGrK}uOj7x4C@6A^be&YsVXI~j0xZ*;v zB#I(=I*49T98^fEVMpv4p_nsX z@3`V|gBREF2RVo5s@Hx`my;P8c(>MFj^s$Jeis40<3V->hkM2zr-Ob*8K6X?qrrwV z5mlR2ScH7793#2qL>I2EPw4VA?vR6v$HIeuE<<&E^PZgdW z`TmFXUMl`6p&>-h*JS)hhay82ksuW`y=Z-39`CPQlL&182o7+1Tw0Eu*`$-w1qi*! zU1iI0#_6KS&e;Bh`z17So!9p%Q}A4p8Czo%|8Bb{u5ln-UqOj8r}Hk53$!AOA3LL@ zjSrY>e-}1C)38wya=Ocas&-?G=c=nyJ$+=)B0*VqhB@LV1b$C5wI4ard!x*sBcDeY zb|ObK^MdfhFz6(dt;WtO@Hw9Tnf(!3pg0IO6u*w|hc%kW)G}H?#QOV|^Fxq19sr61 z>Opa;8g4j1732a}aI9c7RbuVffmcQnQWPFuQEusmddy!>;I^ld;g6nwrX2OaCwnZm z5t~dYD+EovfXQ2B{iermF`eBqU)-Z0liI0QYwE`5C!M-(F4`Efl+QB=|TQ@!-iyI5>YE^1sllf)kbN@)B;cvzm2?ORwHU2KF}0C71e zZuO4O$IR^gzPH)51o`LR;B{Mx6`k4o4h|0dbc?Sjn3o^Laadv3Gm4o>^GIoc?JXRm zo025gxxGC36}vIlgoML^P~EWh&ZNEBggsK^F4B*@>V#&Y_`Vt~Tv}L&=znC?Zv*i3 ztet*GCjoTa?H=))o)4rf(g})3I5um^xLcI9tlN!0G`s=Li;(LNlH2Kj$K+=4PujrQ z<$S&^4$fE|0{?pzSCKC$++}Wp+m6w4OIaf9>4E_wGNcI@KWJ)aFJIHw)YA1|h5#V6 zAYT!-Z*p)3CA>GFItsp0Ivd_)-%-}{RNU~#8}bNmQra3k?jM&S+{$H{*36^LuxCT7 z%O1j(F?pb4V20^Zdvfy zXs_0$a6JAGWIg-zA{mkIdnGc?g|H3}A}O}(l})KIB#u0u?kYRCcPdy^6TVeiaC5h;TN9gE`(mPtYQNw^zcMdUtuO?(gGK2 zxL3z=k_WXz>{^L9k));>dF47$2rMLlTS>P&7d?^0!$rI_ANID385zW(o=tN_e3*o4 z70lpp_~9C;V*uc$8A39Fzu@0rUL!4&suHH_&EG-tZewuSnOtmy&A?1>ItW_XzMBsT z#e66NPoxml9{68|Q_9U&Td1lIR89jQw8t*@u+f=_)An&8R>h1-Kym}b3UqIe7Fn<= zl%;(iAA;J%X1Bd%@9)Z^T1#59@;8Jp@DCmK8k-6?I!8WMRIm(CFvsnH{KB~y;B`fg zF&d-eRcvWdj=hW;l4xTMGKRt6UQ4zT8u@Jn{k#&IpL_EVgECVgm%dk{mrmqr#q0YS zHN!vdGwB%qOiX^dlFS#G+@DXZ=A*PM5NUG$EO5meNf+U!pC{{Z{H;dLH!2z2pFq2U zz2e?o*?6wb$|EbaaeG|1uH_dr%*8w$4l}xHaJJ<$5pqg_zzfbPd;2UkTjjwqJWFgja8D z#(pFWF;q?PxVz*Q>2hrlV18`?mLl^w2{1J&h|Xs_{O!-`F6Ii z(@FSq&Gi0U+Q^)GZZ+JGs2C?xAqIUaus%X)2&;avSMA*8t~lZc=m}Phn23mRXHXMW zEn|X-tr@SweElaYwW;pq};VfY$f%B~l+Y9Oi>RXfOK{YhdJ~h}^8ss~H7E zYEr(Zhc*iVfvbrcDL22^v*0QQCe!g6Ua**aP#t6iDnxydoCWOm24sPA3$c|27{ z1NkaPhT~H{m;BwxPi(DPe)~x^V&nPjX*AWse`3A$Jv_`_>)*>KLBYkHExul0M)muV zc84jbCz#n~C)vC^A%^PeGkSWTzx9vBY-p=jm1J*Q=o|?IK?r{zU&3Vbtl19F74lk4 z8+R&q7X`y+tbas54qBW>H4qT|X}@v^*tC?{LSgYJO%R8{5{Xy|bqsx#BGOh^2K^{F z(}=z{1_+OjbI`sdgIEO~p*xN`Ry~6B?ak-*!r@uTxxFu0;^VR-0~a?gk}=0TO77cY@EG7w#!inv#ko{U?*)pMvOZ420(t9Vbw@6 zRKHO=l?K^zt3==65^3yUlPgm%$QDf!EH#WSsP1~c7h3($Mt29{eJKt^y z!}bdov%*KByY(!Ti3(*2oFvH}rYWB7;9h!loQJmDqr+Hd312T?#lwX&z!wc@L{9LU 
zU{(y!I_NM$l>oJ$@`anx1f2T96Uh2t_1T*s9BJ#oI0G2aH-28wS?bG-^0w!!BVWFg zB^|-l!)+ON@f~_Lk}p>-RWi+g@$rQ&*~hE^fR(P@a@A#y+1s|2<^Pr`Bgd(cs_K)z z!3Lz(ieW$G`q24k0#TsplU9smsU3zAI_%BG3wO|S*tGo>q1x#)zjEq!jw>@SDQNF?c%M|Dar(Hzewf(rW@~6AQwp($Q({ih{UPs+^6fTrPCV%H4OA3Ckg(4|%xqIz$ zGwkpaRLsa3YDhPwwzI!jbY&OTDR;7bjT=QsS-Z5*vAQa4xU%3!_EEjA{}}+e$xjGq z3qB!ARJHp6ev+QjVmj2kL}0N=AO8jPJR4HN(l996P_4f5{}JSM}{) z3>VTFT1Au&G?Fmm94kqd2QRw)exG_{$?PUTkZ*{F&$g%!BW{>AQ6YLAcoyG-_XM$x z#{GgzAeo+Cmamw8QPhJMUzb(G^wBoEuS1>OtI=1LCkg^{J1o+*g4W}Oud_^iKd_UA zsC1l+hjzpVr_`K3UJGU)H){TM|1L0|I2k@y4Z}p<`NM3e8(jv2-S02EKdV8Ksp8Lm zJALo)!&q3hXbpz$=V!n0Hjk2mBvibvz%P-mG}OJcC-UHaW#f;xbx{YB588nX+NGKM=GME^pLmS4U4 zU4?k=pgNFD;TVeDXfNwUW*LryTIKrR>YvQNx|#y3`Lysp675!83{X`utNZLJ<+?M%*lM}BX+bUFxG6#I3AJt4Pxn{&tD|}6|6dkBc%Vf0 zzSqD(>8^`SPi|3x>N^LJ?wnE+!#o7P+2LGF&aJ$4r4ThR2Dqx+&X!EhiROn13&kB9 zhrLj-VIEHpV0M|0s;_?p6^_X7vw2ims~=Z8<%`>WqB_GCn4aitrMUB6`J-vqVWQiM zBq5N9q5!c%mVmyCucHfPIxiOiw6tlh;ZSSf6qFrD%0Avp%57z>Ss3P`NEwGW#cVcgrd)7Ot^P zxtmzS^lGst_y`V&Soq(Pj(^|o{VeYbNXJ5Q6-T*Zh3LNW--YU{iZZhg&V^XU`SutS zR6xkcEY^(Y;sfbJXTImuN)sr=J?*_*wu7~D^2P;0sUsrl;*4jXW~{@mO~SKJ+|aqQ zs)-gUWHijZNODhdowBdxd!4%XSt@G(^;JJrz*J-IRQ22tw4v1AOrF5ojz@ja@cNW1 zT*}6{&!9M-wa9D};5&a|p-e$)sDY$pGasJ_TT%_Cah60=!WkU~H*jJz?IvXQwWcx@ zC#E`~FlzKayiZTqhQQx?ORW@}kOmY_I_eoy(@An=*!*;7rF9|K=<8+@YGARbXve+=GZC*eFT?}&c-k9c1_$$ zWR?gM?&gv>r|xtOc`8T{pfFUp=oU;o%;{U6P&*kaCZQXryR)rFWPtaI3T2D((&LKI zCs4-d($psGM=)Nap9L$p0n830l#*B48M-&_33>eDuyL}L2+qglgiHxa>3gvLa@;}g zAuF74ZCO^1P^)#6Pc$@2#U;|nFZA<3nU7|K9)CqQ4}>8@7WqK~sPIY%w1~f=-3SSQ zW4r&q4D28Ct4fwSl)KcM|KS;33|uJ{g$Y1jin)(;39|1Htx>Z-(9#V69`@W9pvsiO z2fANyi-sMy-jwMQ$2z*zjmN;YxrHm_a*5&B4Blj)R&HeraP#<|5*>tf#nvvY*hZ#J z#y0W5rtBT^-(`=!>7?RNh{M=dlu7r1KE{;|PDS~m0z|6NNtO?7d{tM?0So^Xr1U(P=DiU`fpTitY~7OLTe9|BV= zuW%~W|8f{^_lkl}NXeyZDO=PH{K+1)c4?*YTQ})Jp-!T{mao$T;%RX=Ydm)U8-hb? z`)6aA+rJbXrgj=e9NN10C;!K{^DZ^37E_)(7Up)Kq5uX{tA5LlFp1#GiYU3bE)j@!gdAIyP> z5g)9?*smagA`~9PyQ8Q%)I8rHK-v@Zyk~NJ^K?cZzO;+dIK>8^EDoK1t=rL&?4nYsyd}|{#m+%@Lb}YtNPj5mrQtDQJDt`y zva(a_{=F!Mamh%&&QYGb34@>0j3WBz^;TJ>!OeXg=}|2QAT+~or1=T(o zhghXKymAJZpY!FEMsn4O)jIVqg*Y?!KQMb3O*!LK$kVUD-T&#+$7gJEX~5z9TcSZDcQoNg4g!%|bDaz3Id@QMC_!etmu;F02;A zLP(72fA=x`1Rek(ZVSSBcVZITZS_|u&cG9}Spb|y;Y7~QK(1j=!w*kA|M9GpXKAe*gID=CP3I=_-k*;NSzo3=}bT`*)C<{m#>W zv(E-W%psqPhwBdY?om(#1xo5!A$Xez*BtXCMwN*Jq3D`)Lc~xb)&Rl->}FObOY)@N zsq{NQ>l1Klh0uMaDmyWzE$4ryS^Q6zDWh50`hFOnV^_K8&HNBQD7w>^g9tu-l#UYE zkFJqE-RBv;6o}LG0Em%khqxt3Y|GNh>m2`!iDE5tSL>M^Uq+3gNhg!n0pKKdoYs zSjf6x&~6@52 zenG}t^8N)omI;;1Ac57g5WlU(LaQ%U7^Gs_VqD5NTAlwsMQmih%yEOq{HC@Jw@lmN zF{7gr(Repy=1CVibCYwtyAVcGXJq8H+r@S&(F@IkceeODm&Z-KteW%w7)+T9{*?FXBD)FcAXAG{=KlsSlgj^ z0K9!i)NW^_e=cZH7GK3Bg02gXkB{YVcM-CZno5xHJIYbdhH!F;!)hH*$g;sZrK|!? 
z`I{QL`?R@5_n^rEE2Q?0K$`M)niFv1}aIJbN9vM1IAW@PJ)MdeP;)*CSZ#r{!Lsu0ufrb?Qw zsf~;xXs8H%+W+g_GnUWq4X^}bj8w7V`Bm8}Bv3(9rynSwxXAMR3n&cFd4n;xVt+PcL+{#x8Uv) z+@V-;ha$!0=Ks9+xp%#vvhpEW$@$HkGiT49z0u&*!IYr1MckJgz#Cdtw*xWoU!dV@ zzhEk_@;RAjS!FlsEd zf%CT~ahgATURklUSw{jKmB%&C6=L{%$vx4nkVZ%aX_Ku6^Hzoi%X)Cc`mUTTM9MJ& zQQjNi(jN(sf#MLR zY0XfQ_NdW#TqUXAtI>~<+;A??RB#bo)8~og9J_(dPS(~1F^wdJN)}I%`h%OYY)f0GV3VhT&CyY@UXnR$f|j zpK7;NYl?ul?Dhlt-ThUku9>Lr&i%KuBiv3q3~!wtMn8Ge)h9KQyo|pSp8By;E9LT9 zOp*JRYijv_O8Dl=YRNqQ#247_qCt&dMl0^KThoy{pFp*HHDr{p^5H+xzmZzf>#(gy zcGlCDk+r;v3c4~`jb@ED-$L5oxB{-|g-^bgBlWN*%oe}arm`C>$J45+oQ;-f4#{Re z)LL6HM+58QL>GddHrbBMjWn(lbWS>UcB0_Vt`+Hz?T7+~BRJ=fO_LgRzF9JDgcnLq z8AY9z&wl~erT$S{Sw&)KaXk3R`k^|SrN=kbpI_&jh1FpH=r_$!(UB0NO0>d6UhZfS zlxHK~6GW0nQsLKQ8a$>94eS@y~m%Qp@ctV7I8IH-rPZKFyieX7G-(|US|_?H2Fin zR*x?pngI;>V026uFsX7~f+F4eV*8u4W^JabVk^_ARCri#N~|S^-F%$fXep`ZA(WeV zFNgKBFAhZrtnQ3|V8t_rWyhx~9Va1an}J7)YBX17BzOcbspwm_!8iO5Oo%$lp~AkP zTlU-|l^ zAbtAjN0P{#ji9spNGWu^y89O%ock z3x(9pNUZ|4elINXoFOoK+@nBQaZ++#eNeXzr29X6d%_Xl9=2$7=)-@fZ@JJdR7DQl!!}9{bFj3L5Z9d{Cll4&H@N za9iH;zq@~9k#DUGa!eHG`0?BBK!`B!E@J%jwk3X7k5V;HyDbuWP(`pYJhMQR6KS-6 zNj_bCpq1uTc6q0-8M$jQ>B4>0ae!$*(oO;woxO2jg8iGF2DqzOO9XS8Uon$3JEBC(nSmq|7^snbZ5)}ovz)!&0Iy#T+N zyk5`=$8?GG)q;e$k!vF8YaHhDhOjd{e2$xLLcd;b{Wp{y_1@lmIx{}m3v8nEke|Q$ zreDR5h>!P(yi^qxu$lKARv9ghSr)5=Lx^uyd)`Vy89#Am-_+ffG{LkJ6Ckl;_oX0D ze}+~9weXjtPO^}>tC!E;PHPi8qj$5FG@dMxSnWT;)*lxHopN&?CyuraLNsmnt9&lI z@Tz(E9=nb&p~9)J93e{qkDJr)_tODyJx;Hi(3hJ_ze}HYqIXu|k@&Y!0<%&6K-v&T zUZ;;)Kc$`soVuqlvaWnYyYCH!`eycpDT>F&_^@cm_g~5hW*lW~8v;b-sS#FUm1?T0 zqH?A8bYMc3E{3J2e-o%qAs>R(HJ&N}81ic-CVV#QJ#QF$plQEl1i&3hND@oHkxwKURs`IkA?ozJBD^ney}xd~e|{6ZIe&Lb zf9i8ucHzdcrT$m*ezjlwef$0GG7rp``zgY_lScXFeV4i5J#Ogl{rhgf3m4}7s<5JA zRiwerY_&U;=a`O{1C$Om(Kc# zZ47_{lADp!#D0}VrRM&EynZw4xF0>kN1ecGZIF`x`#l-iEEVv84WOW? 
z(79hFVA+9X%SD}AUeVucZf4-KdV8Flalg@wdK4r`-m6hUj;iUz#SF z?-|kOQ3-RzMoSe?RZ7^+)a4*l*w087yef3(yTg-t!aXG=z{LuF1h5H<#V?%wy}cNs zh}k~jC#YWSfbh%o?ep{tI>XGZ6hbvm10E@xB##vK*~@_NDX#&xN zZNKWC+Kz6Vd7PmqT7h+?gJM05o!|3qrCe!D@|N8(IU=G=cU%GR6y!} z7#It)0a5%gRWqK5=WS|$@lo|)Irb86WBggBvv_aOJ8~PG!&cb|lcd?7SzG;bPps6d zeB8p0lfrYQdIzQ|7J~vavI=P3@1Cy3Po!^*P8g>|9vVA~oB^v1*@UWH$op0PfhslM z3kpa(#e1P?kFs@yPm&?vbuf{E&o-R)Ar;{D-g8ZQaDe+#M@d@Je;@KAKtXb7XH_yd zW4HWEy$UM*=ARXiXadw^(0P13opG$Psz$HPdE7@AIF-TY#op{F3V9@gY|(5NJ~cIj zfUsk2FE$PK>$6j{baOLi*M0Z*s$5Dj`?L8y?3b#NB`1psGQNDOU;UF40cbF23w}){ z4m1INWoTK%4bA4V)A91!e7`q*|Hoo#{&pJhdZR7*uhLL3LC{V|v1%XcqSbsjbA)WF zX>NZZ{BkKA4SXX)yd|A{IC`IcKYXu$^dq)cpCx#^fA4~g-FQ~t-^QsC)yQeK)3C=D z>J#5K>Qp5CzM}1a-dN$;OfQ7KD zDJm-J<>lo@#cDH`cAqcE8RR}nHBrEg_dpS472JyYy~zNE*{Vb-liQBak&FdB9^Ml9 zj6_}tozAR*N+Reztg9VUtsy|!2#RBP1#wmv)2#`o-O zruouZy-25qvol-ExjvCO+&J3h&9YR2Z{)V~_WH zJ}spX2hN5S}r>!2Bc0A~KBO-74 zs-#WAdV*UAZcX446F=`0BSW`|iP=;(AFS@~tkbM<#e8Ktql`nmby_3KY>8jwQ0WCE z3)iq?X_B(8Wqbt$G2Z^sV=$3)3l(%Y={7eHOuTRY7Q5iwqO`}u1qtj)eOWzK`Unyr zj@NQYF7W}S>L#F%n%tJ0I=&wDJw8&wwZVDq_cHqGCd~aw*+Xiqd|oNyhXc3vw?U3g zH1i#w34`;7tXN^dNpQew%E1C*PTn`G^wxCB{puA?mDaf|To!}%$q9DykWY#%{sYF2 zqeqD#+c)0TU>vkfp3Ete|Kk9dg8XnJrB&w|Dy4Pw3C*zLQ3UB2ek!lc%%a{7Z^osdM4qNOEkdZNF0QoOf;~ z;ur~fOd1AMEvWj*VqqE1Wksi5)))*}Q=#fnak5Osg;jvppRk+j3J$SOJL$8Tu_b1& z7>L602U+Ye@xdpSdaN&3IC)tQe&da zDmD~A-A&O&XO&#UHIH3Lgq+KG+VS;om(9;;MS2@BVEMlbS(vSWA$qK^L;(j<=A&x z9tKRUNXZv%u&6r>o#RI`#odkifEUD0XhrOb)XL-lEwO^r2CDR#`aV0z z_}AO$oF{jU-IDG;%;EWG{8ZRsN+|`5(}P~MBivMlN;NTdp&?O|;#GfilHH8CFO8%Q zL{5h|Z|}}6(T^H9EOu7BQNNhX^0|KwD`K-J9wfRN;r7X3KE|-b;btn?LYuq0$*>~V zY4{S5kMXu!Kfi*Dk7_xcK^Mws8N2SoqN>PuMCBlb0qX=X8%$0rVU>dbcA5C1eNs+9 z;`R&^6${R3{ZMt-YwJ0~NfH@;TPVI&FwGst55r37jEHK?xr0Ix6~TZ#_=v+{wt+8h z>fO(a`hqWGGG3y_di-hCVFxA%kn)k|_`BMiIR|W4KOZhSxLce9CI;}JKI94C9c{T_ z5cD%JNw=Mu&+D8J2#af|OC*@oo%`W?FV+{x0E6^qwu*}~-%_ddLgzny*x(RZ)SCa; zQu#&udA4Q#m6zUFIBA&@ATCW4O~uI+I*xROp$}vtHUfL7_(6274IwqT@g;B_kUKUr zE~XlE#6pwLpHCi5!tpK2*)0`vzPnK?0Tr^PL1CdI+74fwHDk+qGBfGM)bdk#{MtWm z2GxB}$t|Fq2`N>SygowFizEC+8D6k~_Rg^&mqk?D&bHQGO9PVFZ_e23h=($U!=I2m z3+j>BRcvZ5Nd|hD8N`NWv|w?+@|aptqUTwp_>PyofSQNCsEd)~N+7PHalmkKQq-Bx z;oiDUm}p%ya!d6g7#|=H!dT;=^Wvs~=Zc}H?2`{ZBKm%n?xla5Zl!~uvagu3+TNWF zx8~Z$bO{RgtX5cdsv3vIL3a80Oz5!&+63XHF+#Ks|D;BQyQ_%kh27R(TaR)b@_0Bq`A zHMfJ53{KK-LK!;1?vhNLIf6Ht$)&Hs z*A+d1tRA~sZYhic#~Is+=in4ekOiSiH1dtsHdMW?@DX(I@ zM9g$13sa$sA^MwtVfPn#lyCeu>&Q z?X5z>VlA5d6k^$)LV(Y&bTUO3D8d{PVIn*C9lui3cmpdpJv-(tIQhWnDQm~`DpLI$ zUr}LrFeuOu)zg|J)#Fsb(k%Qx@wQlo*vdD}O(iLf4koD$HI$=&G&{X&A*%}%N%XhF zkV9uw;D2ertRIeIdG=judcS0n0`ysv|6?`;U`uBq`Yt<#7()X~kUwj(^aqw{qJ)Bv zvoo0B<0xR|V~CGG6e{RXX)mnHn#BEj-R01X2BF=m8<(?fG_yG|OW#@* zA7ObqP1KY!ef3S3%9$m@s_~zqdVf_Ky)bCF2-yuUpYh38IHF*-IoR+q zBnrv5<*AI{Er(xU(aUSalUwP|O8+kTh~(QG%`IPzeK;n^8Bq(E7G0+X>(SZPA0rQ4J_X!`eTZGhR~ollVz|`7Pc4`x5gt2>a{IyDY0!ZkDMidV4x9uIV034mxQlNAi^&Ej8@Z4Hv=0 zJZjP0F2iJRRSP(DB*pq2n*^ST`k!5} z0kIJto)cCv72XKLbsr73{@d;(K^PfH5wL{#zuotv+r})45BWSiHv3YLz##kDuk(w5 zd8%6qip>DZfiLcTf3R#>sRLTfrzVLd%*aKhvr_5?A3p5k4^OqWL$4nxJyt}nZ?H3_ zA%y@ed{7gz4+T|9AE>y$3X1ID6Lf-Eqpf4DRTjEcBBG2m>uuM$)5pJEm^X6Ic48&| z4N-8|Pn)hAtYR|-Qu&~%(}=;fPXi)x`%!E-V1b721Q&mgRPftGmSJgs&f>&W zp!V?$ivv$9yp zG-LJ{KdPvCTwajCYLDf^a3(ZE=T1sC^J!6ya9=SYj!Ej*7sN<%x4$sh@`k#s7C>w? 
z68CeL@@}<$!R-`Ia8Dd0aH_6@_R6H;NSYWZZCwWOHACmwjqLCaiWj+D57LNbM(g&M zsc{*0=go3IFJ7b$-`z*5K&I0o6h+=&0^Uv%gzP8b*efd3xY_CntGsDGZyGfF+(-6} z>3h>S-;>@dH>2M4X&Zja_Y_=gr z!A|n)UkyhGkP`ZueD;~go>w*-DDh?<0hyBx^~Yi_6HDvT=rUU$G$m7*V7xzIpp2B^ zo2FR@rI=+*Z(dV7mw@yY6>#9JW6)IXB42#|;zaNK1Q4^?Dp1f?C{HzH)y)Q`&IA8; zA;M-Ek+)bTFhVF9fhs2w-zOJ-j1l(WoQZw;HBmYBejX1f7EC-vni(1*N~=+c)wfaM z)mWs(L63^^Pk_)h< zfX{$uP4?$?1XbJ@?6X}aS|OoqVwuUNCJ}(nGRY#fy1eBj$HLn z#+d_^xGXziMbboqj=nF(XsmJBMU&}x;L2mmXG)}jwLA1nSLDDqI?7@B4Mld6v(0hm zpL-cA5zv;=OmSOy^igW$p#M{^p;b$x)b={IoE3pRt1i!BsPh zOg3O?!>}G~8{X~g}9(b5B8qszUL>>G3_%rc`Q z>}kzRWFFeanI!Zh3gA_6y8W_X`0z6*O@!70Fu7ve1F>U+?dpqE!9Pd#oUMF-2Yv#> zj&OB{1?Yw?g&qIjKdvOmI@fArf{&GnVzlj(u(z_k{&NY{MIo6C z$ajk4H?9yz=S&m*pWm`Xx zBvWhpAEQKT(jWI;kohvQ-h(3VOW36U%un4sbDDrU52##xfs`u(=$wFVfWaDS4&Z60 zJn|o8A) zsJDjSH(i@OniI~iKmC$B9+x!W$vI61I?}B7DO%x`ipdUp*+K$pD(wx{9dM+8B@EU4 znCr62r^?R0QA<+jzM$E3g`;1BVHf0Zo5u3+H~FPUE7M1eE+r;M2@@8viA#GvzxsaX zZiSyG8K`?@41&6#)S_8OFC4%G6aY^C^UY9&hNZC0H#4FV%s;XP-l{l2bu#|f#WUy8 znM{?YZ=|AN#Ppw$x>0fkKI72D)gTV(uD9v!bSb>kiXAsr=jAHmA1pGgBpuF%=8~_~ zFE&*aG){YHGO>Hn=JJ<*L^nPCx=pO5On$#xN?hz8-7>C7_e2ECoaNs-4qD z;b%eSaO2u&(ZneS2g?1BUB@DnQ;oFdQ6A~#r*+4-y=_oEbq)t^LnZO2x?S=we;C>;d=!XT@#8#a zbF-dwm!*nPf0v<8S8*2py6Ta!;X`pLWH9h4!%(uU z_@=VqXu30lVhBea78*?jSvI1^A$ge(-7 z6KbczG2x4g)htL-K#|hk=mt8^6N*4XM`MvXTCK>f=Xc1IAc^wNn1Sb(8lRq{?zv8lOgHkp>gQ#Y!SlH)cc3(z#?#|*X$!EbMiqOmrR2L zDHZ8JId_sdUt#gA$+dm&dE541IPRZ9nYUju^6D*zC2E|Ir0JufU95-#>oxM5*!)DS znxlieB=F17pC{LJ8oSj@qAme=Q91@X0@CW~3bP}^`G19$xz*ATYO%RsjCl{fHuW!} z|5K{|g!w8_9V!};4@=qoGgFI4ldIjX4{Gf&NkKT%qJ#~^zdO!$)2v%{zxNJcs}M_z2)-C-tm)4xB9)*V;n7``lEBqTbq-|fi^6K%gz61Q2~0)>C6P%nQ5`> zl@q6jn!hua%NHly36B*)v>AD6S>=WIpyw%H?@#A^Y%WR8qL>F_1N@+Mp;qGmR~Lo^J=NHI6|ik*D*N3V%r15MK3z+aywZJ0M);_z zHYYYgXp~G`U{nF^zAeUT#M+y(Mnddv9C>t7bGn>=%Z{X+uX)>FUD%Jb7`7~G;Tl}w zS4Pii)vR}TYR;D1fFbPOCxT)L6d|LN{<1z#^wxzXDvxY-$)8)GAVr_VeZLvSy^va_ zdyUl=W1dHVK)YJAx2<_Gj=_PJ`y~H9Mlf;8W5XOfX?d3)aM^lcg*HCtLhQ|~A%y|f;L5-extD(~s=+?yt zI$R|gQ@`jefi#@4(VZKuEbFJOsZC}RM>tz`(>41KxMHgSMjCkCA3-QJSX&asjIHhaI^-cTtYk5?R zf68*{0e^L$Hg7hBo^&i8bFq)57(w*&kN@kEVJrOve6o%c$D^g8v1qJt-f*n8b|ngd zW=&QE(ag)4tvA2epP3TdE2jrs^R@2P)n)pjXmuBF=C6`|g~(~S;ma>o!|o=K(P?|3 zGQ`YPl)M}_Ce;t##))^BH4V47<@UN-(8tN|pr=g@XobUSTa6i_pp{$VkgKdnjFTVX zc^+T}HhGzguteSF`tC2newfRczARUVwlW z#@y5#eR;ON53N8f;#5vN3w1LKb8I)akE0Ke+q2}2MAz24DcqLfrmnBD_=-RNqhm7KN3oop1457ALhnN zUYh1=8tu1Am*t}s6P6YB7h}`bn>mZF!L_&e>;xM$6{A9lVp{X+2m4HzGF)mG(>Wy# z|xG9{hJL-`ze zZgqSN3@+|{o5OQe6OFeQCA|~rKh%vQr4xR3b~4I>M`NYC+Fe}A{=2eIyx^4-NsLtS zYfp(~?Azze>Kb-R5^DW$%+vT+Xd}`)bt27aEVqiW2S9vt_K7@xi`eIL z>QQtHM4dxWjNlCFF7lfpME>b2_Xz@V>0lVg!`0;G;?kNrtzo<@FJlCN3|Db>k*RL5Fleo`xZinv@d=FqGe;v%ONxAj5#Y zvH|feHPvSRVY5{o3)mLBVc82Q*KKj63m9LbbZ27EHRZLCtm(+8mA9%+qX1RrT3NsuUMQFA{H(SMP>6L?Y;8Xv4EZc^8$ z`S8g9T{oHUN8A5TT#bppb(t)EWwJ0nGd@jPadQIr&05Evh%JL06Z58NlN$P1lb)FO zBa%ogC30`Me#)p3G(Zz?g=_X<5L=k9vh%ltb7kc?9=B%ttyylXrlJ0ZC)!l-$wgS| zqJo0U^!a$Dy?s@!eY=YO`oOOayfkd4nuJ;Gujz4jxvH%dxZ`Ul8_yLChvw>9+_B{~ z6IK0ahHzYdHuK#noX(`T4;rl}*Vj;YQS(jl#!nfw-MC}7>^;0)A2LbQr5a79hYez2-#3o`j8hY;z{Ntyi1Aun>=;p3YdyL1qB}1@gjKM3Gb{YdR;C@>s8by9g)-kb!$C%LwCPfO+vu@=^+~V8DPi)2KkmPV^kyS`8NYN@ z;-)XzFo=xw(1tq4Lf=mSR;-c)G&G#&!^-c@PVNqFPi_ySMO92afZpD2PG%0?F0$TF zI~&Kx8qX)=eZ0>`KHjEMPrnO zyw&-0vu-Z~E@!_2SX)Vr{p0A7BD}jswKOVHzWGWedkv_^2L&X|>Tl?noC9G=1b|gh4t%nL zJ-uyv#CpOoa`n1`bg!f(%IXNqt6Jxw5BZ1ul-~Kcy0*+cB8w%inHMEM+Xu`^n zqLUyg)vaQIe~CPaU`zSTD42f3pMa~NOwbS^G?el;EReJ2*5;!Lp&t%zY77Es{zd(7_@W zF}L~`z7F;~sgN%??UOK0$T7^_2h|YJJjFx|C_qiaV#J4)b(p`%tvB%R4u`JX8ZbLo 
zbR3m8*)Sm&R(vNQ9ZEZG!kSL3^GlyY!O}w{cX%QkgdT`$Wjy#W@CU7Ln(bd5(-DbDuSu2u%&>+Q|Ek_z-?-2FwKgMBU@?n9irs!3kJ zy1{So^FR|7@sA78r~!e>fmjqXI@yU*_6f}U*!5HH#$gm;Z3@p6)7I})ni?ZWFN#oY z<%y?^&t|9U`hcU^by61p=JHA`d{wqRzt&qC6KFJAuY4Gs0l9n(OGt;IZ=sUspyO=Nz|mc2zZ^xK1v;|F_0g!yHo=6AV+)vr_RP#br%Cu zr%lnl-xIz*YRRa!dqO-dnQD+WWk6%Y-UhUa5==V$!r6X~1Ad~hP>A95#<<^Zy*Cq7 z)X3;`<9SeJy+7sOuYwK%FlS4?P$4IWl3t&hYMu&6D=;w3OY*uJ%@BiBf8Y2ske_?4);Y_y z6)H-PLVazT=8UgcEbHQBpe1^hnjGd?lm6Iw(H5BI{Z~ck#+f6b71+sqoplWHC9fYE z6^Hwgp3bAoYj!Z6<(&OX%;QvAXm zxJ_%>loL^-D}k;qt{q2>6LfW9fdA}{sE|WY*1iY2$oY*@Tuk98o)6Tz$J_J&ZD+o; zZg?HkmyuaN06UvwPL4Pm#Bx42R9pQX!ayv;aB{PdX5ly~dn}#6W2~w398!>x*?|Bi z1zlqCWLTzVqdn%wD_9%SLAXB#RR=6TQZwsGXDygZ+Dq(8PyRuGI=?~kkzTzmp(3F8 z2YOjVA&Qf=R^DS$JQvXd32Ln6EkfY`LIbIU5jOJ;J8(?=D%1vwZt?CCND}W?Vft48 zvx70X@Pj)!xriS4<)M${D=RsK+EV==RGc%F5AC0`CXzxoOHs*p=S#{)(v~*TyAT=h zUfN+TT5#2MRFrI@dIoSRFo(u%gz~fiI`U%Heknc$^CPckS8&Yl+K~}V=<@eZhDPjK z{LtvZ$C{4g(Dz5F0%@%Wg!?zH!i0C z%LP#CjTBvXN&&OWIq#Z6?l(<3|Lq5Tq(5LTS9~8qU7ou6Y(AUhFg0IW+ajg1%B0M*Gtjq z4e_X>P&sOS8Fh`(+x@=uGE-nd2zskE8;|k$3*`m-+=OhUA?;?hrEeJn#d-ekM0)9Z zj7)kC{&PLWNDYUbLFAlR59Q6l{_nB(oB($HGLq`_qCTJ6SpV}E!`vhtKx51G)l*i} zdMsiY*S%Pgf0MVojgE?{iI<9cqWF?nutSJoU z5t|IKFZh6H-5PV4$K_HY!}Y(eSxGe6e{lD(vV)`Jq%)$}h{{w23u6#nLijqQF1@vW z?Cz@zV(kaukZ!(7m>uhy*|TDJ+4SFg&%%!krE62EJ?7Ajgj~z|ZL`1Y7W<#xqr^Ft z0AoY@l{sx-OZ?CCvNQgSl&2WtMDJ_q2ed>ZbBDB zTr`s`M^D?hRU{de;b2?@J9vHJ8)1HSG?Yjc>Rmoh4(=4#%<6^}uA2Q;=~sYkDunI- zxFLr<)KCZkD8sF03YNW1g2K$a>6%EO_q|LMBMh`O@>vZ~rf@K{UX6s-t50el{=@9g zfA2W<^oep!@*>1#WpgzbR0g5a19I-?8ZotOSzWE0<`m(Svkt{v9LT}klQ>bKM{A{^s|?2$8PLIJ(!{0Qnqxo=Say_5B*m?z{;Vys2=1q)Y| z%9j7PgZkyGnu~7D#JB~Pr*U&GBsd);)51@N6t%Q z((@||l8>HJK6}Kne>YbXij%D4c=zkN?4{Kbse zGp3PfE4vLrME8D5(nfbLNSDg2yD}7=>st0+=mh;ypic;iFru4VlsCRrlC%+Y^#Po$ z3;d+iA-yjvMTn4fmVz^C(o z^`=IAvv1 z`!X4K1gFDgRibhOm=af0v#AIy_|Ck)lOox*E=@8>L<{v>7!}v@$J0UdzF41teV)wW zPtUqE@O9AfES6nw-Y94(-_!QPE|29 zoCZ-{=Qmg$NO$09tYwzy(Z7F61FNF=aO zg1mJK>V*}L#`;=Ew}12mgI<<%57c6OJ=+M8BXD~6XoTRu6Zs<*p{?=%Q!3JuKh@JQ zpNS#-l2Qd3!OQd`Xh~`L;ZdnmX|Iz18`jv9F+@u`Tu|4@{GTYm73QZ()pnI93k1SQ|dAq30|uxWvs>^jl_;U~f?7XHzBRB7KdK?++j3{O2Oa z&Q4csR#ZK{?%HPVFqg9SM>OV)2u7SdUBuj{W94(!iEJ}WA*Okj@*;z6)|3ayz)Jmn z!)kXIIYnVnyYovc0S8b%?_*iM&EM(bfBo_arayLWX3WwvE{qfM3Fe23OD%wVTjM5) zmZiREqnVVbN*Le4@UcpqPHtuoC$hJc_R<1yrKOsO>FC(NOnl z_j9NJMTOpMu3>c_H5x?^GUQmC4;ia1FU>H)49~TB3$4AGIp+<~MNknn&LVrg2u%fF zbFd#A99rOBZlJs_OFWT@`b2YBeNSr=DwiIk&2pHC);*wBXj34;FmzzzEkX-GQl1>6 z#}izt5|dGGVz5o`GwGN6Yw%=8nzPm9F$^6A9drn9Tu@rEK;r_gckckXOnux(_WQDtx+8 z7HTxbDj&Xqe9o2{ml{p7CmO}B3rSR1871*a1YJ4?dv8XavnCRJ^C^lI_4nkGM$98p zHQS+~2f{OQyRXcuR+X47b}ch@6|EeeUnrg>?Rv<_Fk}N6kQSxfBm;zYYk{QEv$}#Z zraq#Q7Y~xr!)rZPqQJym8PNiK;jv3Bs9TZ+_6Qjnr zPhUMprNH$XOlJ#WbLq4jMc^8F^XY|!q7(W@KLh!soXHvOR2OTZo!&NP;$*}l?f`61z8R$O#4Gs+mO#kfDgy7YjVOcE1&%uVt21X zFIfEK2%RqlXbNKKAC?oV=qs4F;_(@GEmZ7+24j4F#FKPc?^KK>uSL5+-x!x_7S)QG zcm>LvvwCl0N7VXCd`XyGR~o0?$mvbcXuOkkIT$=&YZ;#vEfye1@zZEC_}IIBXj(xZ zb%Z7qmCe<|1_iQ@uVm?mDn`$US9Dd$6Qy^Cx)k)>$L!i%NvI;OE2L|{ug`iS({ei_B>K0>l z-zD*@Kf5kYDiU@?WS_E;7%Viz0_fRF8=ym#X$viCWXXxm%r5i$xb4t(Z^(~D<6C%Q zRm^k*%%jQXTAJ&@ws}cJEgrmn}rqLoZkNR<)LC#QTbH!AMrn@ven38fmM7N2`@rgahl zW${P~i2SW!WYCo?CC&7!`QBti`VvdSz>Gt`qNvShgQ+=aF?S`5WQEN3E0rxW3KX^2 z_&D6zx^EBNLH97OB<;G%$o`-9zA~!Hu4`A3?hZle?oD?{cf*#hO-du(T_W8e4Vz{o z9n!S{=|(|9kS;+%&yCOXKJWL9bG}pK{5%YPFvcG2`(A6!HP>8oUe`6Lm4fZn^u*?S z05C)ibOCnHw&rrW+g~F7L{C+90-mMa2rE}57f{V~b&2^jR_!`FHe#nC$|youZbx8l zO(svBb+J^N&tK#C5*!#p(nSJ8WzQTpd;jHAJl^s3A;r6IU#Bl8(;1XD2(u2Uh!;qj z+zMRpe?)!Az}d&Q%H|QJqITOyGb#e1$eeDbL9W^HaZ{;}negur5{9WOXWam;1eIjs 
zkk_2cINk&(sgLE~Jyp4kd?q`2{yZ_${E~LG^cVN12q_V}(~5W+26RYosMtKTwOWMD?2i{}U@cCA7Ske zLEag)&*_epyI-S}n2&`P$0uUA0%c1@m*X!Y**n}j8bp63cZYVOO{;+W(G*#tBO;>x zFWGS{awI8_X% zpre!db5NE4c)$x1pcPeTfW9=7_XYV`UvztK{-u%F0knABWTJ7=AKx z6KmA>PTS$rO}9=|v>QI^K5ksgx8JYJoM&G}(jAQMs8~B6m>2_Ub*{;Pl5?f zAj&3uFnipar7b6yt`ZXawo9j8Qp6Rg*($}OTWD3aVyu?YrS2cFAX@3hZk1`J4LYW` zzwQ{UWnQrcOiLwNz&$hCmfmbZh!3$~I<3k`oY+ZFdNNx z4V8bK(WgU)w1I<*vig-4wvL1F=o;agl4?giOObW!Z5shf9rbB=Xj0uG4brv@lU%g(DeYEBh?G2n1~~yM7LEi_ozb}-y(Xs>&DESR+h+5vP(v)2E|Dow zC>vABO=5bcQ)(=0y`kI^t+B?$Z@qeWVCPsxK8Ro#f!<7@4@jFn!?9A((Vp7VEN-nd z_&fGF{yX-W+a<@xFG8%?wMx^ze*j8B?ZsB7`mp4I?iXAmnQ3`>gF)ytOG7K;hlirL zEJ|@JvvE1B?s4diaX6E!IeEr;nmU=rZyN+e!jhO_O&w3A&~ub~*;TA{V_g1K2i>-| znc@gFo-xYROv(!=@^K~!j;~Xh5?_9%7**xa+{#ybbj0ewFz0AWA2rMrrg^@*BZeNA zJ+-wqm4yIrcb(*)MW39oD46Lh>T;M0XocExR)hD{7D!0)a^`h?-i*K?c~u3Hj@y2q z;?uRZA2aWb2|yCzBuOOmvqSeZ;kW=iRW%+_S-5_Ey3c}8d#vOh#JF-~lF^Hw z+m2_vk1eVH-CTL%aKK~UtVrOn7jC0@n2+~r-l1LQJpXvU=wYgNG-a*V$kjx! z2QgN6HNPJoO%o;tR^Wwfs+a-b8BeL#42rxlkOK@J7;(i`Sz}3GdOC;K4$a@-`uW%* zfzNM~9|6YNRuKTGx7g{`xrksUkEX$(K=t(Me^5O=-}~>Yr&mDr^t~+juj;AZSakao zn*2Z2Q?S>+h^OSiHnkpYJ`dS(Tx$iM77NSWs%VJd^&N?(U%q zFVyqe-JBabMRbLcILIza-s=w)2=ndJ8AmV+2^7Z}@wD$l;hEaR5ZRjLz~)B}wyO$Y zEkp4a0jzKZal@KuZ=m;RyzG}PfwzdHw?E7|KMk_Iazi0Yk*}Be*Fmefz@S_CH|h|h z1*wT4o`*eC)O}aw@S6ZY?6!VuC$Xe$3ifgGvKFeZKpWw$>j2f27d2X$2YOzdjV0&L z+!0`~KDJFuTa2rk1+m1DyIYIi#?3}Jd~;mFz)?E+rNWJIkRDUS7YNAmNov43kJ<9=p5 z_nutF@g$X+Xk)9bml`sux+elMMR|m>xLFrha1GbC^`2Bct-A07sT)<* z)jD`HeA4e(y^z8YBYcuaO`1&2MO<%g!a0~E8R5~CD$Tv8D(u)C$@1LeBW2T?MTx3~+ z&^Goh41Fe|Qcus**N&8ml=T2*C7~OUl*L3}%#c8Iyma4}XXwm_;W&72r}@+o(*9U>*}YC}L>Z>H zP+^3}zRxXnllU2F4I$oot)gkZlR&;spuTSV{Qb#L!sQRv$}8EQPKO~4y5ntiHvHWzVF}Ve)$>wpwEZ^bJ>6s|*T6mG`52Umwmj{(Bw!$gx<3YA5J}>yLnrSWUB3)lTTeK(o8QGIFKjh?1ELPI zSBKSL)hr}c0dcmD-VH`|z6ob><}4c>-Xv%3*->qQT3+=N@wU3|!!>3+UAs52++Db} z>+UO~LB!V{OdWwwxTmDyrIh?cublDqvD13U@yhZY+FdQlkU)WUtc{fZ9??NeSle z-B-Yr=?0*P6~xwQ+W+98UBW`H*P%xMk#;YUQlJ~eD#6LAE{RCd=Tmy1BsQeilgfSjFyJd+_ z`O4+Y(^=32f}9?=dUOl8OFVCrShC90R!#;f@tim2TIG;><5LqUl!0*=Y7u+eQ$U;!G zJs5ljbB7Ja`{T>?OVSXGp4YAHG!;FH$%zhta>w&Gy!7k?)fLYKsZk+$C^~K$mRu}g zp=n}K#V47vZhC<}LdD^&yE|!nX(Z{3b?jr|Cu>Cntu>sZtPD+?8v@;}tW48=bYRjCjf>fjolh9vS+nyTo%gsgT_mR&S;HbV?Go zH*i*ad~+E`Z~l8FrC=~Xs3dKGHDYs^bhawqPrK6HOuSt4*c#iy$&q3@=$76lUtbq6 zD-fjjUJOP1b?8^yWOsHvGZ*i0zE zc#A$ZTC^Jwr%tndT$Z*d1XQCJI_&`3LiYk@%jRVlMglQTxi4$u!~JHKxz@G^;Kb;G zrtf5@V?&41mPVKi-zHR%CrX{?0@W21p(+MylL>R0qSQyadDU*vpa&*OmEq1Q%%wjB z=u}*mOVNqe&_rUObAna+c_NGvngx;Cun}OoR#e}Zt6pgCiWjC?o9b696MY8U@lB<-K z#h|bdFbu@$bmuQO`VLQ)4;W24gvlnWbegio*84ocdNb{xRYp`mA4V>|-aw?@p32LC zrw#EP)@=tClJX?wBEgl0q6sYZWCLR&qk22s#_6@vGQ2#07G2niD+%>^8c&B{MaITn zD{3=7hGEVN{ZmKlXj5NqpGS;wup*wN46Kg`b5`NsOz(svLe3Exoy4@KO#H_Q@j}Ir z%E>QczL^;8w>H-&b2-Eguq7$7DsLnhKAJW}Lt<7@WKLLV?Z0J%alH^4XG{3iDw{)y z_4cuxP}mAx=DMJ)_fPwCTzG_Km}ZBw1#WF=iOQGk9rSB`H%UV-K$H>hrNUD{uYt{s zZFOQ|Vv21vA!XSw?rb3g1-T+~RXpH_$I1s=8mcJrMgTj#$}UyFIz5B|Z%>gTJ4>vuUk)nrzPh7W zzo6GKGT);Lu}1XsBhA*MXXKU_dn<0l*Bu4`J`EJiDR^n!1Y zG6#j3a&OJ_ivSFBgOaqg$e?ETMz3_DiOOyKil`#`jgT z8#dRs3q)Saxa-v{)~~;IdfPNR86YL~Qhj~_k)Jt1m-%Q`Pc27+a`iL?TW)TRHV%-$ z>l*X8G1;lMo#49cxx)YKTeuSQP#@ja0a$w$G%bzm3XvE21Usb@N@Y|+`T{lBSdg6CjgWm<-h=!(o3Y*?lYKfo42)&3V2;J1JJ^1%C5 z;B^$o`bPS~W*l|j1TMAo`JL)zzSN1BrKx20UjgPvm8Ttr|Da8+I zbg!<`m)>i@Rc>&~J&G`GzntVZZEs%nRY{-CF9S3>iCc{NPgaxC%lj$Rs z)SK^2P0eR``lsi0XV7)BC>qPw#8KC4_Nbq+&Cv&PU2SQqzqa+7$wyr_+>1J!z@vn8 zdk1Cn)E70QFEpbv=aq|<5w-5NtJ|)~25)bP1_zD(zB4SD>g#huR?vAsUl?qN;I#)r zfv09GqK+vQn6I)>U!sd?g@^nwud+AZCvke2`;6oZ%B+T6K82Brq^ z3vlM7(mRRlDV9jYr8ID4LfJs}R__5;>>49u)f%T-n4os)F6_{b-Lw2~qpUMp{*2l1 
zbk(bCaPuSaq{Be?La+Ax60zHIW8pfAW%SFdXJ@3$)~F#P-e*tM?k$Kp!b{hilX{9-K42q6OqxQP#(lciJz zqQ`AFq5;4$MfR>pboqeM>M)*`&=QK}$8Ru4p(@dqHYj(* z`)Bx&=gRcqI$XZQWxCdqlTS&;z+qoS`pKJk>1*Cn#eG^D@>}$1ud@YcP)I4E;+r)a z-<=Tq1M0No`S*;sP3ZN3^E}5SY+DF$1H8b)HzL>fW-ZxmcDBu`F4I``w-ATp`;yU< zZSZFfYewou6bd2-7Urs7z88~oOCP&WroT1N_IEK)Lx`1W^n(&VR{0#DKLtG z=o|>XA~r#b_4AxPnGt$E5l)(-#iM4$7OjXCku*?_Xu)DrliiB$e58l~TLGic_7oM- zz-V}qF!5!+<+2`pw@_uzlWh!`hZ>B4<&HDh?0Ox z@b-3Ld$VFa8T*8w>fu#3si4`}*5m2sKN%c_3?4m;_x);i>Y;}aLaf6TjxDzk&+c9Z zzg2@zGMl`&??a^Uk6(L%R$7G43QNU~EJot)Z272O9m@+zARi7+2uOG##MbDlb z82G7Ph#14UZA+YHc9AhM&Y(iU<3h!4{OFq8_jz0nbB%y9v$@0fPV{(mV&dZ?C0>{p zCk{B>?EZv8_!9T$QNlBkn}Bs!%*F6*p<+4ql}1!0CZ7mW((1l3i?5<|slDhnbk(Fe z?X#Qaqxdix68`qO&AD~MHq0WAgHynJ^4^Q3!W>;ceqz`xep>yPj^7UqY6sh=r@;mI z@qk$hd$GksJxhzKE`2~VN1^-p22L> z(~9YbNeelASZ8>Q5a~v|8TYkurqP$~yY(&?M<6y{jFtHSx)h*9>E*0wE9~t;p-(!` z5M)itcZhcgeJ$jIgB;D$q` z!pG6^BY>_Do3y)jE6B?e_kzD~YTA5;+LZr>-WX%{4W~&?;xjH6^l?I4Gs(vhME9~- zD09^nMuCsN>V4ju-5kzHO}$#!l78vWbKUviK}bk#ZAc0l zzP`S%uL1q1TSU6?n8Sclf-itVAiHA6|>iN&p zbK)+WS}dWOmJ427MT#gU=mfa}Jm05pnk0-|9jeU-+7SVgcxTS`xALIBoX8dyX|w72 z82(JDMog9Xqe29eFLLy(jM8x}&8e{T1WO@@QT(d~wcrcbBhuihb3lcu(GlLpp3}~T z?WPC9%_)H&9v)nFib^|Dm4>sUJ@2>F+y$f_mooXNcio7yrEMeM0gq%&g+;#0qTixT zb(AU?zB>twWimK-%m_I)4+b0T$hZrqq}WUj#Q9BQ`cG!0rEPYIS0(?7n%7`?N1B{~ zXwG6YlwLW$%gD%hF)6FSk3V%Amn^_};)2*v4;-++^K!SdKIRQDsHOf)`@rGrgg{D% zm{q`!<(sh7h`z!oc&v#TUls|d%)Fl1m?Z>cNHGk%14BDXmt9An4OpK>3z*$Z&-;y& zzlJ`d(9HS6Xyh}rat&*i%tVq6KMD@X8vRgh9d|kdffotzH~S8Ma=E|R4+k&DgJ*{( zGP#2e8ne)0Z%=_k=xLjJeoU`sU$yI(U_5UtX{vO@!$Uh(YIA&usyX~+gpfj-&l&dV zVr@^c{1@sVmnaD8gXL^1D>n%RrKBcI_f=AHzFahTbh$$Wgj0cW2VCz6(?!xmDq$Fh8iDgIShha>&?$og4bC&N*DE6SKee=Ut%Z-XBs_TtAVw#UG@67o|?)(6C+&p2MISOu;*z6Ux z4n69lzi(vdeN*N4o9Gj`seqEhK8JD*=f~&={x|^sS76GguIN0)3uAtKrB>Fy6$C>c zJ>uaAv(j>Ybtouee!jh(n=7P^3In>vbR^@&X*gsC;x|GbiJ`)F9pEb*T*}tGf8}#G zZ`tGybqQK%zdw?R?@~36i~>Y<1Ae6M_oM4=Z5qDyaYPgKJgCqtMupikpu+K=6w^fY z`tZ0@CaC}H=gYh%jXbh>(8TZ-7_D9HDI*`S=5;!SwhwfIYK;9zoJTETbn$E9*=K1 zQGU3cM|2t6QpG>3I>!}rHc>~l*xa)GxX@O>RftSm!o{PB!1A~sLc2F2VjmVI!|gv> zTvYsih;EP5exq{@BdXwGJ`gAP{biVJTAzhqN!gvob4?!V6Y+@-9`wpibj zOjMGU_DZMG4IN}o3~7?H%wsWM2+z)aI${`3$fbRn!$a_>XhP*E7hnFnN_I~c&F>_t zlCeRyF_etpvA5+YCNx+aN3D*>!VYc;nqW_oQQTAF66fh3ze{bqy+jWYnYLO8PBgVT0(iXRw_oX&mb$xVhItJl6n zr^FEaHqEk23*Kz(YLqW+%~~Xz14MwbYmFFmTahBxxMJ3~oKxl^_6Xy=QtaI)ZPJ$6 zbYWP&U0_bZ_TJyGW~7}~9Rc!mh3d|NC)|?>YHG^f*m@SdRU@WnLA zoA%3zz$E5Tn525)g91N*+Bs>aaV^_xoB!DY z-vb>77>r=-cpx+&n9P6 zPlv^B1Li#0>?TtJw7(?DciwCR?Pr63Dp?>|wW0&O%Q)cx{Ri5mR)E3Bk;K2lF&ZCG zR_K<=`ela)nL&fAowT*Rqs6>O=F<*Do&N0=4EvH__IiAdOW84^WUVf}pUInt6u4-$ z=u?;)&G-Uo^<$y#!=>9_f}_mLW5>jZB>Q&q?PbVor&U_#vj(QSdPrj zg6MX!N$lNZnKI42jD;1x><_<_Vqxb)EpYRzu-1!A<qc|E#if{TP5olmtUqsm*X1-FAG|9z9+^X4V6 zP*GA<#Nhs>x3c;10vb8`o@7qpe!lwKlDi$lxfZ8vay`D<44@LDD^_&vF^@DaewInm z$1a_%fAp}JK+Se9Rsu%D)?(;RthxR`kGf4UTy~=C_nC@0pMlV8zs;@6(Tjq&FyP2y z2~|Ou4mkaXB$Xw*k~IS6R3^{~olNFsRqKInKat7SK^-K8 zWgS@exjYx;beRUhLS4CnJfa*ASt*U`>-P5Z01F?>Sds|6NE zl=<4CGV7VyH8&UfQJ1a`9B(xNS%8?BwXxToz{Sxe z`st}8r8cj_f`{(a|9A$<-Ns`G(e}F%p=$~vf=7CekZ7~(&C4sTa?ptn;+*)|O@hR2 zXw)5sww4mg%2Qy$H4y%jvLWyaN^*bhh%`GO;Es4CS_w3UAN$mV^*olFIThwqh=+~x z?R{#hospeRTw}x2GzHJEfJz`|lUa!&kxXkAwJXQk%mg9SF~#()X6G38RuUMJyhZSu z>U+KK!R2i-c`nB@$u{5)2#a90qlRF~>X+?4Ed@$anpdTc|Gsw^BB>$f{5wlC@pqPH zpt{huE6f|&5@-vAX$%K!pjQ84%Lebr@N}g0=3iEzbBp&d2P`IbDCO(pbztIcGPrnd zH;9tOZsXP5KsH6+k$U$vAUG%GF>HvYM5GYZ#mim3dp9p94qSU`?oy9rK-DdPio!T( z|F4Zd(EexRheTS2walPLg$Sg=KAOn(c&^mh4|nT_%5yy{;;Bzu=Ts>haQXIUl#+i& z87ln?7^9{@8HDxM-8^-GP5`sWKMSoFLeRS_wlC5dypvzxN 
z=}lBtC7;w(V>`xxPG*hqd!RK|XB^0YJos}bC;-JF7meor7p=E2H-f=|FR&O5quhQK zkZ^q)MFmnkBQpt+iX7K2pUPPKOt=4NY-eva3ya$!+LxQp@NBORd%yTVdHF$LFjZ9r zaKh-hsQip$$~kE&Qf{?Q+0Q_qvPB6!1^ESKu)N)QQS?@T!9GhiYFJ|`Com|d4ALaK?F)D0RS}aKZ++vTsz#V_MiB@qLi^T$3T-yAaK;Hd^ zaY(o1Wf_pTn@rqy(|+}U3oaym(ba3{81-WE`51UkrEOV@92*3E^?-2^X~nF)34b;s zR-G>#kc$~Wo*Qr$%GDNn;{kcd!>OrE=-$b`QhQy4G*2G-pdB|T=VaZHgt;g7@1eZ> zdniD@;{SFi#m%x0=2h3{aV1kHoD#J*kYy`ZVp68+1sL`wvD zDp2a0j$vEz%)_Ye|Qq00)#d=?_Mhp0it3Phyj`#%@ZD* zrQlI(7=2$}hIo)$SjryKDFyHh8sHhoDuUNAiW2FCf5%Ui& z;vv78W1mvC_E*Bvxq%E@%n#7j8-1DZ$+oYra;3R30O&Rzt)k^0sc-gn^L%dqf?hyS zQ8?J`_V&+OWSbaeCu=FN>+_lfCA>|9a$2LfRYJ9!(JLH@1o0dnPCua_mOtK*SLDGP zR`3GDDYX0WBv1($PI>wN_Hfq7oSCUCi3JQFL<}F6geke1NOf6;`tRhV{X=q6SfXrP z_5A~mH0mJ(>s(2O{lpzmPvO~tD8CXMsc5b_qr9aaC?l)>9D_i$QP9JFgdS67&fgh zRvlwp_DZJ;);@3hJQfAQ(YGTK1Ar>#fFU+;!9xhGnCN|wbMsE%f11%7oXON^SR2Rn zK_gvKyS1*vw!CPb{Akg=y{i`xe(fC)=kFO!C3VUZH+2gmsCdGZQJ}8z@O6dzJ&amf zpc|xbb(mA~^BIj|q5B#@ZoDYTaGAx0X6fI#LA$?mgRMBKG#QV$Se13VzCMN!C9m2T zA?J*CT6*FBboo)a_Vnoecku&4NtczHB{*&`a1MVfRio?ch@ce&Geo)kylo1K2EUm7 z#G^G64E_V|&~^uW0H8`}ClC>Z?WxR^Cykg{4utFAH08= z8k7Cn1E=V-jOGRAe>`>JPA_zof?7M1@eQnR0rv zT;$739uXvdx=G>m9tm<&n+%n4I%`p0Y1h)qmOZ-px1jL<ZWG}98 z=WKNED}HD-pSL*bdX6#Bw58P`FosV2JNAP76?-N8{7B~kMAFQr+Xq8Xxt4s@A~%QU z=bDl0vGkgbb%ok2vPI!FCr2ef%(2P<6=8q^W)Ms5>d5K&Ya5wy2II5Poqs6ZJwbc# zCyp5MjAS=|1TX%-HemHZQq2ZLt4r?&U!o0vqGHZ8Z-hQs9{>SuZOCL;s z3V!%%RTrPdvbaITnuCSr0R!{D6>t>V3#q7Rpt%f=leF1ZL+Ft&QFGXiJq+LZ{k`}m zFUoad!!I}Fb)8C{<-Yzi_`mkRB(RUI{@%wWK^i<5_ZG$o8Wd~7DZ)8D{Hclh=6Y9x zj|o+m2{YpxBZ4f9WQVMPG%J*CwK>N=2WoNAic5PR!O*KLsYg0r;8<-!)Yxx2!M|=^ z^q&JQcQnKj?K2jUU|H)d#aQ*FkhT7U%Go_-Czp1QM?2>iSy`&5$Uz-+oVR95E1BCL z#H$V-wmm?9tOD#F@4vG9gVoZ2wS`*^UG<4MKq%!}7giwKt z+x&C0UD>@HQ(t0K2nD^jW6+%lje=^=-eH-w`adMRX-5AFv^@Rz`H9Txe3-9(dhbqtl-Wo3&mQL+Dc>KWyy4}!7p-Btmfu*e9To-t5{hQa| zB0yysPG=zViUIi6WFwp32>B46kH$^qS-Dc`Jp0>fSl&;y;BKEfw(?%Ve4(m3%Gij?9ks|)Zqmy(j|_oj&hdtVea^WrMDNN#v#b*`xj z%E$vg&wA{Rq@_N+;gu>vY$#kWe8wZ{qb(c$Ju>C_+HS>mtjzaxfzjS{37To88*qz{ zzGB6p^>cOQO1YZl9!%4FBZBufjZ$I#+z5AwIA;L;(p}I+M0LQ*h-b!#W%y%!CjMv0)X!59_S5uFO}3F0Ki+pEx9W9I zBtT+Q>?z!4Tw`XO>D#YY{>b{k!Rfnk45}PBsKLyE4yozLW25HtKTrhloxl@ZA9{Y@U8^0XPOG^ZcryR2edr(eQqluSc zQffygGL8=Ou=;1$rOMeV{y4|;9VkWAyS1FSysDy+&`wHZo*m|0ALemrPuzd)y)w!w zs~?*^e0xsp8%~d#E@^;Zj`sim`2Ume|I89zX=gAzdW1}_C?low-&@y*x2YeX@yzE! 
zU+kBrO}OZ$uqZ(?^l#S(ak2|Co-Y98J)`UV+=ELrn&bHGObOa}F1C<*#CUuIArbvG zYXRA@CBEj%$ipCH!><&D^jyui!t=ZF4oM0ndlX$7{XY_nbz?VD{NJFr_^=I%ymC-H zEW)uwSEgcn+=945qGnk)#^50m0#;Ncvnw|0J`J==|L~REfq66KDQ&`qMn5wfQuq~l z@fcy3O>b|6C9%FN%{s-m3ZDq>Sh0A9hd1%3*U;0YzSFB^nafP`u)MaVbok1RuWYv9 z`s>FhuTK2Ng#`sGVjG(3PnmP2hhh^GU)ke`<)C`5)yra=pM6zg1>))09tWYt1Idwj#4}Hbt(`A#fM)h4{{q6`>Hd`>rlk7;d7f*Cco(w zr^2LsSSmF%5BXTJ0!?HHeO_GkC2FDNK)R&r7g04osj?AiIdz%%ACXt&VZwZVIEd`@ zHuUTV%Oqh-{Y-uMsLCJo5%_e)?(QeTtq5S}w5sCAA1w#cB<)H@0ND__J~urrgC4y$ z%&$WSVv363BnXjewg6AHzqH811XySyMBs_>W?GpOI}HtQp_x?6>bFIftjBAaT{a#Y-TC=LZbZsXbjjswh0h+Qi<>p%P(eN84ScA2 zJxNX#-Ix}Jm`x*%^rEy|aHclb{2rQ2TJh>SfB$NcoHhySNVa&Rpgts7jMX3U;3Vhp znPOyEc&CS|za@`N3cOV*Dc1WtM83}x5!VezC5Ic z%S1`h?u;Jw9(ncnfW18sVS40qu?;Ilk8GtY=9cThbHcC42vqOz%$!px#t`na-%qMX zj6~K$rQem;HdM~{wNVJ(v4WStzL#7HTz>c>cqdM)%*7YA*p5b%9*8ln zPb0P-JsO*ZwfHx;Lh-|hAY(D8JdXComLOJ^uyW=%mc(Dce;^^S8P4huA1qe;IQ=kb z4$~z=5Ezrr#=+_)Uj6c^>a?zkooZwprEod<5B1Cjh6C|UYnE+p8Bl#5-+epO!4pIIw(vz%uje}zT{$ly!>`assHqAc zG8%T~m#gmv%P&LxYt${fD__Ofgw4ePeWZLiyBlg?GgDv?>doRh361Z{;Y9Tn@6%e) zXb8=%qyOwBK3*_6B`1RA(%Rcf4oo1z)GzMo)Y|xHm0` zLD66BJgcISAA$E#% zFSnZk|Y+yv*FEjXoSOA`L^kchlGe zwgNXaUY^mXbxQrc{IXMfk>_L4Vf^jD{E(t~S1BB-CoL>GtH-lz8Ym(Z->L_!oz_kE z?^KYAts7SNxKPJ-Q=RgLaJDhmoV0x@C;Dhnj&?UAkMjFKbnVUgbXX3XK>k-OgolOt e55Kw$r#M89WmQrR2VEZlKZ>$yGPTlXq5lmU8D1d( literal 0 HcmV?d00001 diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md index 8d9bc09..98b3867 100644 --- a/README.md +++ b/README.md @@ -1 +1,130 @@ -# FusionSent \ No newline at end of file +# FusionSent: A Fusion-Based Multi-Task Sentence Embedding Model + +Welcome to the FusionSent repository. FusionSent is an efficient few-shot learning model designed for multi-label classification of scientific documents with many classes. + +![Training Process of FusionSent](./FusionSent_visualization.png) + +**Figure 1**: The training process of FusionSent comprises three steps: + +1. Fine-tune two different sentence embedding models from the same Pre-trained Language Model (PLM), with parameters θ₁, θ₂ respectively. + - θ₁ is fine-tuned on pairs of training sentences using cosine similarity loss, and θ₂ is fine-tuned on pairs of training sentences and their corresponding label texts, using contrastive loss. + - Label texts can consist of simple label/class names or more extensive texts that semantically describe the meaning of a label/class. 
+2. Merge parameter sets θ₁, θ₂ into θ₃ using Spherical Linear Interpolation (SLERP).
+3. Freeze θ₃ to embed the training sentences, which are then used as input features to train a classification head.
+
+By fine-tuning sentence embedding models using contrastive learning, FusionSent achieves high performance even with limited labeled data. The model initially leverages two distinct sub-models: one using regular contrastive learning with item pairs (['setfit'](https://github.com/huggingface/setfit)), and another using label embeddings with class-description pairs ('label_embedding'). These two models are then fused via (spherical) linear interpolation to create the robust FusionSent model that excels in diverse classification tasks. For detailed insights into the model and its performance, please refer to our [published paper](#).
+
+## Overview
+
+`FusionSent` is integrated with the [Hugging Face Hub](https://huggingface.co/) and provides two main classes:
+
+- **FusionSentModel**: Encapsulates the two sentence embedding sub-models ('setfit' and 'label_embedding') and their fusion into a single model ('fusion'). It is the core model class for embedding sentences and performing classification tasks.
+- **Trainer**: Prepares the datasets, trains the two sub-models, merges their parameters, and runs evaluation.
+
+## Installation
+
+To install the `fusionsent` package, use pip:
+
+```bash
+pip install fusionsent
+```
+
+## Usage Example
+
+```python
+from fusionsent.training_args import TrainingArguments
+from fusionsent.modeling import FusionSentModel
+from fusionsent.trainer import Trainer
+from datasets import Dataset
+
+# Example dataset objects with sentences belonging to classes: ["Computer Science", "Physics", "Philosophy"]
+train_dataset = Dataset.from_dict({
+    "text": [
+        "Algorithms and data structures form the foundation of computer science.",
+        "Quantum mechanics explores the behavior of particles at subatomic scales.",
+        "The study of ethics is central to philosophical inquiry."
+    ],
+    "label": [
+        [1, 0, 0],  # Computer Science
+        [0, 1, 0],  # Physics
+        [0, 0, 1]   # Philosophy
+    ],
+    "label_description": [
+        ["Computer Science"],
+        ["Physics"],
+        ["Philosophy"]
+    ]
+})
+
+eval_dataset = Dataset.from_dict({
+    "text": [
+        "Artificial intelligence is transforming the landscape of technology.",
+        "General relativity revolutionized our understanding of gravity.",
+        "Epistemology questions the nature and limits of human knowledge."
+    ],
+    "label": [
+        [1, 0, 0],  # Computer Science
+        [0, 1, 0],  # Physics
+        [0, 0, 1]   # Philosophy
+    ],
+    "label_description": [
+        ["Computer Science"],
+        ["Physics"],
+        ["Philosophy"]
+    ]
+})
+
+# Load the model.
+model_id = "malteos/scincl"
+model = FusionSentModel._from_pretrained(model_id=model_id)
+
+# Set training arguments.
+training_args = TrainingArguments(
+    batch_sizes=(16, 1),
+    num_epochs=(1, 3),
+    sampling_strategies="undersampling"
+)
+
+# Initialize trainer.
+trainer = Trainer(
+    model=model,
+    args=training_args,
+    train_dataset=train_dataset,
+    eval_dataset=eval_dataset
+)
+
+# Train the model.
+trainer.train()
+
+# Evaluate the model.
+eval_scores = trainer.evaluate(
+    x_eval=eval_dataset["text"],
+    y_eval=eval_dataset["label"]
+)
+
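+# (Optional) choose which encoder body the classifier uses at inference time:
+# "setfit", "label_embedding", or "fusion". When an evaluation set is given,
+# trainer.train() already activates the best-scoring strategy, so this call is
+# shown for illustration only.
+model.set_prediction_strategy("fusion")
+
+# Perform inference.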
+texts = [ + "Computational complexity helps us understand the efficiency of algorithms.", + "Thermodynamics studies the relationships between heat, work, and energy.", + "Existentialism explores the freedom and responsibility of individual existence." +] +predictions = model.predict(texts) +print(predictions) +``` + +For a more elaborate example, please refer to the [Jupyter notebook of a Description-Embedding Experiment](./Evaluate_Description-Embedding_Body.ipynb). + +## Citation + +If you use FusionSent in your research, please cite the following paper: + +```bibtex +@article{..., + title={...}, + author={...}, + journal={...}, + year={...} +} +``` + +For additional details and advanced configurations, please refer to the original paper linked at the beginning of this document. \ No newline at end of file diff --git a/fusionsent/__init__.py b/fusionsent/__init__.py new file mode 100644 index 0000000..5a7aafc --- /dev/null +++ b/fusionsent/__init__.py @@ -0,0 +1,4 @@ +from ._version import __version__ +from .modeling import FusionSentModel +from .trainer import Trainer +from .training_args import TrainingArguments \ No newline at end of file diff --git a/fusionsent/merging_methods.py b/fusionsent/merging_methods.py new file mode 100644 index 0000000..905e6e3 --- /dev/null +++ b/fusionsent/merging_methods.py @@ -0,0 +1,138 @@ +# This module provides different functionalities for merging two sets of model parameters. + +from typing import Union +import numpy as np +import torch + +def merge_models( + model_state_dict0, + model_state_dict1, + merging_method: str='slerp', + t: Union[float, np.ndarray]=0.5, + DOT_THRESHOLD: float = 0.9995, + eps: float = 1e-8 + ): + """ + Merges two model state dictionaries using a specified merging method. + + Args: + model_state_dict0: State dictionary of the first model. + model_state_dict1: State dictionary of the second model. + merging_method (str): Method to be used for merging (either 'slerp' [default], or 'lerp'). + t (Union[float, np.ndarray]): Interpolation factor, can be a float or ndarray. + DOT_THRESHOLD (float): Threshold to consider vectors as collinear (used only if merging_method = 'slerp'). + eps (float): Small value to prevent division by zero (used only if merging_method = 'slerp'). + + Returns: + fused_parameter_dict (dict): Dictionary containing the merged parameters. + """ + fused_parameter_dict = {} + if merging_method == 'slerp': + for key in model_state_dict1: + fused_parameter_dict[key] = _slerp(t=t, v0=model_state_dict0[key], v1=model_state_dict1[key], DOT_THRESHOLD=DOT_THRESHOLD, eps=eps) + elif merging_method == 'lerp': + for key in model_state_dict1: + fused_parameter_dict[key] = _lerp(t=t, v0=model_state_dict0[key], v1=model_state_dict1[key]) + else: + raise ValueError(f"'merging_method' has unsupported value '{merging_method}'. Choose either 'slerp' or 'lerp'.") + + return fused_parameter_dict + +def _lerp( + t: float, + v0: Union[np.ndarray, torch.Tensor], + v1: Union[np.ndarray, torch.Tensor] +) -> Union[np.ndarray, torch.Tensor]: + """ + Traditional linear interpolation of model parameters as simple weighted average. + + From: https://github.com/cg123/mergekit#linear + Args: + t (float/np.ndarray): Float value between 0.0 and 1.0 as interpolation or weighting factor. At t=0 will return v0, at t=1 will return v1. + v0 (np.ndarray): Starting vector + v1 (np.ndarray): Final vector + DOT_THRESHOLD (float): Threshold for considering the two vectors as colinear. Not recommended to alter this. 
+    Returns:
+        v2 (np.ndarray or torch.Tensor, depending on the input vectors): Interpolation vector between v0 and v1
+    """
+    return (1 - t) * v0 + t * v1
+
+def _slerp(
+    t: Union[float, np.ndarray],
+    v0: Union[np.ndarray, torch.Tensor],
+    v1: Union[np.ndarray, torch.Tensor],
+    DOT_THRESHOLD: float = 0.9995,
+    eps: float = 1e-8,
+) -> Union[np.ndarray, torch.Tensor]:
+    """
+    Spherical Linear Interpolation (SLERP) is a method used to smoothly interpolate between two vectors (i.e. model parameters). It maintains a constant rate of change and preserves the geometric properties of the spherical space in which the vectors reside.
+
+    SLERP is implemented using the following steps:
+
+    1. Normalize the input vectors to unit length, ensuring they represent directions rather than magnitudes.
+    2. Calculate the angle between these vectors using their dot product.
+    3. If the vectors are nearly collinear, fall back to linear interpolation for efficiency. Otherwise, compute scale factors from the interpolation factor t (t=0 returns 100% of the first vector, t=1 returns 100% of the second) and the angle between the vectors.
+    4. Use these factors to weight the original vectors, whose sum is the interpolated vector.
+
+    There are several reasons to prefer SLERP over a traditional linear interpolation. For example, in high-dimensional spaces, linear interpolation can lead to a decrease in the magnitude of the interpolated vector (i.e., it reduces the scale of weights). Moreover, the change in direction of the weights often represents more meaningful information (like feature learning and representation) than the magnitude of change.
+
+    From: https://github.com/cg123/mergekit#slerp
+    Args:
+        t (float/np.ndarray): Float value between 0.0 and 1.0 as interpolation or weighting factor. At t=0 will return v0, at t=1 will return v1.
+        v0 (np.ndarray): Starting vector
+        v1 (np.ndarray): Final vector
+        DOT_THRESHOLD (float): Threshold for considering the two vectors as collinear. Not recommended to alter this.
+        eps (float): Small value to prevent division by zero when normalizing.
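+
+    For reference, with theta_0 denoting the angle between the normalized v0 and v1, the closed form computed here is
+        slerp(t; v0, v1) = sin((1 - t) * theta_0) / sin(theta_0) * v0 + sin(t * theta_0) / sin(theta_0) * v1,
+    i.e. the scale factors s0 and s1 in the implementation below.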
+ Returns: + v2 (np.ndarray or torch.Tensor, depending on the input vectors): Interpolation vector between v0 and v1 + """ + is_torch = False + if not isinstance(v0, np.ndarray): + is_torch = True + v0 = v0.detach().cpu().float().numpy() + if not isinstance(v1, np.ndarray): + is_torch = True + v1 = v1.detach().cpu().float().numpy() + + # Copy the vectors to reuse them later + v0_copy = np.copy(v0) + v1_copy = np.copy(v1) + + # Normalize the vectors to get the directions and angles + v0 = _normalize(v0, eps) + v1 = _normalize(v1, eps) + + # Dot product with the normalized vectors (can't use np.dot in W) + dot = float(np.sum(v0 * v1)) + + # If absolute value of dot product is almost 1, vectors are ~colinear, so use lerp + if np.abs(dot) > DOT_THRESHOLD: + res = _lerp(t, v0_copy, v1_copy) + return _maybe_torch(res, is_torch) + + # Calculate initial angle between v0 and v1 + theta_0 = np.arccos(dot) + sin_theta_0 = np.sin(theta_0) + + # Angle at timestep t + theta_t = theta_0 * t + sin_theta_t = np.sin(theta_t) + + # Finish the slerp algorithm + s0 = np.sin(theta_0 - theta_t) / sin_theta_0 + s1 = sin_theta_t / sin_theta_0 + res = s0 * v0_copy + s1 * v1_copy + + return _maybe_torch(res, is_torch) + +def _maybe_torch(v: np.ndarray, is_torch: bool) -> Union[np.ndarray, torch.Tensor]: + if not isinstance(v, np.ndarray): + v = np.array(v) + if is_torch: + return torch.from_numpy(v) + return v + +def _normalize(v: np.ndarray, eps: float) -> np.ndarray: + norm_v = np.linalg.norm(v) + if norm_v > eps: + v = v / norm_v + return v \ No newline at end of file diff --git a/fusionsent/modeling.py b/fusionsent/modeling.py new file mode 100644 index 0000000..4bda3c5 --- /dev/null +++ b/fusionsent/modeling.py @@ -0,0 +1,274 @@ +# This module contains the actual FusionSent model, with additional sub-models for different prediction strategies and classification heads. + +from typing import Callable, Dict, List, Optional, Union +import warnings +from packaging.version import Version, parse +from dataclasses import dataclass, field +import copy +import numpy as np +import torch +from huggingface_hub.utils import validate_hf_hub_args +from huggingface_hub import PyTorchModelHubMixin +from sentence_transformers import SentenceTransformer, models +from sentence_transformers import __version__ as sentence_transformers_version +from sklearn.linear_model import LogisticRegression +from sklearn.multiclass import OneVsRestClassifier +from sklearn.multioutput import ClassifierChain, MultiOutputClassifier + +class FusionModelBody: + """ + This class encapsulates the dual encoder bodies of all variants ('setfit', 'label_embedding', 'fusion') for the FusionSent model. + + Attributes: + setfit_model_body (SentenceTransformer): A copy of the SentenceTransformer model for setfit. + label_embedding_model_body (SentenceTransformer): A copy of the SentenceTransformer model for label embedding. + fusion_model_body (SentenceTransformer): A copy of the SentenceTransformer model for fusion. + """ + + def __init__(self, model: SentenceTransformer): + self.setfit_model_body = copy.deepcopy(model) + self.label_embedding_model_body = copy.deepcopy(model) + self.fusion_model_body = copy.deepcopy(model) + + +class FusionModelHead: + """ + This class to encapsulate the classification heads for all variants of encoder bodies ('setfit', 'label_embedding', 'fusion') of the FusionSent model. + + Attributes: + setfit_model_head (Callable): A copy of the classification head for setfit. 
+ label_embedding_model_head (Callable): A copy of the classification head for label embedding. + fusion_model_head (Callable): A copy of the classification head for fusion. + """ + + def __init__(self, model: Callable): + self.setfit_model_head = copy.deepcopy(model) + self.label_embedding_model_head = copy.deepcopy(model) + self.fusion_model_head = copy.deepcopy(model) + +@dataclass +class FusionSentModel(PyTorchModelHubMixin): + """ + This data class for the FusionSent model includes model bodies and heads for different prediction strategies. + + The FusionSentModel is designed to encapsulate three separate sub-models, each with a pretrained language model at its core, and a linear classification head on top: + - `setfit`: An encoder (body) intended to be trained contrastivley, with regular (item, item)-pairs (adapted from https://github.com/huggingface/setfit). + - `label_embedding`: An encoder (body) intended to be trained with pairs of (class-descriptions, item)-pairs. + - `fusion`: An encoder (body) that is the result of an (spherical) linear interpolation between the parameters of both the `setfit` and `label_embedding` sub-models. + + Each sub-model makes up a unique 'prediction strategy'. I.e., each sub-model (encoder + classification head) can be selected at runtime to be used. + Only one sub-model can be selected at any given time. + + Attributes: + model_body (FusionModelBody): An instance of FusionModelBody containing the model bodies ('fusion', 'label_embedding', 'setfit'). + model_head (FusionModelHead): An instance of FusionModelHead containing the model heads ('fusion', 'label_embedding', 'setfit'). + multi_target_strategy (Optional[str]): The strategy for handling multi-target classification ('one-vs-rest', 'multi-output', or 'classifier-chain'). + prediction_strategy (Optional[str]): The current prediction strategy ('fusion', 'label_embedding', 'setfit'). + sentence_transformers_kwargs (Dict): Additional keyword arguments for SentenceTransformer implementation. + transformers_config (Optional[Dict]): Configuration for the transformer implementation. + """ + + model_body: Optional[FusionModelBody] = None + model_head: Optional[FusionModelHead] = None + multi_target_strategy: Optional[str] = None + prediction_strategy: Optional[str] = None + sentence_transformers_kwargs: Dict = field(default_factory=dict, repr=False) + transformers_config: Optional[Dict] = None + + def get_prediction_strategy(self)->str: + """" + Returns the prediction strategy for the model body. If not `None`, it can be either `fusion`, `label_embedding` or `setfit`. + """ + return self.prediction_strategy + + def set_prediction_strategy(self, prediction_strategy: str)->None: + """ + Sets the prediction strategy of the model body. If not `None`, it can be either `fusion`, `label_embedding` or `setfit`. + Args: + prediction_strategy (`str`): A string representing the prediction strategy of the model. If not `None`, it can be either `fusion`, `label_embedding` or `setfit`. + """ + self.prediction_strategy = prediction_strategy + + def encode(self, texts: List[str], device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))->np.ndarray: + """ + Convert input texts to embeddings using the SentenceTransformer dual encoder body. + + Args: + texts (`List[str]`): A list of texts to encode. 
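+            device (torch.device, optional): Device on which the underlying SentenceTransformer encoder runs. Defaults to CUDA if available, otherwise CPU.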
+ """ + if self.get_prediction_strategy() is None or self.get_prediction_strategy() == "fusion": + # get fusion embeddings + embeddings = self.get_fusion_embeddings(texts, device=device) + elif self.get_prediction_strategy() == "setfit": + # get SetFit embeddings + embeddings = self.get_setfit_embeddings(texts, device=device) + elif self.get_prediction_strategy() == "label_embedding": + # get label embeddings + embeddings = self.get_label_embeddings(texts, device=device) + + return embeddings + + def get_fusion_embeddings(self, texts: List[str], device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))->np.ndarray: + """ + Convert input texts to embeddings using the fusion model body. + + Args: + texts (`List[str]`): A list of texts to encode. + """ + # get embeddings from fusion body + return self.model_body.fusion_model_body.encode(texts, device=device) + + def get_setfit_embeddings(self, texts: List[str], device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))->np.ndarray: + """ + Convert input texts to embeddings using the SetFit model body. + + Args: + texts (`List[str]`): A list of texts to encode. + """ + # get embeddings from SetFit body + return self.model_body.setfit_model_body.encode(texts, device=device) + + def get_label_embeddings(self, texts: List[str], device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))->np.ndarray: + """ + Convert input texts to embeddings using the label embeddings model body. + + Args: + texts (`List[str]`): A list of texts to encode. + """ + # get embeddings from label embeddings body + return self.model_body.label_embedding_model_body.encode(texts, device=device) + + def predict(self, texts: List[str], device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))->np.ndarray: + """ + Predict classes of input texts. + + Args: + texts (`List[str]`): A list of texts for classification. + """ + # encode texts as input features for classification head + features = self.encode(texts, device=device) + + # classify texts + if self.get_prediction_strategy() is None or self.get_prediction_strategy() == "fusion": + # get fusion head embeddings + predictions = self.model_head.fusion_model_head.predict(features) + elif self.get_prediction_strategy() == "setfit": + # get SetFit head predictions + predictions = self.model_head.setfit_model_head.predict(features) + elif self.get_prediction_strategy() == "label_embedding": + # get label embedding head predictions + predictions = self.model_head.label_embedding_model_head.predict(features) + + return predictions + + @classmethod + @validate_hf_hub_args + def _from_pretrained( + cls, + model_id: str, + revision: Optional[str] = None, + cache_dir: Optional[str] = None, + force_download: Optional[bool] = None, + proxies: Optional[Dict] = None, + resume_download: Optional[bool] = None, + local_files_only: Optional[bool] = None, + token: Optional[Union[bool, str]] = None, + multi_target_strategy: Optional[str] = None, + prediction_strategy: Optional[str] = None, + device: Optional[Union[torch.device, str]] = None, + trust_remote_code: bool = False, + **model_kwargs, + ) -> 'FusionSentModel': + """ + Internal method to load a pretrained FusionSent model from the Hugging Face Hub. + + This method is called by the Hugging Face Hub framework and should not be modified by the user. + It initializes the FusionSent model with pretrained components and configuration from the Hugging Face Hub. + + Args: + model_id (str): The ID of the model on the Hugging Face Hub. 
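+            revision / force_download / proxies / resume_download / local_files_only: Accepted for compatibility with the Hub mixin interface, but not used by `FusionSentModel` (see the disabled warning block below).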
+ cache_dir (Optional[str], optional): Directory to cache the model. + token (Optional[Union[bool, str]], optional): Token for accessing the Hub. + multi_target_strategy (Optional[str], optional): Strategy for multi-target classification ('one-vs-rest', 'multi-output', or 'classifier-chain'). + prediction_strategy (Optional[str], optional): The prediction strategy to use. + device (Optional[Union[torch.device, str]], optional): The device to use for the model. + trust_remote_code (bool, optional): Whether to trust custom code from the model repo. + **model_kwargs: Additional keyword arguments for the model. + + Returns: + FusionSentModel: The loaded FusionSent model. + """ + # Warn if any unused arguments are provided. -- Disabled this, because it will always be passed by parent class. + # unused_args = [ + # ('revision', revision), + # ('force_download', force_download), + # ('proxies', proxies), + # ('resume_download', resume_download), + # ('local_files_only', local_files_only) + # ] + # for arg_name, arg_value in unused_args: + # if arg_value is not None: + # warnings.warn(f"The '{arg_name}' argument is not used by 'FusionSentModel', and will have no effect.", UserWarning, stacklevel=2) + + #Setup additional arguments for sentence-transformer. + sentence_transformers_kwargs = { + "cache_folder": cache_dir, + "use_auth_token": token, + "device": device, + "trust_remote_code": trust_remote_code, + } + if parse(sentence_transformers_version) >= Version("2.3.0"): + sentence_transformers_kwargs = { + "cache_folder": cache_dir, + "token": token, + "device": device, + "trust_remote_code": trust_remote_code, + } + else: + if trust_remote_code: + raise ValueError( + "The `trust_remote_code` argument is only supported for `sentence-transformers` >= 2.3.0." + ) + sentence_transformers_kwargs = { + "cache_folder": cache_dir, + "use_auth_token": token, + "device": device, + } + + #Load model components. + word_embedding_model = models.Transformer(model_id) + pooling_model = models.Pooling(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), pooling_mode='mean') + sentence_transformer = SentenceTransformer(modules=[word_embedding_model, pooling_model], **sentence_transformers_kwargs) + model_body = FusionModelBody(sentence_transformer) + + #Set device. + if parse(sentence_transformers_version) >= Version("2.3.0"): + device = sentence_transformer.device + else: + device = sentence_transformer._target_device + + #Configure classification-heads. 
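+        # The classification head is a scikit-learn LogisticRegression built from any
+        # user-supplied `head_params`; for multi-label setups it is wrapped in
+        # OneVsRestClassifier, MultiOutputClassifier, or ClassifierChain, depending on
+        # `multi_target_strategy`.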
+ head_params = model_kwargs.pop("head_params", {}) + clf = LogisticRegression(**head_params) + if multi_target_strategy is not None: + if multi_target_strategy == "one-vs-rest": + multilabel_classifier = OneVsRestClassifier(clf) + elif multi_target_strategy == "multi-output": + multilabel_classifier = MultiOutputClassifier(clf) + elif multi_target_strategy == "classifier-chain": + multilabel_classifier = ClassifierChain(clf) + else: + raise ValueError(f"multi_target_strategy {multi_target_strategy} is not supported.") + + model_head = FusionModelHead(multilabel_classifier) + else: + model_head = FusionModelHead(clf) + + return cls( + model_body=model_body, + model_head=model_head, + multi_target_strategy=multi_target_strategy, + prediction_strategy=prediction_strategy, + sentence_transformers_kwargs=sentence_transformers_kwargs, + **model_kwargs, + ) \ No newline at end of file diff --git a/fusionsent/trainer.py b/fusionsent/trainer.py new file mode 100644 index 0000000..e4af74e --- /dev/null +++ b/fusionsent/trainer.py @@ -0,0 +1,676 @@ +#This module contains the Trainer class, responsible for managing the training and evaluation process for FusionSent. + +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +import warnings +import logging +import math +import random +import numpy as np +import torch +from torch import nn +from torch.utils.data import DataLoader +from transformers.trainer_utils import set_seed +from datasets import Dataset +from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score +from setfit.trainer import ColumnMappingMixin +from setfit.sampler import ContrastiveDataset +from sentence_transformers.datasets import SentenceLabelDataset +from sentence_transformers import InputExample, losses +from setfit.losses import SupConLoss +import gc +import json + +from .training_args import TrainingArguments +from .modeling import FusionSentModel +from .merging_methods import merge_models + +logging.basicConfig() +logger = logging.getLogger('FusionSent') +logger.setLevel(logging.INFO) + +class Trainer(ColumnMappingMixin): + """ + The Trainer class is responsible for managing the training and evaluation process for the FusionSent model. + + It facilitates the training of two distinct sub-models ('setfit' and 'label_embedding') and merges their parameters + into the unified FusionSent model. This class handles the preparation of datasets, the configuration of training parameters, + and the execution of training and evaluation routines. + """ + + DEFAULT_EVAL_METRICS = {'metric_names': ['f1', 'precision', 'recall', 'accuracy'], 'metric_args': {'average': 'micro'}} + + def __init__( + self, + model: FusionSentModel = None, + args: Optional[TrainingArguments] = TrainingArguments(), + train_dataset: Optional[Dataset] = None, + eval_dataset: Optional[Dataset] = None, + eval_metrics: Optional[Dict[List,Dict]] = DEFAULT_EVAL_METRICS, + column_mapping: Optional[Dict[str, str]] = None, + ) -> None: + """ + Initializes the Trainer class with the provided FusionSent model, training arguments, datasets, evaluation metrics, and column mapping. + + Args: + model (FusionSentModel): The FusionSent model to be trained. If not provided, raises a RuntimeError. + args (Optional[TrainingArguments]): Configuration for training parameters. If not provided, the default setting of 'TrainingArguments' will be used. + train_dataset (Optional[Dataset]): The dataset used for training. If provided, applies column mapping if necessary. 
+ eval_dataset (Optional[Dataset]): The dataset used for evaluation. If provided, applies column mapping if necessary. + eval_metrics (Optional[Dict[List, Dict]]): A dictionary specifying the evaluation metrics and their arguments. Defaults to evaluating f1, precision, recall, and accuracy with 'micro' averaging. + If not provided or ill-formatted, default metrics will be used. Example format: + { + 'metric_names': ['f1', 'precision', 'recall', 'accuracy'], + 'metric_args': {'average': 'micro'} + } + column_mapping (Optional[Dict[str, str]]): A mapping of dataset columns to the expected input columns. + + Raises: + ValueError: If the TrainingArguments are ill-formatted. + RuntimeError: If the `model` parameter is not provided, or not of type 'FusionSentModel'. + """ + + #Verify that a model has been given. + if model is None: + raise ValueError("`Trainer` requires a `model` argument.") + if not isinstance(model, FusionSentModel): + raise ValueError("`Trainer` requires a `model` argument of type 'FusionSentModel'.") + set_seed(12) # Seed must be set before instantiating the model when using model_init. + self.model = model + + #Initialize 'TrainingArguments' from given input (or as default), and validate them. + if args is not None and not isinstance(args, TrainingArguments): + raise ValueError("`args` must be a `TrainingArguments` instance imported from `FusionSent`.") + self.args = args + self.args._validate() + + #Assign and validate evaluation metrics, if given. + self.eval_metrics: Dict[List,Dict] = eval_metrics + self._validate_eval_metrics() + + #Apply column mapping to 'train_dataset' if necessary. + self.column_mapping = column_mapping + if train_dataset: + self._validate_column_mapping(train_dataset) + if self.column_mapping is not None: + logger.info("Applying column mapping to the training dataset") + train_dataset = self._apply_column_mapping(train_dataset, self.column_mapping) + self.train_dataset = train_dataset + + #Apply column mapping to 'eval_dataset' if necessary. + if eval_dataset: + self._validate_column_mapping(eval_dataset) + if self.column_mapping is not None: + logger.info("Applying column mapping to the evaluation dataset") + eval_dataset = self._apply_column_mapping(eval_dataset, self.column_mapping) + self.eval_dataset = eval_dataset + + + def _dataset_to_parameters(self, dataset: Dataset) -> List[Iterable]: + """ + Converts the provided dataset into a list of parameters required for training. + + Args: + dataset (Dataset): The dataset to be converted. + Expected to contain the keys 'text', 'label_description', and 'label'. + + Returns: + List[Iterable]: A list containing three elements: + - A list of texts from the dataset. + - A list of label descriptions from the dataset. + - A list of labels from the dataset. + """ + return [dataset["text"], dataset["label_description"], dataset["label"]] + + @staticmethod + def _has_any_multilabel(examples: List[InputExample]) -> bool: + """ + Determines if any of the input examples represent a multi-label scenario. + + Args: + examples (List[InputExample]): List of InputExample instances to check. + + Returns: + bool: True if any example has a non-binary label or if any label is a list or array, False otherwise. 
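+                For example, InputExample(texts=["x"], label=[1, 0, 1]) yields True, whereas InputExample(texts=["x"], label=1) yields False.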
+ """ + for example in examples: + label = example.label + + # Check if label is a list, tuple, or numpy array (multi-label scenario) + if isinstance(label, (list, tuple, np.ndarray)): + return True + + # Check if label is not binary (i.e., not 0 or 1) + if isinstance(label, (int, float)) and label not in {0, 1}: + return True + return False + + def _get_setfit_dataloader( + self, + x: List[str], + y: Union[List[int], List[List[int]]], + args: TrainingArguments, + max_pairs: int = -1 + ) -> Tuple[DataLoader, nn.Module, int]: + """ + Prepares a DataLoader and corresponding loss function for training the 'setfit' sub-model. + + Args: + x (List[str]): A list of input texts. + y (Union[List[int], List[List[int]]]): A list of binary- or multi-class labels corresponding to the input texts. + args (TrainingArguments): The training arguments configuration. + max_pairs (int, optional): Maximum number of pairs for contrastive sampling. Default is -1, which means no limit. + + Returns: + Tuple[DataLoader, nn.Module, int]: A tuple containing: + - DataLoader: The DataLoader for the 'setfit' sub-model. + - nn.Module: The loss function for the 'setfit' sub-model. + - int: The batch size used for the DataLoader. + """ + + # Adapt input data for sentence-transformers. + input_data = [InputExample(texts=[text], label=label) for text, label in zip(x, y)] + + if args.setfit_loss in [ + losses.BatchAllTripletLoss, + losses.BatchHardTripletLoss, + losses.BatchSemiHardTripletLoss, + losses.BatchHardSoftMarginTripletLoss, + SupConLoss, + ]: + data_sampler = SentenceLabelDataset(input_data, samples_per_label=args.setfit_samples_per_label) + batch_size = min(args.setfit_batch_size, len(data_sampler)) + dataloader = DataLoader(data_sampler, batch_size=batch_size, drop_last=True) + + if args.setfit_loss is losses.BatchHardSoftMarginTripletLoss: + loss = args.setfit_loss( + model=self.model.model_body.setfit_model_body, + distance_metric=args.setfit_distance_metric, + ) + elif args.setfit_loss is SupConLoss: + loss = args.setfit_loss(model=self.model.model_body.setfit_model_body) + else: + loss = args.setfit_loss( + model=self.model.model_body.setfit_model_body, + distance_metric=args.setfit_distance_metric, + margin=args.setfit_margin, + ) + else: + data_sampler = ContrastiveDataset( + examples=input_data, + multilabel=Trainer._has_any_multilabel(input_data), + num_iterations=args.num_iterations, + sampling_strategy=args.setfit_sampling_strategy, + max_pairs=max_pairs, + ) + batch_size = min(args.setfit_batch_size, len(data_sampler)) + dataloader = DataLoader(data_sampler, batch_size=batch_size, drop_last=False) + loss = args.setfit_loss(self.model.model_body.setfit_model_body) + + return dataloader, loss, batch_size + + def _get_label_embedding_dataloader( + self, + texts: List[str], + label_descriptions: List[str], + args: TrainingArguments + ) -> Tuple[DataLoader, nn.Module, int]: + """ + Prepares a DataLoader and corresponding loss function for training the 'label_embedding' sub-model. + + Note that remaining TODO's include: + - Adding additional Sentence-Transformers losses + - Reimplementing sampling strategies to support oversampling of negatives and undersampling of positives. + + Args: + texts (List[str]): A list of input texts. + label_descriptions (List[str]): A list of label descriptions corresponding to the input texts. + args (TrainingArguments): The training arguments configuration. 
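+
+        For example (illustrative): given a text "t" with label_descriptions ["Physics"] and the full label set {"Physics", "Philosophy"}, the contrastive-loss variants emit InputExample(texts=["t", "Physics"], label=1.0) as a positive pair and InputExample(texts=["t", "Philosophy"], label=0.0) as a negative pair.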
+ + Returns: + Tuple[DataLoader, nn.Module, int]: A tuple containing: + - DataLoader: The DataLoader for the 'label_embedding' sub-model. + - nn.Module: The loss function for the 'label_embedding' sub-model. + - int: The batch size used for the DataLoader. + """ + + #TODO: add remaining ST losses + #TODO: reimplement sampling strategies to support oversampling of negatives and undersampling of positives + if args.label_embedding_loss is losses.MultipleNegativesRankingLoss: + # create default dataloader with positives only + input_data = [] + for i, text in enumerate(texts): + for label_description in label_descriptions[i]: + input_data.append(InputExample(texts=[text, label_description])) + + elif args.label_embedding_loss is losses.TripletLoss: + if args.label_embedding_sampling_strategy == "oversampling": + # create dataloader for triplet loss with oversampling of positives + input_data = [] + unique_labels = set([x for xs in label_descriptions for x in xs]) + for i, text in enumerate(texts): + negative_labels = unique_labels - set(label_descriptions[i]) + # oversample positive label descriptions + positive_label_description_samples = random.choices(label_descriptions[i], k=len(negative_labels)) + for x in range(len(negative_labels)): + input_data.append(InputExample(texts=[text, positive_label_description_samples[x], list(negative_labels)[x]])) + elif args.label_embedding_sampling_strategy == "undersampling": + # create dataloader for triplet loss with undersampling of negatives + input_data = [] + unique_labels = set([x for xs in label_descriptions for x in xs]) + for i, text in enumerate(texts): + negative_labels = unique_labels - set(label_descriptions[i]) + # undersample negative label description + negative_label_description_samples = random.sample(list(negative_labels), len(label_descriptions[i])) + for x in range(len(label_descriptions[i])): + input_data.append( + InputExample(texts=[text, label_descriptions[i][x], negative_label_description_samples[x]])) + + + elif args.label_embedding_loss in [losses.ContrastiveLoss,losses.CosineSimilarityLoss,losses.OnlineContrastiveLoss]: + if args.label_embedding_sampling_strategy == "oversampling": + # create dataloader for contrastive learning with oversampling of positives + input_data = [] + unique_labels = set([x for xs in label_descriptions for x in xs]) + for i, text in enumerate(texts): + negative_labels = unique_labels - set(label_descriptions[i]) + # add positive label descriptions for anchor text + for positive_label_description in label_descriptions[i]: + input_data.append(InputExample(texts=[text, positive_label_description], label=1.0)) + + # add negative label descriptions for anchor text + for negative_label_description in list(negative_labels): + input_data.append(InputExample(texts=[text, negative_label_description], label=0.0)) + + # oversample positive label descriptions + positive_label_description_samples = random.choices(label_descriptions[i], k=len(negative_labels)-1) + for positive_label_description in positive_label_description_samples: + input_data.append(InputExample(texts=[text, positive_label_description], label=1.0)) + + elif args.label_embedding_sampling_strategy == "undersampling": + # create dataloader for contrastive learning with undersampling of negatives + input_data = [] + unique_labels = set([x for xs in label_descriptions for x in xs]) + for i, text in enumerate(texts): + negative_labels = unique_labels - set(label_descriptions[i]) + # add positive label descriptions for anchor text + for 
positive_label_description in label_descriptions[i]: + input_data.append(InputExample(texts=[text, positive_label_description], label=1.0)) + + # add negative label descriptions for anchor text + negative_label_description_samples = random.sample(list(negative_labels), + len(label_descriptions[i])) + for negative_label_description in negative_label_description_samples: + input_data.append(InputExample(texts=[text, negative_label_description], label=0.0)) + + data_sampler = SentenceLabelDataset(input_data, samples_per_label=args.label_embedding_samples_per_label) + batch_size = min(args.label_embedding_batch_size, len(data_sampler)) + dataloader = DataLoader(input_data, shuffle=True, batch_size=batch_size) + loss = args.label_embedding_loss(self.model.model_body.label_embedding_model_body) + + return dataloader, loss, batch_size + + def _validate_eval_metrics( + self, + other: Optional[Dict[List,Dict]] = None + ): + """ + Validates the local evaluation metrics to ensure they contain at least one of the valid evaluation arguments: + `f1`, `precision`, `recall`, `accuracy`. + + Args: + other (Optional[Dict[List,Dict]]): An alternative set of evaluation metrics to validate. + + Raises: + ValueError: If the evaluation metrics do not contain at least one of the valid evaluation arguments. + """ + valid_metrics = set(['f1', 'precision', 'recall', 'accuracy']) + if other is not None and 'metric_names' in other.keys(): + provided_metrics = set(other['metric_names']) + else: + provided_metrics = set(self.eval_metrics.get('metric_names', [])) + + if not provided_metrics.intersection(valid_metrics): + raise ValueError( + "'eval_metrics' did not contain at least one of the following valid values under key 'metric_names': `f1`, `precision`, `recall`, `accuracy`." + ) + + def _has_evaluation_setting(self) -> bool: + """ + Returns a boolean indicating wether this trainer instance could perform an evaluation (has been given an evaluation dataset and metrics). + """ + return self.eval_dataset and self.eval_metrics + + def train( + self, + args: Optional[TrainingArguments] = None, + trial: Optional[Union["optuna.Trial", Dict[str, Any]]] = None, + **kwargs, + ) -> None: + """ + This function represents the main training entry point. + + Note that evaluation will be perfomed automatically, iff a dectionary of evaluation metrics and an evaluation datatset has been provided at initialization of this instance. + Additionally, evaluation can always be carried out manually via the 'evaluate' method. + + Args: + args (Optional[TrainingArguments]): Training arguments to temporarily override the default training arguments for this call. + trial (Optional[Union["optuna.Trial", Dict[str, Any]]]): The trial run or hyperparameter dictionary for hyperparameter search. + + Raises: + ValueError: If `train_dataset` is not provided, or 'TrainingArguments' is not None and ill-formatted. + """ + if len(kwargs): + warnings.warn( + f"`{self.__class__.__name__}.train` does not accept keyword arguments anymore. " + f"Please provide training arguments via a `TrainingArguments` instance to the `{self.__class__.__name__}` " + f"initialisation or the `{self.__class__.__name__}.train` method.", + DeprecationWarning, + stacklevel=2, + ) + + #Assign and validate training arguments. + args = args or self.args or TrainingArguments() + self.args._validate() + + #Check for existing training dataset. 
+ if self.train_dataset is None: + raise ValueError( + f"Training requires a `train_dataset` given to the `{self.__class__.__name__}` initialization." + ) + + #Initialize trainer parameters and model for hp-search, if applicable. + if trial: + self._hp_search_setup(trial) + + #Construct train parameters + train_parameters = self._dataset_to_parameters(self.train_dataset) + + #Train model body + self._train_sentence_transformers_body(*train_parameters, args=args) + + #If evaluation dataset and metrics are given... + if self._has_evaluation_setting(): + #...train the model head, ... + self._train_classifier_head( + x_train_texts=train_parameters[0], + y_train=train_parameters[2], + x_eval=self.eval_dataset['text'], + y_eval=self.eval_dataset['label'], + eval_metrics=self.eval_metrics, + args=args + ) + + #...and evaluate the whole model. + logger.info(" ***** Running evaluation on `eval_dataset` *****") + self.eval_scores = self.evaluate( + x_eval=self.eval_dataset['text'], + y_eval=self.eval_dataset['label'], + eval_metrics=self.eval_metrics + ) + return self.eval_scores + else: + #Else, train only the model head, without evaluation. + self._train_classifier_head( + x_train_texts=train_parameters[0], + y_train=train_parameters[2], + args=args + ) + return None + + def _train_sentence_transformers_body( + self, + x_train_texts: List[str], + x_train_label_descriptions: List[List[str]], + y_train: Optional[Union[List[int], List[List[int]]]] = None, + args: Optional[TrainingArguments] = None + ) -> None: + """ + Trains both dual encoder `SentenceTransformer` bodies of the sub-models ('setfit' and 'label_embeding') for the embedding training phase. + After training, it merges the parameters of both sub-models into the final encoder body of FusionSent. + + Args: + x_train_texts (List[str]): A list of training texts. + x_train_label_descriptions (List[List[str]]): A list of lists including label descriptions for each positive label per training text. + y_train (Union[List[int], List[List[int]]], optional): A list of labels corresponding to the training texts. + args (TrainingArguments, optional): Temporarily change the training arguments for this training call. If not provided, default training arguments will be used. + + Raises: + ValueError: If 'args' is not None and ill-formatted. + """ + args = args or self.args or TrainingArguments() + args._validate() + + logger.info(" ***** Preparing training dataset *****") + + #Construct dataset for SetFit body training + setfit_train_dataloader, setfit_loss_func, setfit_batch_size = self._get_setfit_dataloader( + x=x_train_texts,y=y_train, args=args + ) + + #Construct dataset for label embedding training + label_embedding_train_dataloader, label_embedding_loss_func, label_embedding_batch_size = self._get_label_embedding_dataloader( + texts=x_train_texts, label_descriptions=x_train_label_descriptions, args=args + ) + + #Compute total number of training steps. + setfit_total_train_steps = len(setfit_train_dataloader) * args.setfit_num_epochs + label_embeddings_total_train_steps = len(label_embedding_train_dataloader) * args.label_embedding_num_epochs + + #Log training statistics. 
+ logger.info(" ***** Running sentence transformers body training *****") + logger.info(f" Total number of examples = {len(setfit_train_dataloader.dataset)} + {len(label_embedding_train_dataloader.dataset)}") + logger.info(f" Number of batches = {len(setfit_train_dataloader)} + {len(label_embedding_train_dataloader)}") + logger.info(f" Number of epochs = {args.setfit_num_epochs} + {args.label_embedding_num_epochs}") + logger.info(f" Train batch sizes = {setfit_batch_size} & {label_embedding_batch_size}") + logger.info(f" Total optimization steps = {setfit_total_train_steps} + {label_embeddings_total_train_steps}") + + #Train the setfit body (only if it is intended to be used). + if args.use_setfit_body: + setfit_warmup_steps = math.ceil( + setfit_total_train_steps * args.setfit_warmup_proportion + ) + self.model.model_body.setfit_model_body.fit( + train_objectives=[(setfit_train_dataloader, setfit_loss_func)], + epochs=args.setfit_num_epochs, warmup_steps=setfit_warmup_steps, + show_progress_bar=args.show_progress_bar + ) + setfit_loss_func.to('cpu') + self.model.model_body.setfit_model_body.to('cpu') + gc.collect() + with torch.no_grad(): + torch.cuda.empty_cache() + + #Train the label_embeddings body. + label_embeddings_warmup_steps = math.ceil( + label_embeddings_total_train_steps * args.label_embedding_warmup_proportion + ) + self.model.model_body.label_embedding_model_body.fit( + train_objectives=[(label_embedding_train_dataloader, label_embedding_loss_func)], + epochs=args.label_embedding_num_epochs, + warmup_steps=label_embeddings_warmup_steps, + show_progress_bar=args.show_progress_bar + ) + label_embedding_loss_func.to('cpu') + self.model.model_body.label_embedding_model_body.to('cpu') + gc.collect() + with torch.no_grad(): + torch.cuda.empty_cache() + + #Get parameters of both trained models + setfit_parameter_dict = dict( + self.model.model_body.setfit_model_body._first_module().auto_model.named_parameters() + ) + label_embedding_parameter_dict = dict( + self.model.model_body.label_embedding_model_body._first_module().auto_model.named_parameters() + ) + + #Fuse/merge model parameters with selected algorithm. + t = 0.5 if args.use_setfit_body else 0 + fused_parameter_dict = merge_models( + model_state_dict0=label_embedding_parameter_dict, + model_state_dict1=setfit_parameter_dict, + t=t, + merging_method=args.merging_method + ) + + #Initialize the body of the final FusionSent model with the fused model parameters. + fusion_state_dict = self.model.model_body.fusion_model_body._first_module().auto_model.state_dict() + for key in fusion_state_dict: + fusion_state_dict[key] = fused_parameter_dict[key] + self.model.model_body.fusion_model_body._first_module().auto_model.load_state_dict(fusion_state_dict) + + @staticmethod + def _ensure_single_label_format(labels: Union[List[int], List[List[int]]]): + """ + Helper function to convert a list of labels into single-label format, if neccesary. + """ + if isinstance(labels[0], list): + return np.argmax(labels, axis=1) + return labels + + def _train_classifier_head( + self, + x_train_texts: List[str], + y_train: Union[List[int], List[List[int]]], + x_eval: Optional[List[str]] = None, + y_eval: Optional[List[int]] = None, + eval_metrics: Optional[Dict[List, Dict]] = None, + args: Optional[TrainingArguments] = None, + ) -> None: + """ + Trains a classification head for each candidate model body (`setfit`, `label_embedding`, and their 'fusion'). 
+ If evaluation metrics and dataset are provided, the performance of all final models will be evaluted and the best performing model is set as the default for further use. + + Note: Cross-Validation is yet to be implemented. + + Args: + x_train_texts (List[str]): A list of training texts. + y_train (Union[List[int], List[List[int]]]): A list of labels corresponding to the training texts. + x_eval (Optional[List[str]]): A list of evaluation texts. + y_eval (Optional[List[int]]): A list of labels corresponding to the evaluation texts. + eval_metrics (Optional[Dict[List, Dict]]): A dictionary specifying the evaluation metrics and their respective arguments. + If not provided, evaluation will be omitted. If ill-formatted, default metrics will be used. Example format: + { + 'metric_names': ['f1', 'precision', 'recall', 'accuracy'], + 'metric_args': {'average': 'micro'} + } + args (Optional[TrainingArguments]): Training arguments to temporarily override the default training arguments for this call. + """ + logger.info(" ***** Running classification head training *****") + y_train = Trainer._ensure_single_label_format(y_train) # Necessary for model head trainig. + + #Get embeddings from setfit body and train setfit model head. + self.model.set_prediction_strategy("setfit") + setfit_train_features = self.model.model_body.setfit_model_body.encode(x_train_texts, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")) + self.model.model_head.setfit_model_head.fit(setfit_train_features, y_train) + + #Get embeddings from label_embedding body and train label_embedding model head. + self.model.set_prediction_strategy("label_embedding") + label_embedding_train_features = self.model.model_body.label_embedding_model_body.encode(x_train_texts, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")) + self.model.model_head.label_embedding_model_head.fit(label_embedding_train_features, y_train) + + #Get embeddings from fusion body and train fusion model head. + self.model.set_prediction_strategy("fusion") + fusion_train_features = self.model.model_body.fusion_model_body.encode(x_train_texts, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")) + self.model.model_head.fusion_model_head.fit(fusion_train_features, y_train) + + #Evaluate classifications with different body features and set the best performing one as default. + eval_dict = {} + if x_eval and y_eval and eval_metrics: + + #Use evaluation dataset to choose best performing features. + self.model.set_prediction_strategy("setfit") + setfit_eval_scores = self.evaluate(x_eval=x_eval, y_eval=y_eval, eval_metrics=eval_metrics) + print("SetFit eval scores:", setfit_eval_scores) + eval_dict["SetFit eval scores"] = setfit_eval_scores + self.model.set_prediction_strategy("label_embedding") + label_embedding_eval_scores = self.evaluate(x_eval=x_eval, y_eval=y_eval, eval_metrics=eval_metrics) + print("Label embedding eval scores:", label_embedding_eval_scores) + eval_dict["Label embedding eval scores"] = label_embedding_eval_scores + self.model.set_prediction_strategy("fusion") + fusion_eval_scores = self.evaluate(x_eval=x_eval, y_eval=y_eval, eval_metrics=eval_metrics) + print("Fusion eval scores:", fusion_eval_scores) + eval_dict["Fusion eval scores"] = fusion_eval_scores + + #Save evaluation dictionary, if path was provided. + if args.json_path is not None: + with open(args.json_path + '.json', 'w') as fp: + json.dump(eval_dict, fp) + + #choose best performing model from average of evaluation scores as default mode. 
+ mean_eval_scores = {} + mean_eval_scores['fusion'] = np.mean(list(fusion_eval_scores.values())) + mean_eval_scores['label_embedding'] = np.mean(list(label_embedding_eval_scores.values())) + mean_eval_scores['setfit'] = np.mean(list(setfit_eval_scores.values())) + self.model.set_prediction_strategy(max(mean_eval_scores, key=mean_eval_scores.get)) + + else: + #TODO: Perform cross-validation to get best performing features + pass + + def evaluate( + self, x_eval: List[str], + y_eval: Union[List[int], List[List[int]]], + eval_metrics: Optional[Dict[List,Dict]] = None + ): + """ + Evaluates the performance of the full model on a given evaluation dataset. + Note that this depends on the model's current prediction_strategy (i.e. which encoder body it will use) to perform inference. + + Args: + x_eval (List[str]): A list of evaluation texts. + y_eval (Union[List[int], List[List[int]]]): A list of labels corresponding to the evaluation texts. + eval_metrics (Optional[Dict[List, Dict]]): A dictionary specifying the evaluation metrics and their respective arguments, to temporarily override the default (if any). + If not provided or ill-formatted, default metrics will be used. Example format: + { + 'metric_names': ['f1', 'precision', 'recall', 'accuracy'], + 'metric_args': {'average': 'micro'} + }. + + Returns: + Dict[str, float]: A dictionary containing the computed scores for each specified metric. + """ + + #If no eval_metrics were given, use the configured ones. + if not eval_metrics: + eval_metrics = self.eval_metrics + + #Validate eval_metrics and use the default if ill-formatted. + try: + self._validate_eval_metrics(eval_metrics) + except ValueError as e: + if "eval_metrics" in str(e): + eval_metrics = self.DEFAULT_EVAL_METRICS + warnings.warn( + "'eval_metrics' provided were ill-formatted. Falling back to default metrics.", + UserWarning, + stacklevel=2, + ) + + #Perform inference on the evaluation dataset. + y_pred = self.model.predict(x_eval) + y_true = Trainer._ensure_single_label_format(y_eval) + + #Correctly format eval_metrics if only a single one was given. + if isinstance(eval_metrics['metric_names'], str): + eval_metrics['metric_names'] = [eval_metrics['metric_names']] + + #Perform the evaluation. + eval_scores = {} + for metric in eval_metrics['metric_names']: + if metric == "f1": + eval_scores["f1"] = f1_score(y_true, y_pred, average=eval_metrics['metric_args']['average']) + elif metric == "precision": + eval_scores["precision"] = precision_score(y_true, y_pred, average=eval_metrics['metric_args']['average']) + elif metric == "recall": + eval_scores["recall"] = recall_score(y_true, y_pred, average=eval_metrics['metric_args']['average']) + elif metric == "accuracy": + eval_scores["accuracy"] = accuracy_score(y_true, y_pred) + + #Check wether evalaution metrics are present. Note: This must always be the case, if a successful validation has occured (so in theory, this exception should never be raised). + if not eval_scores: + raise ValueError( + "eval_metrics did not contain at least on of the following valid evaluation arguments: `f1`, `precision`, `recall`, `accuracy`." + ) + + return eval_scores \ No newline at end of file diff --git a/fusionsent/training_args.py b/fusionsent/training_args.py new file mode 100644 index 0000000..ae109d2 --- /dev/null +++ b/fusionsent/training_args.py @@ -0,0 +1,259 @@ +# This module encapsulates the set of training arguments that can be passed to the FusionSent model to specifiy training. 
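+#
+# Illustrative example: TrainingArguments(batch_sizes=(16, 1), num_epochs=(1, 3)) trains the
+# 'setfit' sub-model with batch size 16 for 1 epoch and the 'label_embedding' sub-model with
+# batch size 1 for 3 epochs; a single (non-tuple) value applies to both sub-models.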
+
+from typing import Callable, Optional, Tuple, Union
+from dataclasses import dataclass
+from sentence_transformers import losses
+import warnings
+
+@dataclass
+class TrainingArguments:
+    """
+    A dataclass containing all the arguments that can be passed to the FusionSent model to specify training.
+    Pass these either at model initialisation (to apply to all training runs), or individually, when calling the training method.
+
+    FusionSent trains two distinct sub-models, 'setfit' and 'label_embedding', whose parameters are then fused.
+    For customization purposes, most training arguments can hence be given as a Tuple, in which the first and second components apply to the 'setfit' and 'label_embedding' sub-model, respectively.
+    If only a single value is provided, it will be used for both sub-models.
+
+    After instantiation, each sub-model's specific training arguments are accessible through custom properties of this class.
+    Example:
+        batch_sizes[0] is addressed to 'setfit', accessible as property 'TrainingArguments.setfit_batch_size'.
+        batch_sizes[1] is addressed to 'label_embedding', accessible as property 'TrainingArguments.label_embedding_batch_size'.
+
+    Attributes:
+        batch_sizes (Optional[Union[int, Tuple[int, int]]]): Batch sizes for training. A single integer for both sub-models, or a tuple to address each one individually. Default is (16, 1).
+        num_epochs (Optional[Union[int, Tuple[int, int]]]): Number of epochs for training. A single integer for both sub-models, or a tuple to address each one individually. Default is (1, 3).
+        sampling_strategies (Optional[Union[str, Tuple[str, str]]]): Sampling strategies for the training data. A single string for both sub-models, or a tuple to address each one individually. Choose either "oversampling" (default), "unique", or "undersampling". See 'setfit.ContrastiveDataset' for more details.
+        num_iterations (Optional[int]): Number of iterations for training. Always the same for both sub-models.
+        distance_metrics (Optional[Union[Callable, Tuple[Callable, Callable]]]): Distance metrics for the loss functions. A single 'Callable' for both sub-models, or a tuple to address each one individually. Default is cosine distance for triplet loss.
+        losses (Optional[Union[Callable, Tuple[Callable, Callable]]]): Loss functions for training. A single 'Callable' for both sub-models, or a tuple to address each one individually. Default is (CosineSimilarityLoss, ContrastiveLoss).
+        merging_method (Optional[str]): Method for merging the parameters of both sub-models after training. Choose either 'slerp' (default) or 'lerp'.
+        margins (Optional[Union[float, Tuple[float, float]]]): Margin values for the loss functions, determining the threshold for considering examples as similar or dissimilar. A single float for both sub-models, or a tuple to address each one individually. Default is 0.25.
+        warmup_proportions (Optional[Union[float, Tuple[float, float]]]): Proportion of the total training steps used for warming up the learning rates. A single float for both sub-models, or a tuple to address each one individually. Default is 0.1.
+        samples_per_label (Optional[Union[int, Tuple[int, int]]]): Number of samples per label for training. A single integer for both sub-models, or a tuple to address each one individually. Default is 2.
+        show_progress_bar (Optional[bool]): Whether to show a progress bar during training. Default is True.
+        use_setfit_body (Optional[bool]): Whether to train the 'setfit' sub-model and use its parameters in the merged FusionSent model. Set this to False if you only want to evaluate the 'label_embedding' sub-model. Default is True.
+        json_path (Optional[str]): Path to save evaluation results as JSON.
+    """
+
+    batch_sizes: Optional[Union[int, Tuple[int, int]]] = (16, 1)
+    num_epochs: Optional[Union[int, Tuple[int, int]]] = (1, 3)
+    sampling_strategies: Optional[Union[str, Tuple[str, str]]] = "oversampling"
+    num_iterations: Optional[int] = None
+    distance_metrics: Optional[Union[Callable, Tuple[Callable, Callable]]] = losses.BatchHardTripletLossDistanceFunction.cosine_distance
+    losses: Optional[Union[Callable, Tuple[Callable, Callable]]] = (losses.CosineSimilarityLoss, losses.ContrastiveLoss)
+    merging_method: Optional[str] = 'slerp'
+    margins: Optional[Union[float, Tuple[float, float]]] = 0.25
+    warmup_proportions: Optional[Union[float, Tuple[float, float]]] = 0.1
+    samples_per_label: Optional[Union[int, Tuple[int, int]]] = 2
+    show_progress_bar: Optional[bool] = True
+    use_setfit_body: Optional[bool] = True
+    json_path: Optional[str] = None
+
+    @property
+    def setfit_batch_size(self) -> int:
+        """
+        Batch size for training the 'setfit' sub-model.
+        """
+        if isinstance(self.batch_sizes, int):
+            return self.batch_sizes
+        else:
+            return self.batch_sizes[0]
+
+    @property
+    def label_embedding_batch_size(self) -> int:
+        """
+        Batch size for training the 'label_embedding' sub-model.
+        """
+        if isinstance(self.batch_sizes, int):
+            return self.batch_sizes
+        else:
+            return self.batch_sizes[1]
+
+    @property
+    def setfit_num_epochs(self) -> int:
+        """
+        Number of epochs for training the 'setfit' sub-model.
+        """
+        if isinstance(self.num_epochs, int):
+            return self.num_epochs
+        else:
+            return self.num_epochs[0]
+
+    @property
+    def label_embedding_num_epochs(self) -> int:
+        """
+        Number of epochs for training the 'label_embedding' sub-model.
+        """
+        if isinstance(self.num_epochs, int):
+            return self.num_epochs
+        else:
+            return self.num_epochs[1]
+
+    @property
+    def setfit_sampling_strategy(self) -> str:
+        """
+        Sampling strategy for the training data of the 'setfit' sub-model.
+        Either "oversampling" (default), "unique", or "undersampling".
+        See 'setfit.ContrastiveDataset' for more details.
+        """
+        if isinstance(self.sampling_strategies, str):
+            return self.sampling_strategies
+        else:
+            return self.sampling_strategies[0]
+
+    @property
+    def label_embedding_sampling_strategy(self) -> str:
+        """
+        Sampling strategy for the training data of the 'label_embedding' sub-model.
+        Either "oversampling" (default), "unique", or "undersampling".
+        See 'setfit.ContrastiveDataset' for more details.
+        """
+        if isinstance(self.sampling_strategies, str):
+            return self.sampling_strategies
+        else:
+            return self.sampling_strategies[1]
+
+    @property
+    def setfit_distance_metric(self) -> Callable:
+        """
+        Distance metric for the loss function of the 'setfit' sub-model.
+        """
+        if isinstance(self.distance_metrics, Callable):
+            return self.distance_metrics
+        else:
+            return self.distance_metrics[0]
+
+    @property
+    def label_embedding_distance_metric(self) -> Callable:
+        """
+        Distance metric for the loss function of the 'label_embedding' sub-model.
+        """
+        if isinstance(self.distance_metrics, Callable):
+            return self.distance_metrics
+        else:
+            return self.distance_metrics[1]
+
+    @property
+    def setfit_loss(self) -> Callable:
+        """
+        Loss function for training the 'setfit' sub-model.
+        """
+        if isinstance(self.losses, Callable):
+            return self.losses
+        else:
+            return self.losses[0]
+
+    @property
+    def label_embedding_loss(self) -> Callable:
+        """
+        Loss function for training the 'label_embedding' sub-model.
+        """
+        if isinstance(self.losses, Callable):
+            return self.losses
+        else:
+            return self.losses[1]
+
+    @property
+    def setfit_margin(self) -> float:
+        """
+        Margin value for the loss function of the 'setfit' sub-model.
+        This determines the threshold for considering examples as similar or dissimilar.
+        """
+        if isinstance(self.margins, float):
+            return self.margins
+        else:
+            return self.margins[0]
+
+    @property
+    def label_embedding_margin(self) -> float:
+        """
+        Margin value for the loss function of the 'label_embedding' sub-model.
+        This determines the threshold for considering examples as similar or dissimilar.
+        """
+        if isinstance(self.margins, float):
+            return self.margins
+        else:
+            return self.margins[1]
+
+    @property
+    def setfit_warmup_proportion(self) -> float:
+        """
+        Proportion of the total training steps used for warming up the learning rate when training the 'setfit' sub-model.
+        """
+        if isinstance(self.warmup_proportions, float):
+            return self.warmup_proportions
+        else:
+            return self.warmup_proportions[0]
+
+    @property
+    def label_embedding_warmup_proportion(self) -> float:
+        """
+        Proportion of the total training steps used for warming up the learning rate when training the 'label_embedding' sub-model.
+        """
+        if isinstance(self.warmup_proportions, float):
+            return self.warmup_proportions
+        else:
+            return self.warmup_proportions[1]
+
+    @property
+    def setfit_samples_per_label(self) -> int:
+        """
+        Number of samples per label for training the 'setfit' sub-model.
+        """
+        if isinstance(self.samples_per_label, int):
+            return self.samples_per_label
+        else:
+            return self.samples_per_label[0]
+
+    @property
+    def label_embedding_samples_per_label(self) -> int:
+        """
+        Number of samples per label for training the 'label_embedding' sub-model.
+        """
+        if isinstance(self.samples_per_label, int):
+            return self.samples_per_label
+        else:
+            return self.samples_per_label[1]
+
+    def _validate(self):
+        """
+        Validates the provided training arguments to ensure they are in the correct format and contain the necessary values.
+        Issues warnings for missing optional arguments and raises exceptions for invalid or wrongly typed values.
+        """
+        # Optional warning for a missing json_path.
+        if self.json_path is None:
+            warnings.warn(
+                f"`{self.__class__.__name__}` did not receive a `json_path`. "
+                f"Evaluation results will not be saved to a file. "
+                f"Please provide a `json_path` to the `TrainingArguments` instance to suppress this warning.",
+                UserWarning,
+                stacklevel=2,
+            )
+
+        # Validate required fields by accessing the properties and catching errors.
+        required_properties = [
+            ('setfit_batch_size', int), ('label_embedding_batch_size', int),
+            ('setfit_num_epochs', int), ('label_embedding_num_epochs', int),
+            ('setfit_sampling_strategy', str), ('label_embedding_sampling_strategy', str),
+            ('setfit_distance_metric', Callable), ('label_embedding_distance_metric', Callable),
+            ('setfit_loss', Callable), ('label_embedding_loss', Callable),
+            ('setfit_margin', float), ('label_embedding_margin', float),
+            ('setfit_warmup_proportion', float), ('label_embedding_warmup_proportion', float),
+            ('setfit_samples_per_label', int), ('label_embedding_samples_per_label', int)
+        ]
+        for prop, expected_type in required_properties:
+            try:
+                value = getattr(self, prop)
+                if not isinstance(value, expected_type):
+                    raise TypeError(f"Expected type {expected_type} for {prop}, but got {type(value)}.")
+            except Exception as e:
+                raise ValueError(f"Invalid value for {prop}: {str(e)}")
+
+        # Check for valid values in sampling_strategies.
+        valid_sampling_strategies = {"oversampling", "unique", "undersampling"}
+        sampling_strategy_props = ['setfit_sampling_strategy', 'label_embedding_sampling_strategy']
+        for strategy_prop in sampling_strategy_props:
+            value = getattr(self, strategy_prop)
+            if value not in valid_sampling_strategies:
+                raise ValueError(f"Invalid value '{value}' for '{strategy_prop}'. Must be one of {valid_sampling_strategies}.")
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..fbcce5e
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools", "wheel", "numpy"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..c8d4344
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,78 @@
+from setuptools import setup, find_packages
+
+# Read the contents of the README file for 'long_description'.
+with open("README.md", "r", encoding="utf-8") as fh:
+    _long_description = fh.read()
+
+setup(
+    name="fusionsent",
+    version="0.0.6",
+    author="Tim Schopf, Alexander Blatzheim",
+    author_email="tim.schopf@tum.de, alexander.blatzheim@tum.de",
+    description="FusionSent: A Fusion-Based Multi-Task Sentence Embedding Model",
+    long_description=_long_description,
+    long_description_content_type="text/markdown",
+    url="https://github.com/NLP-Knowledge-Graph/few-shot-taxonomy-classification",
+    packages=find_packages(),
+    install_requires=[
+        "accelerate==0.32.1",
+        "aiohappyeyeballs==2.4.3",
+        "aiohttp==3.9.5",
+        "aiosignal==1.3.1",
+        "async-timeout==4.0.3",
+        "attrs==23.2.0",
+        "certifi==2024.8.30",
+        "charset-normalizer==3.3.2",
+        "datasets==2.20.0",
+        "dill==0.3.5.1",
+        "evaluate==0.4.2",
+        "filelock==3.15.4",
+        "frozenlist==1.4.1",
+        "fsspec==2023.10.0",
+        "huggingface-hub==0.21.2",
+        "idna==3.10",
+        "Jinja2==3.1.4",
+        "joblib==1.4.2",
+        "MarkupSafe==2.1.5",
+        "mpmath==1.3.0",
+        "multidict==6.0.5",
+        "multiprocess==0.70.13",
+        "networkx==3.3",
+        "numpy==1.23.5",
+        "packaging==24.1",
+        "pandas==2.2.2",
+        "pillow==10.4.0",
+        "psutil==6.0.0",
+        "pyarrow==15.0.0",
+        "python-dateutil==2.9.0.post0",
+        "pytz==2024.2",
+        "PyYAML==6.0.1",
+        "regex==2024.5.15",
+        "requests==2.32.3",
+        "safetensors==0.4.3",
+        "scikit-learn==1.5.1",
+        "scipy==1.10.1",
+        "sentence-transformers==3.0.1",
+        "setfit==1.0.3",
+        "six==1.16.0",
+        "sympy==1.13.3",
+        "threadpoolctl==3.5.0",
+        "tokenizers==0.19.1",
+        "torch==2.3.1",
+        "tqdm==4.66.4",
+        "transformers==4.40.0",
+        "typing_extensions==4.12.2",
+        "tzdata==2024.1",
+        "urllib3==2.2.2",
+        "xxhash==3.4.1",
+        "yarl==1.9.4"
+    ],
+    classifiers=[
+        "Programming Language :: Python :: 3.10",
+        "License :: OSI Approved :: Apache Software License",
+        "Operating System :: OS Independent",
+    ],
+    python_requires='>=3.10',
+    license="Apache-2.0",
+    include_package_data=True
+)
\ No newline at end of file
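+
+# A typical local build-and-install flow for this package might look as follows
+# (a sketch only; it assumes the `build` package is installed, and the exact wheel
+# filename produced in `dist/` may differ):
+#   python -m build
+#   pip install dist/fusionsent-0.0.6-py3-none-any.whl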