From 0af3d3172418ebc3db0c58257c2c5ffec75086fe Mon Sep 17 00:00:00 2001 From: jigold Date: Thu, 9 Nov 2023 15:28:03 -0500 Subject: [PATCH 01/48] [batch] Make instance status statistics easier to understand (#13943) Fixes #13815. I tried to simplify the concepts here to be unified across all instance collection types. I renamed "provisioned" to "live". Schedulable means only workers that are active and from the latest instance version. I think the example figures are self explanatory. Screenshot 2023-10-30 at 11 33 39 AM Screenshot 2023-10-30 at 11 35 29 AM --- .../batch/driver/instance_collection/base.py | 103 ++++++++++++++---- .../batch/driver/instance_collection/pool.py | 2 +- batch/batch/driver/main.py | 12 +- batch/batch/driver/templates/index.html | 101 ++++++++++------- batch/batch/driver/templates/job_private.html | 43 +++++--- batch/batch/driver/templates/pool.html | 47 ++++++-- batch/batch/globals.py | 2 + web_common/web_common/styles/main.scss | 10 ++ 8 files changed, 228 insertions(+), 92 deletions(-) diff --git a/batch/batch/driver/instance_collection/base.py b/batch/batch/driver/instance_collection/base.py index 9c6e342b9e0..6e49204487a 100644 --- a/batch/batch/driver/instance_collection/base.py +++ b/batch/batch/driver/instance_collection/base.py @@ -12,7 +12,7 @@ from hailtop import aiotools from hailtop.utils import periodically_call, secret_alnum_string, time_msecs -from ...globals import INSTANCE_VERSION +from ...globals import INSTANCE_VERSION, live_instance_states from ...instance_config import QuantifiedResource from ..instance import Instance from ..location import CloudLocationMonitor @@ -70,7 +70,7 @@ def choose_location( regions: List[str], machine_type: str, ) -> str: - if self._default_region in regions and self.global_total_provisioned_cores_mcpu // 1000 < 1_000: + if self._default_region in regions and self.global_live_cores_mcpu // 1000 < 1_000: regions = [self._default_region] return self.location_monitor.choose_location( cores, local_ssd_data_disk, data_disk_size_gb, preemptible, regions, machine_type @@ -88,19 +88,25 @@ def name_instance(self): return result @property - def global_total_provisioned_cores_mcpu(self): - return sum(inst_coll.all_versions_provisioned_cores_mcpu for inst_coll in self.name_inst_coll.values()) + def global_total_n_instances(self): + return sum(inst_coll.all_versions_total_n_instances for inst_coll in self.name_inst_coll.values()) @property - def global_current_version_live_free_cores_mcpu(self): - return sum( - inst_coll.current_worker_version_stats.live_free_cores_mcpu for inst_coll in self.name_inst_coll.values() - ) + def global_total_cores_mcpu(self): + return sum(inst_coll.all_versions_total_cores_mcpu for inst_coll in self.name_inst_coll.values()) + + @property + def global_live_n_instances(self): + return sum(inst_coll.all_versions_live_n_instances for inst_coll in self.name_inst_coll.values()) + + @property + def global_live_cores_mcpu(self): + return sum(inst_coll.all_versions_live_cores_mcpu for inst_coll in self.name_inst_coll.values()) @property - def global_current_version_live_schedulable_free_cores_mcpu(self): + def global_current_version_active_schedulable_free_cores_mcpu(self): return sum( - inst_coll.current_worker_version_stats.live_schedulable_free_cores_mcpu + inst_coll.current_worker_version_stats.active_schedulable_free_cores_mcpu for inst_coll in self.name_inst_coll.values() ) @@ -111,6 +117,25 @@ def global_n_instances_by_state(self) -> Counter[str]: collections.Counter(), ) + @property + def 
global_cores_mcpu_by_state(self) -> Counter[str]: + return sum( + (inst_coll.all_versions_cores_mcpu_by_state for inst_coll in self.name_inst_coll.values()), + collections.Counter(), + ) + + @property + def global_schedulable_n_instances(self) -> int: + return sum(pool.current_worker_version_stats.n_instances_by_state['active'] for pool in self.pools.values()) + + @property + def global_schedulable_cores_mcpu(self) -> int: + return sum(pool.current_worker_version_stats.cores_mcpu_by_state['active'] for pool in self.pools.values()) + + @property + def global_schedulable_free_cores_mcpu(self) -> int: + return sum(pool.current_worker_version_stats.active_schedulable_free_cores_mcpu for pool in self.pools.values()) + def get_inst_coll(self, inst_coll_name): return self.name_inst_coll.get(inst_coll_name) @@ -140,34 +165,31 @@ async def get_token_from_instance_name(self, name): class InstanceCollectionStats: def __init__(self): self.n_instances_by_state = {'pending': 0, 'active': 0, 'inactive': 0, 'deleted': 0} + self.cores_mcpu_by_state = {'pending': 0, 'active': 0, 'inactive': 0, 'deleted': 0} self.live_free_cores_mcpu_by_region: Dict[str, int] = collections.defaultdict(int) # pending and active - self.live_free_cores_mcpu = 0 - self.live_total_cores_mcpu = 0 - self.live_schedulable_free_cores_mcpu = 0 + self.active_schedulable_free_cores_mcpu = 0 def remove_instance(self, instance: Instance): self.n_instances_by_state[instance.state] -= 1 + self.cores_mcpu_by_state[instance.state] -= instance.cores_mcpu - if instance.state in ('pending', 'active'): - self.live_free_cores_mcpu -= instance.free_cores_mcpu_nonnegative - self.live_total_cores_mcpu -= instance.cores_mcpu + if instance.state in live_instance_states: self.live_free_cores_mcpu_by_region[instance.region] -= instance.free_cores_mcpu_nonnegative if instance.state == 'active': - self.live_schedulable_free_cores_mcpu -= instance.free_cores_mcpu_nonnegative + self.active_schedulable_free_cores_mcpu -= instance.free_cores_mcpu_nonnegative def add_instance(self, instance: Instance): self.n_instances_by_state[instance.state] += 1 + self.cores_mcpu_by_state[instance.state] += instance.cores_mcpu - if instance.state in ('pending', 'active'): - self.live_free_cores_mcpu += instance.free_cores_mcpu_nonnegative - self.live_total_cores_mcpu += instance.cores_mcpu + if instance.state in live_instance_states: self.live_free_cores_mcpu_by_region[instance.region] += instance.free_cores_mcpu_nonnegative if instance.state == 'active': - self.live_schedulable_free_cores_mcpu += instance.free_cores_mcpu_nonnegative + self.active_schedulable_free_cores_mcpu += instance.free_cores_mcpu_nonnegative class InstanceCollection: @@ -220,8 +242,43 @@ def all_versions_instances_by_state(self): ) @property - def all_versions_provisioned_cores_mcpu(self): - return sum(version_stats.live_total_cores_mcpu for version_stats in self.stats_by_instance_version.values()) + def all_versions_cores_mcpu_by_state(self): + return sum( + ( + collections.Counter(version_stats.cores_mcpu_by_state) + for version_stats in self.stats_by_instance_version.values() + ), + collections.Counter(), + ) + + @property + def all_versions_total_n_instances(self): + return sum( + sum(version_stats.n_instances_by_state.values()) + for version_stats in self.stats_by_instance_version.values() + ) + + @property + def all_versions_live_n_instances(self): + return sum( + version_stats.n_instances_by_state[state] + for version_stats in self.stats_by_instance_version.values() + for state in 
live_instance_states + ) + + @property + def all_versions_total_cores_mcpu(self): + return sum( + sum(version_stats.cores_mcpu_by_state.values()) for version_stats in self.stats_by_instance_version.values() + ) + + @property + def all_versions_live_cores_mcpu(self): + return sum( + version_stats.cores_mcpu_by_state[state] + for version_stats in self.stats_by_instance_version.values() + for state in live_instance_states + ) @property def n_instances(self) -> int: diff --git a/batch/batch/driver/instance_collection/pool.py b/batch/batch/driver/instance_collection/pool.py index e3e3e019a06..b241822d446 100644 --- a/batch/batch/driver/instance_collection/pool.py +++ b/batch/batch/driver/instance_collection/pool.py @@ -460,7 +460,7 @@ async def create_instances(self): log.info( f'{self} n_instances {self.n_instances} {pool_stats.n_instances_by_state}' - f' free_cores {free_cores} live_free_cores {pool_stats.live_free_cores_mcpu / 1000}' + f' active_schedulable_free_cores {pool_stats.active_schedulable_free_cores_mcpu / 1000}' f' full_job_queue_ready_cores {sum(ready_cores_mcpu_per_user.values()) / 1000}' f' head_job_queue_ready_cores {sum(head_job_queue_ready_cores_mcpu.values()) / 1000}' ) diff --git a/batch/batch/driver/main.py b/batch/batch/driver/main.py index f6e6656918b..bce80a89131 100644 --- a/batch/batch/driver/main.py +++ b/batch/batch/driver/main.py @@ -448,11 +448,17 @@ async def get_index(request, userdata): 'pools': inst_coll_manager.pools.values(), 'jpim': jpim, 'instance_id': app['instance_id'], - 'n_instances_by_state': inst_coll_manager.global_n_instances_by_state, + 'global_total_n_instances': inst_coll_manager.global_total_n_instances, + 'global_total_cores_mcpu': inst_coll_manager.global_total_cores_mcpu, + 'global_live_n_instances': inst_coll_manager.global_live_n_instances, + 'global_live_cores_mcpu': inst_coll_manager.global_live_cores_mcpu, + 'global_n_instances_by_state': inst_coll_manager.global_n_instances_by_state, + 'global_cores_mcpu_by_state': inst_coll_manager.global_cores_mcpu_by_state, + 'global_schedulable_n_instances': inst_coll_manager.global_schedulable_n_instances, + 'global_schedulable_cores_mcpu': inst_coll_manager.global_schedulable_cores_mcpu, + 'global_schedulable_free_cores_mcpu': inst_coll_manager.global_schedulable_free_cores_mcpu, 'instances': inst_coll_manager.name_instance.values(), 'ready_cores_mcpu': ready_cores_mcpu, - 'total_provisioned_cores_mcpu': inst_coll_manager.global_total_provisioned_cores_mcpu, - 'live_schedulable_free_cores_mcpu': inst_coll_manager.global_current_version_live_schedulable_free_cores_mcpu, 'frozen': app['frozen'], 'feature_flags': app['feature_flags'], } diff --git a/batch/batch/driver/templates/index.html b/batch/batch/driver/templates/index.html index b6aa5f2b17c..db10e46cc97 100644 --- a/batch/batch/driver/templates/index.html +++ b/batch/batch/driver/templates/index.html @@ -49,73 +49,98 @@

Feature Flags

Instance Collections

-

Pools

- +
- - - + + + + + + + + + + + + + + - - + + + + {% for pool in pools %} - - - - + + + + + + + + + {% if pool.current_worker_version_stats.cores_mcpu_by_state['active'] != 0 %} + + {% else %} + + {% endif %} {% endfor %} - -
NameWorker TypePreemptibleNameInstancesCoresSchedulable Cores
PendingActiveInactiveDeleted Pending Active Inactive DeletedProvisioned CoresSchedulable CoresFreeTotal% Free
{{ pool.name }}{{ pool.worker_type }}{{ pool.preemptible }} {{ pool.all_versions_instances_by_state['pending'] }} {{ pool.all_versions_instances_by_state['active'] }} {{ pool.all_versions_instances_by_state['inactive'] }} {{ pool.all_versions_instances_by_state['deleted'] }}{{ pool.all_versions_provisioned_cores_mcpu / 1000 }}{{ pool.current_worker_version_stats.live_schedulable_free_cores_mcpu / 1000 }} / {{ pool.all_versions_provisioned_cores_mcpu / 1000 }} {{ pool.all_versions_cores_mcpu_by_state['pending'] / 1000 }}{{ pool.all_versions_cores_mcpu_by_state['active'] / 1000 }}{{ pool.all_versions_cores_mcpu_by_state['inactive'] / 1000 }}{{ pool.all_versions_cores_mcpu_by_state['deleted'] / 1000 }}{{ pool.current_worker_version_stats.active_schedulable_free_cores_mcpu / 1000 }}{{ pool.current_worker_version_stats.cores_mcpu_by_state['active'] / 1000 }}{{ (pool.current_worker_version_stats.active_schedulable_free_cores_mcpu * 100 / pool.current_worker_version_stats.cores_mcpu_by_state['active']) | round(1)}}%
- -

Job Private Instance Manager

- - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + {% if global_schedulable_cores_mcpu != 0 %} + + {% else %} + + {% endif %} + +
NamePendingActiveInactiveDeletedProvisioned CoresSchedulable Cores
{{ jpim.name }} {{ jpim.all_versions_instances_by_state['pending'] }} {{ jpim.all_versions_instances_by_state['active'] }} {{ jpim.all_versions_instances_by_state['inactive'] }} {{ jpim.all_versions_instances_by_state['deleted'] }}{{ jpim.all_versions_provisioned_cores_mcpu / 1000 }}{{ jpim.current_worker_version_stats.live_schedulable_free_cores_mcpu / 1000 }} / {{ jpim.all_versions_provisioned_cores_mcpu / 1000 }} {{ jpim.all_versions_cores_mcpu_by_state['pending'] / 1000 }}{{ jpim.all_versions_cores_mcpu_by_state['active'] / 1000 }}{{ jpim.all_versions_cores_mcpu_by_state['inactive'] / 1000 }}{{ jpim.all_versions_cores_mcpu_by_state['deleted'] / 1000 }}
Total{{ global_n_instances_by_state['pending'] }}{{ global_n_instances_by_state['active'] }}{{ global_n_instances_by_state['inactive'] }}{{ global_n_instances_by_state['deleted'] }}{{ global_cores_mcpu_by_state['pending'] / 1000 }}{{ global_cores_mcpu_by_state['active'] / 1000 }}{{ global_cores_mcpu_by_state['inactive'] / 1000 }}{{ global_cores_mcpu_by_state['deleted'] / 1000 }}{{ global_schedulable_free_cores_mcpu / 1000 }}{{ global_schedulable_cores_mcpu / 1000 }}{{ (global_schedulable_free_cores_mcpu * 100 / global_schedulable_cores_mcpu) | round(1)}}%

Instances

-
-
Pending: {{ n_instances_by_state['pending'] }}
-
Active: {{ n_instances_by_state['active'] }}
-
Inactive: {{ n_instances_by_state['inactive'] }}
-
Deleted: {{ n_instances_by_state['deleted'] }}
-
Total provisioned cores: {{ total_provisioned_cores_mcpu / 1000 }}
-
Total schedulable cores: {{ live_schedulable_free_cores_mcpu / 1000 }} / {{ total_provisioned_cores_mcpu / 1000 }}
-
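To make the renaming concrete: "live" covers instances that are pending or active, "schedulable" counts only active workers (and, in the real code, only those on the current instance version), and the new "% Free" column divides schedulable free cores by schedulable total cores with a guard against an empty pool. The sketch below is illustrative Python only, not the actual `InstanceCollectionStats` class; the `summarize` helper and the `.state`/`.cores_mcpu`/`.free_cores_mcpu` attribute names are hypothetical, and the per-worker-version bookkeeping is omitted.

```python
LIVE_INSTANCE_STATES = ('pending', 'active')  # mirrors live_instance_states added in globals.py

def summarize(instances):
    # Tally instance counts and cores by state, plus free cores on active workers.
    n_by_state = {'pending': 0, 'active': 0, 'inactive': 0, 'deleted': 0}
    cores_mcpu_by_state = {'pending': 0, 'active': 0, 'inactive': 0, 'deleted': 0}
    schedulable_free_cores_mcpu = 0
    for inst in instances:
        n_by_state[inst.state] += 1
        cores_mcpu_by_state[inst.state] += inst.cores_mcpu
        if inst.state == 'active':
            schedulable_free_cores_mcpu += inst.free_cores_mcpu

    live_cores_mcpu = sum(cores_mcpu_by_state[s] for s in LIVE_INSTANCE_STATES)
    schedulable_cores_mcpu = cores_mcpu_by_state['active']
    # Same division-by-zero guard as the "% Free" cells in the templates above.
    pct_free = (round(schedulable_free_cores_mcpu * 100 / schedulable_cores_mcpu, 1)
                if schedulable_cores_mcpu != 0 else None)
    return live_cores_mcpu, schedulable_cores_mcpu, pct_free
```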
diff --git a/batch/batch/driver/templates/job_private.html b/batch/batch/driver/templates/job_private.html index ce75824488c..a5a87dd23f3 100644 --- a/batch/batch/driver/templates/job_private.html +++ b/batch/batch/driver/templates/job_private.html @@ -52,24 +52,35 @@

Status

- - - - - - - - + + + + + + + + + + + + + + + + - - - - - - - - + + + + + + + + + + +
PendingActiveInactiveDeletedLive Total CoresLive Free Cores
InstancesCores
PendingActiveInactiveDeletedPendingActiveInactiveDeleted
{{ jpim.current_worker_version_stats.n_instances_by_state['pending'] }}{{ jpim.current_worker_version_stats.n_instances_by_state['active'] }}{{ jpim.current_worker_version_stats.n_instances_by_state['inactive'] }}{{ jpim.current_worker_version_stats.n_instances_by_state['deleted'] }}{{ jpim.current_worker_version_stats.live_total_cores_mcpu / 1000 }}{{ jpim.current_worker_version_stats.live_free_cores_mcpu / 1000 }}
{{ jpim.all_versions_instances_by_state['pending'] }}{{ jpim.all_versions_instances_by_state['active'] }}{{ jpim.all_versions_instances_by_state['inactive'] }}{{ jpim.all_versions_instances_by_state['deleted'] }}{{ jpim.all_versions_cores_mcpu_by_state['pending'] / 1000 }}{{ jpim.all_versions_cores_mcpu_by_state['active'] / 1000 }}{{ jpim.all_versions_cores_mcpu_by_state['inactive'] / 1000 }}{{ jpim.all_versions_cores_mcpu_by_state['deleted'] / 1000 }}
diff --git a/batch/batch/driver/templates/pool.html b/batch/batch/driver/templates/pool.html index 64c5ab96694..6a62fb827e7 100644 --- a/batch/batch/driver/templates/pool.html +++ b/batch/batch/driver/templates/pool.html @@ -69,23 +69,48 @@

Status

- - - - - - - - + + + + + + + + + + + + + + + + + + + + + + - + - - + + + + + + + + + {% if pool.current_worker_version_stats.cores_mcpu_by_state['active'] != 0 %} + + {% else %} + + {% endif %}
PendingActiveInactiveDeletedProvisioned CoresSchedulable Cores
InstancesCoresSchedulable Cores
PendingActiveInactiveDeletedPendingActiveInactiveDeletedFreeTotal% Free
{{ pool.all_versions_instances_by_state['pending'] }} {{ pool.all_versions_instances_by_state['active'] }} {{ pool.all_versions_instances_by_state['inactive'] }} {{ pool.all_versions_instances_by_state['deleted'] }}{{ pool.all_versions_provisioned_cores_mcpu / 1000 }}{{ pool.current_worker_version_stats.live_free_cores_mcpu / 1000 }} / {{ pool.all_versions_provisioned_cores_mcpu / 1000 }} {{ pool.all_versions_cores_mcpu_by_state['pending'] / 1000 }}{{ pool.all_versions_cores_mcpu_by_state['active'] / 1000 }}{{ pool.all_versions_cores_mcpu_by_state['inactive'] / 1000 }}{{ pool.all_versions_cores_mcpu_by_state['deleted'] / 1000 }}{{ pool.current_worker_version_stats.active_schedulable_free_cores_mcpu / 1000 }}{{ pool.current_worker_version_stats.cores_mcpu_by_state['active'] / 1000 }}{{ (pool.current_worker_version_stats.active_schedulable_free_cores_mcpu * 100 / pool.current_worker_version_stats.cores_mcpu_by_state['active']) | round(1)}}%
diff --git a/batch/batch/globals.py b/batch/batch/globals.py index 5014241eac1..134878338d5 100644 --- a/batch/batch/globals.py +++ b/batch/batch/globals.py @@ -2,6 +2,8 @@ complete_states = ('Cancelled', 'Error', 'Failed', 'Success') +live_instance_states = ('pending', 'active') + valid_state_transitions = { 'Pending': {'Ready'}, 'Ready': {'Creating', 'Running', 'Cancelled', 'Error'}, diff --git a/web_common/web_common/styles/main.scss b/web_common/web_common/styles/main.scss index e21f87b91ee..3d99d005bc2 100644 --- a/web_common/web_common/styles/main.scss +++ b/web_common/web_common/styles/main.scss @@ -237,6 +237,16 @@ a { background-color: #888; } + tfoot { + td { + white-space: nowrap; + padding: 3px 10px; + color: $white; + background-color: #888; + font-weight: 700; + } + } + tr { &:nth-of-type(even) { background-color: #f0f0f0; From 21c6a40ccafd5b1b8f126fce81c070d3b58b33fe Mon Sep 17 00:00:00 2001 From: jigold Date: Mon, 13 Nov 2023 11:28:41 -0500 Subject: [PATCH 02/48] [services] Add cloudprofile.agent role to service accounts in terraform (#13978) --- infra/gcp-broad/main.tf | 16 ++++++++++++++++ infra/gcp/main.tf | 10 ++++++++++ 2 files changed, 26 insertions(+) diff --git a/infra/gcp-broad/main.tf b/infra/gcp-broad/main.tf index 573f33239a1..a9c495f9af2 100644 --- a/infra/gcp-broad/main.tf +++ b/infra/gcp-broad/main.tf @@ -384,6 +384,10 @@ resource "google_sql_database_instance" "db" { } } + lifecycle { + ignore_changes = [settings.0.tier] + } + timeouts {} } @@ -412,6 +416,11 @@ resource "google_artifact_registry_repository" "repository" { format = "DOCKER" repository_id = "hail" location = var.artifact_registry_location + + # https://github.com/hashicorp/terraform-provider-azurerm/issues/7396 + lifecycle { + ignore_changes = [cleanup_policies, timeouts, cleanup_policy_dry_run] + } } resource "google_service_account" "gcr_push" { @@ -458,6 +467,7 @@ module "auth_gsa_secret" { iam_roles = [ "iam.serviceAccountAdmin", "iam.serviceAccountKeyAdmin", + "cloudprofiler.agent", ] } @@ -467,6 +477,7 @@ module "testns_auth_gsa_secret" { project = var.gcp_project iam_roles = [ "iam.serviceAccountViewer", + "cloudprofiler.agent", ] } @@ -478,6 +489,7 @@ module "batch_gsa_secret" { "compute.instanceAdmin.v1", "iam.serviceAccountUser", "logging.viewer", + "cloudprofiler.agent", ] } @@ -495,6 +507,7 @@ module "testns_batch_gsa_secret" { "compute.instanceAdmin.v1", "iam.serviceAccountUser", "logging.viewer", + "cloudprofiler.agent", ] } @@ -508,6 +521,9 @@ module "ci_gsa_secret" { source = "./gsa" name = "ci" project = var.gcp_project + iam_roles = [ + "cloudprofiler.agent", + ] } resource "google_artifact_registry_repository_iam_member" "artifact_registry_viewer" { diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 8a28a21fdeb..3bfdafc32f5 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -437,6 +437,7 @@ module "auth_gsa_secret" { iam_roles = [ "iam.serviceAccountAdmin", "iam.serviceAccountKeyAdmin", + "cloudprofiler.agent", ] } @@ -446,6 +447,7 @@ module "testns_auth_gsa_secret" { project = var.gcp_project iam_roles = [ "iam.serviceAccountViewer", + "cloudprofiler.agent", ] } @@ -458,6 +460,7 @@ module "batch_gsa_secret" { "iam.serviceAccountUser", "logging.viewer", "storage.admin", + "cloudprofiler.agent", ] } @@ -475,6 +478,7 @@ module "testns_batch_gsa_secret" { "compute.instanceAdmin.v1", "iam.serviceAccountUser", "logging.viewer", + "cloudprofiler.agent", ] } @@ -488,12 +492,18 @@ module "ci_gsa_secret" { source = "./gsa_k8s_secret" name = "ci" project = var.gcp_project 
+ iam_roles = [ + "cloudprofiler.agent", + ] } module "testns_ci_gsa_secret" { source = "./gsa_k8s_secret" name = "testns-ci" project = var.gcp_project + iam_roles = [ + "cloudprofiler.agent", + ] } resource "google_storage_bucket_iam_member" "testns_ci_bucket_admin" { From 258b26f6044296f0e5e2362d1e9231564497e1d5 Mon Sep 17 00:00:00 2001 From: Daniel Goldstein Date: Mon, 13 Nov 2023 12:06:27 -0700 Subject: [PATCH 03/48] [query] Remove unused and redundant requirements (#13988) `asyncinit` is unused AFAICT and the `frozenlist` requirement is already inherited from hailtop (though it is not used in `hailtop` only in query code, so am also happy to move it out of hailtop fully and into query). --- Makefile | 2 +- batch/pinned-requirements.txt | 6 +-- benchmark/python/pinned-requirements.txt | 8 ++-- ci/pinned-requirements.txt | 2 +- gear/pinned-requirements.txt | 16 +++---- hail/python/dev/pinned-requirements.txt | 52 +++++++++------------ hail/python/hailtop/config/deploy_config.py | 2 +- hail/python/hailtop/pinned-requirements.txt | 16 +++---- hail/python/pinned-requirements.txt | 27 +++++------ hail/python/requirements.txt | 2 - web_common/pinned-requirements.txt | 2 +- 11 files changed, 62 insertions(+), 73 deletions(-) diff --git a/Makefile b/Makefile index d87f9950f78..d810b90aa6f 100644 --- a/Makefile +++ b/Makefile @@ -118,7 +118,7 @@ hail/python/pinned-requirements.txt: hail/python/hailtop/pinned-requirements.txt hail/python/dev/pinned-requirements.txt: hail/python/pinned-requirements.txt hail/python/dev/requirements.txt ./generate-linux-pip-lockfile.sh hail/python/dev -benchmark/python/pinned-requirements.txt: benchmark/python/requirements.txt +benchmark/python/pinned-requirements.txt: benchmark/python/requirements.txt hail/python/pinned-requirements.txt hail/python/dev/pinned-requirements.txt ./generate-linux-pip-lockfile.sh benchmark/python gear/pinned-requirements.txt: hail/python/pinned-requirements.txt hail/python/dev/pinned-requirements.txt hail/python/hailtop/pinned-requirements.txt gear/requirements.txt diff --git a/batch/pinned-requirements.txt b/batch/pinned-requirements.txt index cfd624921db..35dadef8203 100644 --- a/batch/pinned-requirements.txt +++ b/batch/pinned-requirements.txt @@ -37,7 +37,7 @@ attrs==23.1.0 # -c hail/batch/../hail/python/pinned-requirements.txt # -c hail/batch/../web_common/pinned-requirements.txt # aiohttp -charset-normalizer==3.3.1 +charset-normalizer==3.3.2 # via # -c hail/batch/../gear/pinned-requirements.txt # -c hail/batch/../hail/python/dev/pinned-requirements.txt @@ -78,11 +78,11 @@ packaging==23.2 # -c hail/batch/../hail/python/dev/pinned-requirements.txt # -c hail/batch/../hail/python/pinned-requirements.txt # plotly -pandas==2.1.1 +pandas==2.1.2 # via # -c hail/batch/../hail/python/pinned-requirements.txt # -r hail/batch/requirements.txt -plotly==5.17.0 +plotly==5.18.0 # via # -c hail/batch/../hail/python/pinned-requirements.txt # -r hail/batch/requirements.txt diff --git a/benchmark/python/pinned-requirements.txt b/benchmark/python/pinned-requirements.txt index 7a15b1905dc..9fc6c11ba50 100644 --- a/benchmark/python/pinned-requirements.txt +++ b/benchmark/python/pinned-requirements.txt @@ -4,19 +4,19 @@ # # pip-compile --output-file=hail/benchmark/python/pinned-requirements.txt hail/benchmark/python/requirements.txt # -contourpy==1.1.1 +contourpy==1.2.0 # via # -c hail/benchmark/python/../../hail/python/pinned-requirements.txt # matplotlib cycler==0.12.1 # via matplotlib -fonttools==4.43.1 +fonttools==4.44.0 # via matplotlib 
-importlib-resources==6.1.0 +importlib-resources==6.1.1 # via matplotlib kiwisolver==1.4.5 # via matplotlib -matplotlib==3.8.0 +matplotlib==3.8.1 # via -r hail/benchmark/python/requirements.txt numpy==1.26.1 # via diff --git a/ci/pinned-requirements.txt b/ci/pinned-requirements.txt index 0a5dccb0c44..7acf28dbfd2 100644 --- a/ci/pinned-requirements.txt +++ b/ci/pinned-requirements.txt @@ -15,7 +15,7 @@ cffi==1.16.0 # -c hail/ci/../hail/python/dev/pinned-requirements.txt # -c hail/ci/../hail/python/pinned-requirements.txt # cryptography -charset-normalizer==3.3.1 +charset-normalizer==3.3.2 # via # -c hail/ci/../gear/pinned-requirements.txt # -c hail/ci/../hail/python/dev/pinned-requirements.txt diff --git a/gear/pinned-requirements.txt b/gear/pinned-requirements.txt index fcba6b39f8d..11492abab7e 100644 --- a/gear/pinned-requirements.txt +++ b/gear/pinned-requirements.txt @@ -41,7 +41,7 @@ attrs==23.1.0 # aiomonitor backports-strenum==1.2.8 # via aiomonitor -cachetools==5.3.1 +cachetools==5.3.2 # via # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt @@ -53,7 +53,7 @@ certifi==2023.7.22 # -c hail/gear/../hail/python/pinned-requirements.txt # kubernetes-asyncio # requests -charset-normalizer==3.3.1 +charset-normalizer==3.3.2 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt @@ -73,11 +73,11 @@ frozenlist==1.4.0 # -c hail/gear/../hail/python/pinned-requirements.txt # aiohttp # aiosignal -google-api-core==2.12.0 +google-api-core==2.13.0 # via google-api-python-client -google-api-python-client==2.105.0 +google-api-python-client==2.107.0 # via google-cloud-profiler -google-auth==2.23.3 +google-auth==2.23.4 # via # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt @@ -128,14 +128,14 @@ multidict==6.0.4 # -c hail/gear/../hail/python/pinned-requirements.txt # aiohttp # yarl -orjson==3.9.9 +orjson==3.9.10 # via # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt # -r hail/gear/requirements.txt prometheus-async==19.2.0 # via -r hail/gear/requirements.txt -prometheus-client==0.17.1 +prometheus-client==0.18.0 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt # -r hail/gear/requirements.txt @@ -223,7 +223,7 @@ urllib3==1.26.18 # -c hail/gear/../hail/python/pinned-requirements.txt # kubernetes-asyncio # requests -wcwidth==0.2.8 +wcwidth==0.2.9 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt # prompt-toolkit diff --git a/hail/python/dev/pinned-requirements.txt b/hail/python/dev/pinned-requirements.txt index 87b171648fd..759e4be5ad9 100644 --- a/hail/python/dev/pinned-requirements.txt +++ b/hail/python/dev/pinned-requirements.txt @@ -49,8 +49,6 @@ babel==2.13.1 # via # jupyterlab-server # sphinx -backcall==0.2.0 - # via ipython beautifulsoup4==4.12.2 # via nbconvert black==22.12.0 @@ -67,7 +65,7 @@ cffi==1.16.0 # argon2-cffi-bindings cfgv==3.4.0 # via pre-commit -charset-normalizer==3.3.1 +charset-normalizer==3.3.2 # via # -c hail/hail/python/dev/../pinned-requirements.txt # aiohttp @@ -79,7 +77,7 @@ click==8.1.7 # aiohttp-devtools # black # curlylint -comm==0.1.4 +comm==0.2.0 # via # ipykernel # ipywidgets @@ -113,13 +111,13 @@ exceptiongroup==1.1.3 # pytest execnet==2.0.2 # via pytest-xdist -executing==2.0.0 +executing==2.0.1 # via # devtools # stack-data fastjsonschema==2.18.1 # via nbformat -filelock==3.12.4 
+filelock==3.13.1 # via virtualenv fqdn==1.5.1 # via jsonschema @@ -130,7 +128,7 @@ frozenlist==1.4.0 # aiosignal fswatch==0.1.1 # via -r hail/hail/python/dev/requirements.txt -identify==2.5.30 +identify==2.5.31 # via pre-commit idna==3.4 # via @@ -157,13 +155,11 @@ ipykernel==6.26.0 # jupyter-console # jupyterlab # qtconsole -ipython==8.16.1 +ipython==8.17.2 # via # ipykernel # ipywidgets # jupyter-console -ipython-genutils==0.2.0 - # via qtconsole ipywidgets==8.1.1 # via jupyter isoduration==20.11.0 @@ -185,7 +181,7 @@ json5==0.9.14 # via jupyterlab-server jsonpointer==2.4 # via jsonschema -jsonschema[format-nongpl]==4.19.1 +jsonschema[format-nongpl]==4.19.2 # via # jupyter-events # jupyterlab-server @@ -194,7 +190,7 @@ jsonschema-specifications==2023.7.1 # via jsonschema jupyter==1.0.0 # via -r hail/hail/python/dev/requirements.txt -jupyter-client==8.5.0 +jupyter-client==8.6.0 # via # ipykernel # jupyter-console @@ -203,7 +199,7 @@ jupyter-client==8.5.0 # qtconsole jupyter-console==6.6.3 # via jupyter -jupyter-core==5.4.0 +jupyter-core==5.5.0 # via # ipykernel # jupyter-client @@ -214,11 +210,11 @@ jupyter-core==5.4.0 # nbconvert # nbformat # qtconsole -jupyter-events==0.8.0 +jupyter-events==0.9.0 # via jupyter-server jupyter-lsp==2.2.0 # via jupyterlab -jupyter-server==2.9.1 +jupyter-server==2.10.0 # via # jupyter-lsp # jupyterlab @@ -227,7 +223,7 @@ jupyter-server==2.9.1 # notebook-shim jupyter-server-terminals==0.4.4 # via jupyter-server -jupyterlab==4.0.7 +jupyterlab==4.0.8 # via notebook jupyterlab-pygments==0.2.2 # via nbconvert @@ -259,9 +255,9 @@ multidict==6.0.4 # yarl mypy-extensions==1.0.0 # via black -nbclient==0.8.0 +nbclient==0.9.0 # via nbconvert -nbconvert==7.9.2 +nbconvert==7.11.0 # via # jupyter # jupyter-server @@ -314,8 +310,6 @@ pathspec==0.11.2 # curlylint pexpect==4.8.0 # via ipython -pickleshare==0.7.5 - # via ipython pillow==10.1.0 # via # -c hail/hail/python/dev/../pinned-requirements.txt @@ -330,7 +324,7 @@ pluggy==1.3.0 # via pytest pre-commit==3.5.0 # via -r hail/hail/python/dev/requirements.txt -prometheus-client==0.17.1 +prometheus-client==0.18.0 # via jupyter-server prompt-toolkit==3.0.39 # via @@ -362,7 +356,7 @@ pygments==2.16.1 # sphinx pylint==2.17.7 # via -r hail/hail/python/dev/requirements.txt -pyright==1.1.333 +pyright==1.1.334 # via -r hail/hail/python/dev/requirements.txt pytest==7.4.3 # via @@ -411,7 +405,7 @@ pyzmq==25.1.1 # jupyter-console # jupyter-server # qtconsole -qtconsole==5.4.4 +qtconsole==5.5.0 # via jupyter qtpy==2.4.1 # via qtconsole @@ -433,11 +427,11 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rpds-py==0.10.6 +rpds-py==0.12.0 # via # jsonschema # referencing -ruff==0.1.2 +ruff==0.1.4 # via -r hail/hail/python/dev/requirements.txt send2trash==1.8.2 # via jupyter-server @@ -503,7 +497,7 @@ tomli==2.0.1 # jupyterlab # pylint # pytest -tomlkit==0.12.1 +tomlkit==0.12.2 # via pylint tornado==6.3.3 # via @@ -514,7 +508,7 @@ tornado==6.3.3 # jupyterlab # notebook # terminado -traitlets==5.12.0 +traitlets==5.13.0 # via # comm # ipykernel @@ -576,7 +570,7 @@ virtualenv==20.24.6 # via pre-commit watchfiles==0.21.0 # via aiohttp-devtools -wcwidth==0.2.8 +wcwidth==0.2.9 # via prompt-toolkit webcolors==1.13 # via jsonschema @@ -586,7 +580,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.6.4 # via jupyter-server -wheel==0.41.2 +wheel==0.41.3 # via -r hail/hail/python/dev/requirements.txt widgetsnbextension==4.0.9 # via ipywidgets diff --git a/hail/python/hailtop/config/deploy_config.py 
b/hail/python/hailtop/config/deploy_config.py index f13a00a57ef..d6b66c7adc3 100644 --- a/hail/python/hailtop/config/deploy_config.py +++ b/hail/python/hailtop/config/deploy_config.py @@ -62,7 +62,7 @@ def with_default_namespace(self, default_namespace): def with_location(self, location): return DeployConfig(location, self._default_namespace, self._domain) - def default_namespace(self): + def default_namespace(self) -> str: return self._default_namespace def location(self): diff --git a/hail/python/hailtop/pinned-requirements.txt b/hail/python/hailtop/pinned-requirements.txt index fe82b17a3b9..220a3440e19 100644 --- a/hail/python/hailtop/pinned-requirements.txt +++ b/hail/python/hailtop/pinned-requirements.txt @@ -22,7 +22,7 @@ azure-core==1.29.5 # azure-mgmt-core # azure-storage-blob # msrest -azure-identity==1.14.1 +azure-identity==1.15.0 # via -r hail/hail/python/hailtop/requirements.txt azure-mgmt-core==1.4.0 # via azure-mgmt-storage @@ -30,14 +30,14 @@ azure-mgmt-storage==20.1.0 # via -r hail/hail/python/hailtop/requirements.txt azure-storage-blob==12.18.3 # via -r hail/hail/python/hailtop/requirements.txt -boto3==1.28.69 +boto3==1.28.80 # via -r hail/hail/python/hailtop/requirements.txt -botocore==1.31.69 +botocore==1.31.80 # via # -r hail/hail/python/hailtop/requirements.txt # boto3 # s3transfer -cachetools==5.3.1 +cachetools==5.3.2 # via google-auth certifi==2023.7.22 # via @@ -47,7 +47,7 @@ cffi==1.16.0 # via # cryptography # pycares -charset-normalizer==3.3.1 +charset-normalizer==3.3.2 # via # aiohttp # requests @@ -68,7 +68,7 @@ frozenlist==1.4.0 # -r hail/hail/python/hailtop/requirements.txt # aiohttp # aiosignal -google-auth==2.23.3 +google-auth==2.23.4 # via # -r hail/hail/python/hailtop/requirements.txt # google-auth-oauthlib @@ -92,7 +92,7 @@ jmespath==1.0.1 # botocore jproperties==2.1.1 # via -r hail/hail/python/hailtop/requirements.txt -msal==1.24.1 +msal==1.25.0 # via # azure-identity # msal-extensions @@ -108,7 +108,7 @@ nest-asyncio==1.5.8 # via -r hail/hail/python/hailtop/requirements.txt oauthlib==3.2.2 # via requests-oauthlib -orjson==3.9.9 +orjson==3.9.10 # via -r hail/hail/python/hailtop/requirements.txt portalocker==2.8.2 # via msal-extensions diff --git a/hail/python/pinned-requirements.txt b/hail/python/pinned-requirements.txt index c50706333a6..db965402254 100644 --- a/hail/python/pinned-requirements.txt +++ b/hail/python/pinned-requirements.txt @@ -20,8 +20,6 @@ async-timeout==4.0.3 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # aiohttp -asyncinit==0.2.4 - # via -r hail/hail/python/requirements.txt attrs==23.1.0 # via # -c hail/hail/python/hailtop/pinned-requirements.txt @@ -39,7 +37,7 @@ azure-core==1.29.5 # azure-mgmt-core # azure-storage-blob # msrest -azure-identity==1.14.1 +azure-identity==1.15.0 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt @@ -57,17 +55,17 @@ azure-storage-blob==12.18.3 # -r hail/hail/python/hailtop/requirements.txt bokeh==3.3.0 # via -r hail/hail/python/requirements.txt -boto3==1.28.69 +boto3==1.28.80 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt -botocore==1.31.69 +botocore==1.31.80 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt # boto3 # s3transfer -cachetools==5.3.1 +cachetools==5.3.2 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # google-auth @@ -81,7 +79,7 @@ cffi==1.16.0 # -c 
hail/hail/python/hailtop/pinned-requirements.txt # cryptography # pycares -charset-normalizer==3.3.1 +charset-normalizer==3.3.2 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # aiohttp @@ -94,7 +92,7 @@ commonmark==0.9.1 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # rich -contourpy==1.1.1 +contourpy==1.2.0 # via bokeh cryptography==41.0.5 # via @@ -115,10 +113,9 @@ frozenlist==1.4.0 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt - # -r hail/hail/python/requirements.txt # aiohttp # aiosignal -google-auth==2.23.3 +google-auth==2.23.4 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt @@ -158,7 +155,7 @@ jproperties==2.1.1 # -r hail/hail/python/hailtop/requirements.txt markupsafe==2.1.3 # via jinja2 -msal==1.24.1 +msal==1.25.0 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # azure-identity @@ -191,7 +188,7 @@ oauthlib==3.2.2 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # requests-oauthlib -orjson==3.9.9 +orjson==3.9.10 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt @@ -199,7 +196,7 @@ packaging==23.2 # via # bokeh # plotly -pandas==2.1.1 +pandas==2.1.2 # via # -r hail/hail/python/requirements.txt # bokeh @@ -207,7 +204,7 @@ parsimonious==0.10.0 # via -r hail/hail/python/requirements.txt pillow==10.1.0 # via bokeh -plotly==5.17.0 +plotly==5.18.0 # via -r hail/hail/python/requirements.txt portalocker==2.8.2 # via @@ -335,7 +332,7 @@ uvloop==0.19.0 ; sys_platform != "win32" # -r hail/hail/python/hailtop/requirements.txt wrapt==1.15.0 # via deprecated -xyzservices==2023.10.0 +xyzservices==2023.10.1 # via bokeh yarl==1.9.2 # via diff --git a/hail/python/requirements.txt b/hail/python/requirements.txt index 35f4a3edfcf..9f8492a2a07 100644 --- a/hail/python/requirements.txt +++ b/hail/python/requirements.txt @@ -1,12 +1,10 @@ -c hailtop/pinned-requirements.txt -r hailtop/requirements.txt -asyncinit>=0.2.4,<0.3 avro>=1.10,<1.12 bokeh>=3,<4 decorator<5 Deprecated>=1.2.10,<1.3 -frozenlist>=1.3.1,<2 numpy<2 pandas>=2,<3 parsimonious<1 diff --git a/web_common/pinned-requirements.txt b/web_common/pinned-requirements.txt index 217f030afc6..7fa1d0ff664 100644 --- a/web_common/pinned-requirements.txt +++ b/web_common/pinned-requirements.txt @@ -30,7 +30,7 @@ attrs==23.1.0 # -c hail/web_common/../hail/python/dev/pinned-requirements.txt # -c hail/web_common/../hail/python/pinned-requirements.txt # aiohttp -charset-normalizer==3.3.1 +charset-normalizer==3.3.2 # via # -c hail/web_common/../gear/pinned-requirements.txt # -c hail/web_common/../hail/python/dev/pinned-requirements.txt From 9dc3495336a727d4ee5703b9f545135b1e78cb7b Mon Sep 17 00:00:00 2001 From: Patrick Schultz Date: Wed, 15 Nov 2023 08:24:59 -0500 Subject: [PATCH 04/48] [compiler] disentangle IR parser from bindings (#13990) Currently the binding structure is redundantly specified in two places: Binds.scala, and the parser. We need the binding structure in the parser to propagate the environment, so we can annotate `Ref` nodes (and a few other things) with their types. But we can't use Binds.scala because we don't yet have an IR. This PR removes environment maintenance from the parser by deferring type annotation to a separate pass (which is simple, because it can use the Binds.scala infrastructure). 
One consequence is that we can't assign types to nodes like `Ref` during parsing, which means we can't ask for the type of any node during parsing, and by extension we can't ask for types of children in IR node constructors. Instead, all typechecking logic is moved to the `TypeCheck` pass. Some benefits of this change: * The parser is simpler, as it doesn't have to maintain a typing environment. * Binds.scala is now the single source of truth on the binding structure of the IR. * Instead of typechecking being split in an ad-hoc way between IR constructors and the `TypeCheck` pass, all typechecking and type error reporting logic is in one place. * The parser parses a context-free grammar, no more and no less. If the input is gramatically correct, the parser succeeds. * We can round trip IR with type errors through the text representation. For instance, if we log an IR that fails TypeCheck, we can copy the IR from the log, parse it, then debug. This change was motivated by my work in progress to convert the parser to use the SSA grammar, which this should greatly simplify. I chose to make the type annotation pass after parsing mutate the IR in place (with the unfortunate exception of `Apply`, which can change into an `ApplyIR` or `ApplySpecial`. Do these really need to be separate nodes?). The type of a `Ref` node was already mutable to allow this sort of deferred annotation, and I've had to make a few other things mutable as well. Alternatively we could rebuild the entire IR to include type annotations, but I think the mutation is sufficiently well localized, and this is a common compiler pattern. This touches a lot of lines, but the high level changes are: * Remove the `refMap` from the `IRParserEnvironment`, and remove all code that modifies the typing environment from the parser. Nodes like `Ref` that need a type from the environment get types set to `null`, to be filled in after parsing. * Add `annotateTypes` pass to fill in type annotations from the environment. This is currently written in the parser, and always called after parsing. This means for the moment we can only parse type correct IR. But in the future we could move this to a separate stage of the compilation pipeline. * Move typechecking logic on relational nodes from the constructors to a `typecheck` method, which is called from the `TypeCheck` pass. * Make the `typ` field on IR nodes consistently lazy (or a def when the type is a child's type without modification). Before we sometimes did this for performance, but it's now required to avoid querying children's types during construction. * Make types mutable on `AggSignature` and `ComparisonOp`, so they can be filled in after parsing. * Ensure that the structure in `Binds.scala` satisfies the following invariant: to construct the typing environment of child `i`, we only need the types of children with index less than `i`. This was almost always satisfied already, and allows us to use the generic binds infrastucture in the pass to annotate types (where when visiting child `i`, we can only query types of already visited children). * Change the text representation of `TailLoop`/`Recur` to move the explicit type annotation from `Recur` to `TailLoop`. This is necessary to comply with the previous point. It's also consistent with `Ref`, where types of references are inferred from the environment. 
* Add explicit types in the text representation of `TableFilterIntervals` and `MatrixFilterIntervals`, where the types were needed during parsing and we can no longer get them from child types. * Fix IR examples used in parser tests to be type correct. * Add an explicit return type to the `Apply` node. Before the parser parsed an `Apply` node to one of `Apply/ApplySpecial/ApplyIR`; now it always parses to `Apply`, and the type annotation pass converts to the appropriate specialization, which needs the parsed return type. --- hail/python/hail/ir/ir.py | 4 +- hail/python/hail/ir/matrix_ir.py | 2 +- hail/python/hail/ir/table_ir.py | 2 +- hail/python/test/hail/test_ir.py | 18 +- .../main/scala/is/hail/backend/Backend.scala | 2 +- .../is/hail/backend/local/LocalBackend.scala | 2 +- .../is/hail/backend/spark/SparkBackend.scala | 2 +- .../main/scala/is/hail/expr/ir/AggOp.scala | 6 +- .../main/scala/is/hail/expr/ir/BaseIR.scala | 30 + .../main/scala/is/hail/expr/ir/Binds.scala | 16 +- .../scala/is/hail/expr/ir/BlockMatrixIR.scala | 74 ++- .../main/scala/is/hail/expr/ir/Children.scala | 4 +- .../scala/is/hail/expr/ir/ComparisonOp.scala | 77 ++- .../src/main/scala/is/hail/expr/ir/Copy.scala | 8 +- .../src/main/scala/is/hail/expr/ir/Emit.scala | 2 +- .../hail/expr/ir/ExtractIntervalFilters.scala | 2 +- hail/src/main/scala/is/hail/expr/ir/IR.scala | 82 ++- .../is/hail/expr/ir/InTailPosition.scala | 2 +- .../scala/is/hail/expr/ir/InferType.scala | 6 +- .../scala/is/hail/expr/ir/Interpret.scala | 2 +- .../main/scala/is/hail/expr/ir/MatrixIR.scala | 168 +++--- .../scala/is/hail/expr/ir/NestingDepth.scala | 2 +- .../is/hail/expr/ir/NormalizeNames.scala | 4 +- .../main/scala/is/hail/expr/ir/Parser.scala | 547 +++++++++--------- .../main/scala/is/hail/expr/ir/Pretty.scala | 16 +- .../is/hail/expr/ir/PruneDeadFields.scala | 2 +- .../scala/is/hail/expr/ir/Requiredness.scala | 6 +- .../main/scala/is/hail/expr/ir/Simplify.scala | 10 +- .../main/scala/is/hail/expr/ir/TableIR.scala | 255 ++++---- .../scala/is/hail/expr/ir/TypeCheck.scala | 65 +-- .../analyses/ControlFlowPreventsSplit.scala | 2 +- .../hail/expr/ir/analyses/SemanticHash.scala | 2 +- .../is/hail/expr/ir/functions/Functions.scala | 8 +- .../hail/expr/ir/lowering/LowerTableIR.scala | 272 +++++---- .../main/scala/is/hail/expr/ir/package.scala | 10 +- .../scala/is/hail/types/virtual/TStruct.scala | 4 +- .../is/hail/expr/ir/ForwardLetsSuite.scala | 2 +- .../test/scala/is/hail/expr/ir/IRSuite.scala | 30 +- .../scala/is/hail/expr/ir/MatrixIRSuite.scala | 6 +- .../is/hail/expr/ir/RequirednessSuite.scala | 9 +- .../is/hail/expr/ir/table/TableGenSuite.scala | 12 +- 41 files changed, 967 insertions(+), 808 deletions(-) diff --git a/hail/python/hail/ir/ir.py b/hail/python/hail/ir/ir.py index bb1716eb613..5b54f1dd482 100644 --- a/hail/python/hail/ir/ir.py +++ b/hail/python/hail/ir/ir.py @@ -454,7 +454,7 @@ def copy(self, *children): return TailLoop(self.name, [(n, v) for (n, _), v in zip(self.params, params)], body) def head_str(self): - return f'{escape_id(self.name)} ({" ".join([escape_id(n) for n, _ in self.params])})' + return f'{escape_id(self.name)} ({" ".join([escape_id(n) for n, _ in self.params])}) {self.body.typ._parsable_string()}' def _eq(self, other): return self.name == other.name @@ -493,7 +493,7 @@ def copy(self, args): return Recur(self.name, args, self.return_type) def head_str(self): - return f'{escape_id(self.name)} {self.return_type._parsable_string()}' + return f'{escape_id(self.name)}' def _eq(self, other): return other.name == self.name 
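The `head_str` changes above move the explicit type annotation from `Recur` to `TailLoop`, which only works because the parser no longer needs a child's type while parsing: types such as `Ref.typ` are left unset, and a post-parse `annotateTypes` pass fills them in, visiting children in index order so that each child's environment depends only on earlier siblings (the Binds.scala invariant described in the commit message). Below is a rough illustration of that deferral as a self-contained Python toy, not the actual Scala pass; every class and function name in it is made up for the example.

```python
class I32:
    typ = 'Int32'                                    # literals know their type up front

class Ref:
    def __init__(self, name):
        self.name, self.typ = name, None             # parser leaves the type unset

class Let:
    def __init__(self, name, value, body):
        self.name, self.value, self.body = name, value, body
    @property
    def typ(self):
        return self.body.typ                         # lazy type, queried only after annotation

def children(node):
    return [node.value, node.body] if isinstance(node, Let) else []

def bindings(node, i):
    # Stand-in for Binds.scala: the environment of child i needs only types of children < i.
    return {node.name: node.value.typ} if isinstance(node, Let) and i == 1 else {}

def annotate_types(node, env):
    if isinstance(node, Ref):
        node.typ = env[node.name]                    # fill the deferred annotation in place
    for i, child in enumerate(children(node)):
        annotate_types(child, {**env, **bindings(node, i)})

ir = Let('x', I32(), Ref('x'))                       # as if parsed from text, with no type on Ref
annotate_types(ir, {})
assert ir.typ == 'Int32'
```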
diff --git a/hail/python/hail/ir/matrix_ir.py b/hail/python/hail/ir/matrix_ir.py index 44fcc8f44b6..06644337b19 100644 --- a/hail/python/hail/ir/matrix_ir.py +++ b/hail/python/hail/ir/matrix_ir.py @@ -1183,7 +1183,7 @@ def _handle_randomness(self, row_uid_field_name, col_uid_field_name): return MatrixFilterIntervals(child, self.intervals, self.point_type, self.keep) def head_str(self): - return f'{dump_json(hl.tarray(hl.tinterval(self.point_type))._convert_to_json(self.intervals))} {self.keep}' + return f'{self.child.typ.row_key_type._parsable_string()} {dump_json(hl.tarray(hl.tinterval(self.point_type))._convert_to_json(self.intervals))} {self.keep}' def _eq(self, other): return self.intervals == other.intervals and self.point_type == other.point_type and self.keep == other.keep diff --git a/hail/python/hail/ir/table_ir.py b/hail/python/hail/ir/table_ir.py index 38fcaae51c3..22c0530c35a 100644 --- a/hail/python/hail/ir/table_ir.py +++ b/hail/python/hail/ir/table_ir.py @@ -881,7 +881,7 @@ def _handle_randomness(self, uid_field_name): return TableFilterIntervals(self.child.handle_randomness(uid_field_name), self.intervals, self.point_type, self.keep) def head_str(self): - return f'{dump_json(hl.tarray(hl.tinterval(self.point_type))._convert_to_json(self.intervals))} {self.keep}' + return f'{self.child.typ.key_type._parsable_string()} {dump_json(hl.tarray(hl.tinterval(self.point_type))._convert_to_json(self.intervals))} {self.keep}' def _eq(self, other): return self.intervals == other.intervals and self.point_type == other.point_type and self.keep == other.keep diff --git a/hail/python/test/hail/test_ir.py b/hail/python/test/hail/test_ir.py index b2a53f670b7..b2340e99e33 100644 --- a/hail/python/test/hail/test_ir.py +++ b/hail/python/test/hail/test_ir.py @@ -22,8 +22,9 @@ def value_irs_env(self): 'mat': hl.tndarray(hl.tfloat64, 2), 'aa': hl.tarray(hl.tarray(hl.tint32)), 'sta': hl.tstream(hl.tarray(hl.tint32)), - 'da': hl.tarray(hl.ttuple(hl.tint32, hl.tstr)), - 'nd': hl.tndarray(hl.tfloat64, 1), + 'sts': hl.tstream(hl.tstruct(x=hl.tint32, y=hl.tint64, z=hl.tfloat64)), + 'da': hl.tstream(hl.ttuple(hl.tint32, hl.tstr)), + 'nd': hl.tndarray(hl.tfloat64, 2), 'v': hl.tint32, 's': hl.tstruct(x=hl.tint32, y=hl.tint64, z=hl.tfloat64), 't': hl.ttuple(hl.tint32, hl.tint64, hl.tfloat64), @@ -42,6 +43,7 @@ def value_irs(self): mat = ir.Ref('mat') aa = ir.Ref('aa', env['aa']) sta = ir.Ref('sta', env['sta']) + sts = ir.Ref('sts', env['sts']) da = ir.Ref('da', env['da']) nd = ir.Ref('nd', env['nd']) v = ir.Ref('v', env['v']) @@ -77,7 +79,7 @@ def aggregate(x): ir.ArrayRef(a, i), ir.ArrayLen(a), ir.ArraySort(ir.ToStream(a), 'l', 'r', ir.ApplyComparisonOp("LT", ir.Ref('l', hl.tint32), ir.Ref('r', hl.tint32))), - ir.ToSet(a), + ir.ToSet(st), ir.ToDict(da), ir.ToArray(st), ir.CastToArray(ir.NA(hl.tset(hl.tint32))), @@ -89,17 +91,17 @@ def aggregate(x): ir.NDArrayRef(nd, [ir.I64(1), ir.I64(2)]), ir.NDArrayMap(nd, 'v', v), ir.NDArrayMatMul(nd, nd), - ir.LowerBoundOnOrderedCollection(a, i, True), + ir.LowerBoundOnOrderedCollection(a, i, False), ir.GroupByKey(da), - ir.RNGSplit(rngState, ir.MakeTuple([ir.I64(1), ir.MakeTuple([ir.I64(2), ir.I64(3)])])), + ir.RNGSplit(rngState, ir.MakeTuple([ir.I64(1), ir.I64(2), ir.I64(3)])), ir.StreamMap(st, 'v', v), ir.StreamZip([st, st], ['a', 'b'], ir.TrueIR(), 'ExtendNA'), - ir.StreamFilter(st, 'v', v), + ir.StreamFilter(st, 'v', c), ir.StreamFlatMap(sta, 'v', ir.ToStream(v)), ir.StreamFold(st, ir.I32(0), 'x', 'v', v), ir.StreamScan(st, ir.I32(0), 'x', 'v', v), - 
ir.StreamWhiten(whitenStream, "newChunk", "prevWindow", 0, 0, 0, 0, False), - ir.StreamJoinRightDistinct(st, st, ['k'], ['k'], 'l', 'r', ir.I32(1), "left"), + ir.StreamWhiten(whitenStream, "newChunk", "prevWindow", 1, 1, 1, 1, False), + ir.StreamJoinRightDistinct(sts, sts, ['x'], ['x'], 'l', 'r', ir.I32(1), "left"), ir.StreamFor(st, 'v', ir.Void()), aggregate(ir.AggFilter(ir.TrueIR(), ir.I32(0), False)), aggregate(ir.AggExplode(ir.StreamRange(ir.I32(0), ir.I32(2), ir.I32(1)), 'x', ir.I32(0), False)), diff --git a/hail/src/main/scala/is/hail/backend/Backend.scala b/hail/src/main/scala/is/hail/backend/Backend.scala index 4fb3e28e7de..0f77f6d91ed 100644 --- a/hail/src/main/scala/is/hail/backend/Backend.scala +++ b/hail/src/main/scala/is/hail/backend/Backend.scala @@ -151,7 +151,7 @@ abstract class Backend { def withExecuteContext[T](methodName: String): (ExecuteContext => T) => T final def valueType(s: String): Array[Byte] = { - withExecuteContext("tableType") { ctx => + withExecuteContext("valueType") { ctx => val v = IRParser.parse_value_ir(s, IRParserEnvironment(ctx, irMap = persistedIR.toMap)) v.typ.toString.getBytes(StandardCharsets.UTF_8) } diff --git a/hail/src/main/scala/is/hail/backend/local/LocalBackend.scala b/hail/src/main/scala/is/hail/backend/local/LocalBackend.scala index 57270f3b75d..b133f622bd4 100644 --- a/hail/src/main/scala/is/hail/backend/local/LocalBackend.scala +++ b/hail/src/main/scala/is/hail/backend/local/LocalBackend.scala @@ -296,7 +296,7 @@ class LocalBackend( def parse_value_ir(s: String, refMap: java.util.Map[String, String]): IR = { ExecutionTimer.logTime("LocalBackend.parse_value_ir") { timer => withExecuteContext(timer) { ctx => - IRParser.parse_value_ir(s, IRParserEnvironment(ctx, BindingEnv.eval(refMap.asScala.toMap.mapValues(IRParser.parseType).toSeq: _*), persistedIR.toMap)) + IRParser.parse_value_ir(s, IRParserEnvironment(ctx, persistedIR.toMap), BindingEnv.eval(refMap.asScala.toMap.mapValues(IRParser.parseType).toSeq: _*)) } } } diff --git a/hail/src/main/scala/is/hail/backend/spark/SparkBackend.scala b/hail/src/main/scala/is/hail/backend/spark/SparkBackend.scala index f1ddfb72172..7b48b641487 100644 --- a/hail/src/main/scala/is/hail/backend/spark/SparkBackend.scala +++ b/hail/src/main/scala/is/hail/backend/spark/SparkBackend.scala @@ -670,7 +670,7 @@ class SparkBackend( def parse_value_ir(s: String, refMap: java.util.Map[String, String]): IR = { ExecutionTimer.logTime("SparkBackend.parse_value_ir") { timer => withExecuteContext(timer) { ctx => - IRParser.parse_value_ir(s, IRParserEnvironment(ctx, BindingEnv.eval(refMap.asScala.toMap.mapValues(IRParser.parseType).toSeq: _*), irMap = persistedIR.toMap)) + IRParser.parse_value_ir(s, IRParserEnvironment(ctx, irMap = persistedIR.toMap), BindingEnv.eval(refMap.asScala.toMap.mapValues(IRParser.parseType).toSeq: _*)) } } } diff --git a/hail/src/main/scala/is/hail/expr/ir/AggOp.scala b/hail/src/main/scala/is/hail/expr/ir/AggOp.scala index 3ffb10e9228..81f500a4288 100644 --- a/hail/src/main/scala/is/hail/expr/ir/AggOp.scala +++ b/hail/src/main/scala/is/hail/expr/ir/AggOp.scala @@ -26,9 +26,9 @@ object AggSignature { case class AggSignature( op: AggOp, - initOpArgs: Seq[Type], - seqOpArgs: Seq[Type]) { - + var initOpArgs: Seq[Type], + var seqOpArgs: Seq[Type] +) { // only to be used with virtual non-nested signatures on ApplyAggOp and ApplyScanOp lazy val returnType: Type = Extract.getResultType(this) } diff --git a/hail/src/main/scala/is/hail/expr/ir/BaseIR.scala 
b/hail/src/main/scala/is/hail/expr/ir/BaseIR.scala index 7fc1eb4d385..b3008b7a6c9 100644 --- a/hail/src/main/scala/is/hail/expr/ir/BaseIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/BaseIR.scala @@ -1,6 +1,7 @@ package is.hail.expr.ir import is.hail.types.BaseType +import is.hail.types.virtual.Type import is.hail.utils.StackSafe._ import is.hail.utils._ @@ -50,4 +51,33 @@ abstract class BaseIR { copy(newChildren) } } + + def forEachChildWithEnv(env: BindingEnv[Type])(f: (BaseIR, BindingEnv[Type]) => Unit): Unit = { + childrenSeq.view.zipWithIndex.foreach { case (child, i) => + val childEnv = ChildBindings(this, i, env) + f(child, childEnv) + } + } + + def mapChildrenWithEnv(env: BindingEnv[Type])(f: (BaseIR, BindingEnv[Type]) => BaseIR): BaseIR = { + val newChildren = childrenSeq.toArray + var res = this + for (i <- newChildren.indices) { + val childEnv = ChildBindings(res, i, env) + val child = newChildren(i) + val newChild = f(child, childEnv) + if (!(newChild eq child)) { + newChildren(i) = newChild + res = res.copy(newChildren) + } + } + res + } + + def forEachChildWithEnvStackSafe(env: BindingEnv[Type])(f: (BaseIR, Int, BindingEnv[Type]) => StackFrame[Unit]): StackFrame[Unit] = { + childrenSeq.view.zipWithIndex.foreachRecur { case (child, i) => + val childEnv = ChildBindings(this, i, env) + f(child, i, childEnv) + } + } } diff --git a/hail/src/main/scala/is/hail/expr/ir/Binds.scala b/hail/src/main/scala/is/hail/expr/ir/Binds.scala index b095aaa735e..1c6f3049b5a 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Binds.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Binds.scala @@ -11,11 +11,13 @@ object Binds { object Bindings { private val empty: Array[(String, Type)] = Array() + // A call to Bindings(x, i) may only query the types of children with + // index < i def apply(x: BaseIR, i: Int): Iterable[(String, Type)] = x match { case Let(name, value, _) => if (i == 1) Array(name -> value.typ) else empty - case TailLoop(name, args, body) => if (i == args.length) + case TailLoop(name, args, resultType, _) => if (i == args.length) args.map { case (name, ir) => name -> ir.typ } :+ - name -> TTuple(TTuple(args.map(_._2.typ): _*), body.typ) else empty + name -> TTuple(TTuple(args.map(_._2.typ): _*), resultType) else empty case StreamMap(a, name, _) => if (i == 1) Array(name -> tcoerce[TStream](a.typ).elementType) else empty case StreamZip(as, names, _, _, _) => if (i == as.length) names.zip(as.map(a => tcoerce[TStream](a.typ).elementType)) else empty case StreamZipJoin(as, key, curKey, curVals, _) => @@ -26,14 +28,14 @@ object Bindings { else empty case StreamZipJoinProducers(contexts, ctxName, makeProducer, key, curKey, curVals, _) => - val contextType = TIterable.elementType(contexts.typ) - val eltType = tcoerce[TStruct](tcoerce[TStream](makeProducer.typ).elementType) - if (i == 1) + if (i == 1) { + val contextType = TIterable.elementType(contexts.typ) Array(ctxName -> contextType) - else if (i == 2) + } else if (i == 2) { + val eltType = tcoerce[TStruct](tcoerce[TStream](makeProducer.typ).elementType) Array(curKey -> eltType.typeAfterSelectNames(key), curVals -> TArray(eltType)) - else + } else empty case StreamFor(a, name, _) => if (i == 1) Array(name -> tcoerce[TStream](a.typ).elementType) else empty case StreamFlatMap(a, name, _) => if (i == 1) Array(name -> tcoerce[TStream](a.typ).elementType) else empty diff --git a/hail/src/main/scala/is/hail/expr/ir/BlockMatrixIR.scala b/hail/src/main/scala/is/hail/expr/ir/BlockMatrixIR.scala index fe4099ccfb6..b56a02edbc2 100644 --- 
a/hail/src/main/scala/is/hail/expr/ir/BlockMatrixIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/BlockMatrixIR.scala @@ -68,6 +68,8 @@ abstract sealed class BlockMatrixIR extends BaseIR { def copy(newChildren: IndexedSeq[BaseIR]): BlockMatrixIR def blockCostIsLinear: Boolean + + def typecheck(): Unit = {} } case class BlockMatrixRead(reader: BlockMatrixReader) extends BlockMatrixIR { @@ -246,8 +248,11 @@ case class BlockMatrixPersistReader(id: String, typ: BlockMatrixType) extends Bl } case class BlockMatrixMap(child: BlockMatrixIR, eltName: String, f: IR, needsDense: Boolean) extends BlockMatrixIR { - override lazy val typ: BlockMatrixType = child.typ - assert(!needsDense || !typ.isSparse) + override def typecheck(): Unit = { + assert(!(needsDense && child.typ.isSparse)) + } + + override def typ: BlockMatrixType = child.typ lazy val childrenSeq: IndexedSeq[BaseIR] = Array(child, f) @@ -370,11 +375,19 @@ case object NeedsDense extends SparsityStrategy { } } -case class BlockMatrixMap2(left: BlockMatrixIR, right: BlockMatrixIR, leftName: String, rightName: String, f: IR, sparsityStrategy: SparsityStrategy) extends BlockMatrixIR { - assert( - left.typ.nRows == right.typ.nRows && - left.typ.nCols == right.typ.nCols && - left.typ.blockSize == right.typ.blockSize) +case class BlockMatrixMap2( + left: BlockMatrixIR, + right: BlockMatrixIR, + leftName: String, + rightName: String, + f: IR, + sparsityStrategy: SparsityStrategy +) extends BlockMatrixIR { + override def typecheck(): Unit = { + assert(left.typ.nRows == right.typ.nRows) + assert(left.typ.nCols == right.typ.nCols) + assert(left.typ.blockSize == right.typ.blockSize) + } override lazy val typ: BlockMatrixType = left.typ.copy(sparsity = sparsityStrategy.mergeSparsity(left.typ.sparsity, right.typ.sparsity)) @@ -477,7 +490,6 @@ case class BlockMatrixMap2(left: BlockMatrixIR, right: BlockMatrixIR, leftName: } case class BlockMatrixDot(left: BlockMatrixIR, right: BlockMatrixIR) extends BlockMatrixIR { - override lazy val typ: BlockMatrixType = { val blockSize = left.typ.blockSize val (lRows, lCols) = BlockMatrixIR.tensorShapeToMatrixShape(left) @@ -531,20 +543,24 @@ case class BlockMatrixBroadcast( child: BlockMatrixIR, inIndexExpr: IndexedSeq[Int], shape: IndexedSeq[Long], - blockSize: Int) extends BlockMatrixIR { + blockSize: Int +) extends BlockMatrixIR { val blockCostIsLinear: Boolean = child.blockCostIsLinear assert(shape.length == 2) assert(inIndexExpr.length <= 2 && inIndexExpr.forall(x => x == 0 || x == 1)) - val (nRows, nCols) = BlockMatrixIR.tensorShapeToMatrixShape(child) - val childMatrixShape = IndexedSeq(nRows, nCols) - assert(inIndexExpr.zipWithIndex.forall({ case (out: Int, in: Int) => - !child.typ.shape.contains(in) || childMatrixShape(in) == shape(out) - })) + override def typecheck(): Unit = { + val (nRows, nCols) = BlockMatrixIR.tensorShapeToMatrixShape(child) + val childMatrixShape = IndexedSeq(nRows, nCols) + + assert(inIndexExpr.zipWithIndex.forall({ case (out: Int, in: Int) => + !child.typ.shape.contains(in) || childMatrixShape(in) == shape(out) + })) + } - override val typ: BlockMatrixType = { + override lazy val typ: BlockMatrixType = { val (tensorShape, isRowVector) = BlockMatrixIR.matrixShapeToTensorShape(shape(0), shape(1)) val nRowBlocks = BlockMatrixType.numBlocks(shape(0), blockSize) val nColBlocks = BlockMatrixType.numBlocks(shape(1), blockSize) @@ -626,11 +642,12 @@ case class BlockMatrixBroadcast( case class BlockMatrixAgg( child: BlockMatrixIR, - axesToSumOut: IndexedSeq[Int]) extends BlockMatrixIR { + 
axesToSumOut: IndexedSeq[Int] +) extends BlockMatrixIR { val blockCostIsLinear: Boolean = child.blockCostIsLinear - assert(axesToSumOut.length > 0) + assert(axesToSumOut.nonEmpty) override lazy val typ: BlockMatrixType = { val matrixShape = BlockMatrixIR.tensorShapeToMatrixShape(child) @@ -675,21 +692,22 @@ case class BlockMatrixAgg( case class BlockMatrixFilter( child: BlockMatrixIR, - indices: Array[Array[Long]]) extends BlockMatrixIR { + indices: Array[Array[Long]] +) extends BlockMatrixIR { assert(indices.length == 2) val blockCostIsLinear: Boolean = child.blockCostIsLinear private[this] val Array(keepRow, keepCol) = indices - private[this] val blockSize = child.typ.blockSize - lazy val keepRowPartitioned: Array[Array[Long]] = keepRow.grouped(blockSize).toArray - lazy val keepColPartitioned: Array[Array[Long]] = keepCol.grouped(blockSize).toArray + override lazy val typ: BlockMatrixType = { + val blockSize = child.typ.blockSize + val keepRowPartitioned: Array[Array[Long]] = keepRow.grouped(blockSize).toArray + val keepColPartitioned: Array[Array[Long]] = keepCol.grouped(blockSize).toArray - lazy val rowBlockDependents: Array[Array[Int]] = child.typ.rowBlockDependents(keepRowPartitioned) - lazy val colBlockDependents: Array[Array[Int]] = child.typ.colBlockDependents(keepColPartitioned) + val rowBlockDependents: Array[Array[Int]] = child.typ.rowBlockDependents(keepRowPartitioned) + val colBlockDependents: Array[Array[Int]] = child.typ.colBlockDependents(keepColPartitioned) - override lazy val typ: BlockMatrixType = { val childTensorShape = child.typ.shape val childMatrixShape = (childTensorShape, child.typ.isRowVector) match { case (IndexedSeq(vectorLength), true) => IndexedSeq(1, vectorLength) @@ -918,7 +936,11 @@ case class BlockMatrixSlice(child: BlockMatrixIR, slices: IndexedSeq[IndexedSeq[ case class ValueToBlockMatrix( child: IR, shape: IndexedSeq[Long], - blockSize: Int) extends BlockMatrixIR { + blockSize: Int +) extends BlockMatrixIR { + override def typecheck(): Unit = { + assert(child.typ.isInstanceOf[TArray] || child.typ.isInstanceOf[TNDArray] || child.typ == TFloat64) + } assert(shape.length == 2) @@ -984,7 +1006,7 @@ case class BlockMatrixRandom( } case class RelationalLetBlockMatrix(name: String, value: IR, body: BlockMatrixIR) extends BlockMatrixIR { - override lazy val typ: BlockMatrixType = body.typ + override def typ: BlockMatrixType = body.typ def childrenSeq: IndexedSeq[BaseIR] = Array(value, body) diff --git a/hail/src/main/scala/is/hail/expr/ir/Children.scala b/hail/src/main/scala/is/hail/expr/ir/Children.scala index 7aec32d3d53..d81ab5c764e 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Children.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Children.scala @@ -40,7 +40,7 @@ object Children { Array(value, body) case AggLet(name, value, body, _) => Array(value, body) - case TailLoop(_, args, body) => + case TailLoop(_, args, _, body) => args.map(_._2).toFastSeq :+ body case Recur(_, args, _) => args.toFastSeq @@ -227,7 +227,7 @@ object Children { case Trap(child) => Array(child) case ConsoleLog(message, result) => Array(message, result) - case ApplyIR(_, _, args, _) => + case ApplyIR(_, _, args, _, _) => args.toFastSeq case Apply(_, _, args, _, _) => args.toFastSeq diff --git a/hail/src/main/scala/is/hail/expr/ir/ComparisonOp.scala b/hail/src/main/scala/is/hail/expr/ir/ComparisonOp.scala index b406445130f..f9d83ebb980 100644 --- a/hail/src/main/scala/is/hail/expr/ir/ComparisonOp.scala +++ b/hail/src/main/scala/is/hail/expr/ir/ComparisonOp.scala @@ -14,28 +14,21 
@@ object ComparisonOp { if (lt != rt) throw new RuntimeException(s"Cannot compare types $lt and $rt") - val fromStringAndTypes: PartialFunction[(String, Type, Type), ComparisonOp[_]] = { - case ("==" | "EQ", t1, t2) => - checkCompatible(t1, t2) - EQ(t1, t2) - case ("!=" | "NEQ", t1, t2) => - checkCompatible(t1, t2) - NEQ(t1, t2) - case (">=" | "GTEQ", t1, t2) => - checkCompatible(t1, t2) - GTEQ(t1, t2) - case ("<=" | "LTEQ", t1, t2) => - checkCompatible(t1, t2) - LTEQ(t1, t2) - case (">" | "GT", t1, t2) => - checkCompatible(t1, t2) - GT(t1, t2) - case ("<" | "LT", t1, t2) => - checkCompatible(t1, t2) - LT(t1, t2) - case ("Compare", t1, t2) => - checkCompatible(t1, t2) - Compare(t1, t2) + val fromString: PartialFunction[String, ComparisonOp[_]] = { + case "==" | "EQ" => + EQ(null, null) + case "!=" | "NEQ" => + NEQ(null, null) + case ">=" | "GTEQ" => + GTEQ(null, null) + case "<=" | "LTEQ" => + LTEQ(null, null) + case ">" | "GT" => + GT(null, null) + case "<" | "LT" => + LT(null, null) + case "Compare" => + Compare(null, null) } def negate(op: ComparisonOp[Boolean]): ComparisonOp[Boolean] = { @@ -84,33 +77,56 @@ sealed trait ComparisonOp[ReturnType] { } def render(): is.hail.utils.prettyPrint.Doc = Pretty.prettyClass(this) + + def copy(t1: Type, t2: Type): ComparisonOp[ReturnType] } -case class GT(t1: Type, t2: Type) extends ComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.Gt() } +case class GT(t1: Type, t2: Type) extends ComparisonOp[Boolean] { + val op: CodeOrdering.Op = CodeOrdering.Gt() + override def copy(t1: Type = t1, t2: Type = t2): GT = GT(t1, t2) +} object GT { def apply(typ: Type): GT = GT(typ, typ) } -case class GTEQ(t1: Type, t2: Type) extends ComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.Gteq() } +case class GTEQ(t1: Type, t2: Type) extends ComparisonOp[Boolean] { + val op: CodeOrdering.Op = CodeOrdering.Gteq() + override def copy(t1: Type = t1, t2: Type = t2): GTEQ = GTEQ(t1, t2) +} object GTEQ { def apply(typ: Type): GTEQ = GTEQ(typ, typ) } -case class LTEQ(t1: Type, t2: Type) extends ComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.Lteq() } +case class LTEQ(t1: Type, t2: Type) extends ComparisonOp[Boolean] { + val op: CodeOrdering.Op = CodeOrdering.Lteq() + override def copy(t1: Type = t1, t2: Type = t2): LTEQ = LTEQ(t1, t2) +} object LTEQ { def apply(typ: Type): LTEQ = LTEQ(typ, typ) } -case class LT(t1: Type, t2: Type) extends ComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.Lt() } +case class LT(t1: Type, t2: Type) extends ComparisonOp[Boolean] { + val op: CodeOrdering.Op = CodeOrdering.Lt() + override def copy(t1: Type = t1, t2: Type = t2): LT = LT(t1, t2) +} object LT { def apply(typ: Type): LT = LT(typ, typ) } -case class EQ(t1: Type, t2: Type) extends ComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.Equiv() } +case class EQ(t1: Type, t2: Type) extends ComparisonOp[Boolean] { + val op: CodeOrdering.Op = CodeOrdering.Equiv() + override def copy(t1: Type = t1, t2: Type = t2): EQ = EQ(t1, t2) +} object EQ { def apply(typ: Type): EQ = EQ(typ, typ) } -case class NEQ(t1: Type, t2: Type) extends ComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.Neq() } +case class NEQ(t1: Type, t2: Type) extends ComparisonOp[Boolean] { + val op: CodeOrdering.Op = CodeOrdering.Neq() + override def copy(t1: Type = t1, t2: Type = t2): NEQ = NEQ(t1, t2) +} object NEQ { def apply(typ: Type): NEQ = NEQ(typ, typ) } case class EQWithNA(t1: Type, t2: Type) extends ComparisonOp[Boolean] { val op: CodeOrdering.Op = 
CodeOrdering.Equiv() override val strict: Boolean = false + override def copy(t1: Type = t1, t2: Type = t2): EQWithNA = EQWithNA(t1, t2) } object EQWithNA { def apply(typ: Type): EQWithNA = EQWithNA(typ, typ) } case class NEQWithNA(t1: Type, t2: Type) extends ComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.Neq() override val strict: Boolean = false + override def copy(t1: Type = t1, t2: Type = t2): NEQWithNA = NEQWithNA(t1, t2) } object NEQWithNA { def apply(typ: Type): NEQWithNA = NEQWithNA(typ, typ) } case class Compare(t1: Type, t2: Type) extends ComparisonOp[Int] { override val strict: Boolean = false val op: CodeOrdering.Op = CodeOrdering.Compare() + override def copy(t1: Type = t1, t2: Type = t2): Compare = Compare(t1, t2) } object Compare { def apply(typ: Type): Compare = Compare(typ, typ) } @@ -126,28 +142,33 @@ trait StructComparisonOp[T] extends ComparisonOp[T] { case class StructCompare(t1: Type, t2: Type, sortFields: Array[SortField]) extends StructComparisonOp[Int] { val op: CodeOrdering.Op = CodeOrdering.StructCompare() override val strict: Boolean = false + override def copy(t1: Type = t1, t2: Type = t2): StructCompare = StructCompare(t1, t2, sortFields) } case class StructLT(t1: Type, t2: Type, sortFields: Array[SortField]) extends StructComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.StructLt() + override def copy(t1: Type = t1, t2: Type = t2): StructLT = StructLT(t1, t2, sortFields) } object StructLT { def apply(typ: Type, sortFields: IndexedSeq[SortField]): StructLT = StructLT(typ, typ, sortFields.toArray) } case class StructLTEQ(t1: Type, t2: Type, sortFields: Array[SortField]) extends StructComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.StructLteq() + override def copy(t1: Type = t1, t2: Type = t2): StructLTEQ = StructLTEQ(t1, t2, sortFields) } object StructLTEQ { def apply(typ: Type, sortFields: IndexedSeq[SortField]): StructLTEQ = StructLTEQ(typ, typ, sortFields.toArray) } case class StructGT(t1: Type, t2: Type, sortFields: Array[SortField]) extends StructComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.StructGt() + override def copy(t1: Type = t1, t2: Type = t2): StructGT = StructGT(t1, t2, sortFields) } object StructGT { def apply(typ: Type, sortFields: IndexedSeq[SortField]): StructGT = StructGT(typ, typ, sortFields.toArray) } case class StructGTEQ(t1: Type, t2: Type, sortFields: Array[SortField]) extends StructComparisonOp[Boolean] { val op: CodeOrdering.Op = CodeOrdering.StructGteq() + override def copy(t1: Type = t1, t2: Type = t2): StructGTEQ = StructGTEQ(t1, t2, sortFields) } object StructGTEQ { def apply(typ: Type, sortFields: IndexedSeq[SortField]): StructGTEQ = StructGTEQ(typ, typ, sortFields.toArray) } diff --git a/hail/src/main/scala/is/hail/expr/ir/Copy.scala b/hail/src/main/scala/is/hail/expr/ir/Copy.scala index 5e1bd4a4030..93f6ef4913b 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Copy.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Copy.scala @@ -40,9 +40,9 @@ object Copy { case AggLet(name, _, _, isScan) => assert(newChildren.length == 2) AggLet(name, newChildren(0).asInstanceOf[IR], newChildren(1).asInstanceOf[IR], isScan) - case TailLoop(name, params, _) => + case TailLoop(name, params, resultType, _) => assert(newChildren.length == params.length + 1) - TailLoop(name, params.map(_._1).zip(newChildren.init.map(_.asInstanceOf[IR])), newChildren.last.asInstanceOf[IR]) + TailLoop(name, params.map(_._1).zip(newChildren.init.map(_.asInstanceOf[IR])), resultType, 
newChildren.last.asInstanceOf[IR]) case Recur(name, args, t) => assert(newChildren.length == args.length) Recur(name, newChildren.map(_.asInstanceOf[IR]), t) @@ -339,8 +339,8 @@ object Copy { case ConsoleLog(message, result) => assert(newChildren.length == 2) ConsoleLog(newChildren(0).asInstanceOf[IR], newChildren(1).asInstanceOf[IR]) - case x@ApplyIR(fn, typeArgs, args, errorID) => - val r = ApplyIR(fn, typeArgs, newChildren.map(_.asInstanceOf[IR]), errorID) + case x@ApplyIR(fn, typeArgs, args, rt, errorID) => + val r = ApplyIR(fn, typeArgs, newChildren.map(_.asInstanceOf[IR]), rt, errorID) r.conversion = x.conversion r.inline = x.inline r diff --git a/hail/src/main/scala/is/hail/expr/ir/Emit.scala b/hail/src/main/scala/is/hail/expr/ir/Emit.scala index ca9c1cefcd1..ecfc03297ef 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Emit.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Emit.scala @@ -2368,7 +2368,7 @@ class Emit[C]( } } - case x@TailLoop(name, args, body) => + case x@TailLoop(name, args, _, body) => val loopStartLabel = CodeLabel() val accTypes = ctx.req.lookupState(x).zip(args.map(_._2.typ)) diff --git a/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala b/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala index ccdb45572a8..4619fe17287 100644 --- a/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala +++ b/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala @@ -755,7 +755,7 @@ class ExtractIntervalFilters(ctx: ExecuteContext, keyType: TStruct) { .restrict(keySet) case (IsNA(_), Seq(b: BoolValue)) => b.isNA.restrict(keySet) // collection contains - case (ApplyIR("contains", _, _, _), Seq(ConstantValue(collectionVal), queryVal)) if literalSizeOkay(collectionVal) => + case (ApplyIR("contains", _, _, _, _), Seq(ConstantValue(collectionVal), queryVal)) if literalSizeOkay(collectionVal) => if (collectionVal == null) { BoolValue.allNA(keySet) } else queryVal match { diff --git a/hail/src/main/scala/is/hail/expr/ir/IR.scala b/hail/src/main/scala/is/hail/expr/ir/IR.scala index 54367a89db9..ccaba5925d5 100644 --- a/hail/src/main/scala/is/hail/expr/ir/IR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/IR.scala @@ -196,22 +196,27 @@ sealed abstract class BaseRef extends IR with TrivialIR { def _typ: Type } -final case class Ref(name: String, var _typ: Type) extends BaseRef +final case class Ref(name: String, var _typ: Type) extends BaseRef { + override def typ: Type = { + assert(_typ != null) + _typ + } +} // Recur can't exist outside of loop // Loops can be nested, but we can't call outer loops in terms of inner loops so there can only be one loop "active" in a given context -final case class TailLoop(name: String, params: IndexedSeq[(String, IR)], body: IR) extends IR { +final case class TailLoop(name: String, params: IndexedSeq[(String, IR)], resultType: Type, body: IR) extends IR { lazy val paramIdx: Map[String, Int] = params.map(_._1).zipWithIndex.toMap } -final case class Recur(name: String, args: IndexedSeq[IR], _typ: Type) extends BaseRef +final case class Recur(name: String, args: IndexedSeq[IR], var _typ: Type) extends BaseRef final case class RelationalLet(name: String, value: IR, body: IR) extends IR final case class RelationalRef(name: String, _typ: Type) extends BaseRef final case class ApplyBinaryPrimOp(op: BinaryOp, l: IR, r: IR) extends IR final case class ApplyUnaryPrimOp(op: UnaryOp, x: IR) extends IR -final case class ApplyComparisonOp(op: ComparisonOp[_], l: IR, r: IR) extends IR +final case class 
ApplyComparisonOp(var op: ComparisonOp[_], l: IR, r: IR) extends IR object MakeArray { def apply(args: IR*): MakeArray = { @@ -319,8 +324,7 @@ final case class StreamLen(a: IR) extends IR final case class StreamGrouped(a: IR, groupSize: IR) extends IR final case class StreamGroupByKey(a: IR, key: IndexedSeq[String], missingEqual: Boolean) extends IR -final case class StreamMap(a: IR, name: String, body: IR) extends IR { - override def typ: TStream = tcoerce[TStream](super.typ) +final case class StreamMap(a: IR, name: String, body: IR) extends TypedIR[TStream] { def elementTyp: Type = typ.elementType } @@ -354,34 +358,30 @@ object ArrayZipBehavior extends Enumeration { val ExtendNA: Value = Value(3) } -final case class StreamZip(as: IndexedSeq[IR], names: IndexedSeq[String], body: IR, behavior: ArrayZipBehavior, - errorID: Int = ErrorIDs.NO_ERROR) extends IR { - lazy val nameIdx: Map[String, Int] = names.zipWithIndex.toMap - override def typ: TStream = tcoerce[TStream](super.typ) -} -final case class StreamMultiMerge(as: IndexedSeq[IR], key: IndexedSeq[String]) extends IR { - override def typ: TStream = tcoerce[TStream](super.typ) -} +final case class StreamZip( + as: IndexedSeq[IR], names: IndexedSeq[String], body: IR, behavior: ArrayZipBehavior, + errorID: Int = ErrorIDs.NO_ERROR +) extends TypedIR[TStream] + +final case class StreamMultiMerge(as: IndexedSeq[IR], key: IndexedSeq[String]) extends TypedIR[TStream] -final case class StreamZipJoinProducers(contexts: IR, ctxName: String, makeProducer: IR, - key: IndexedSeq[String], curKey: String, curVals: String, joinF: IR) extends IR { - override def typ: TStream = tcoerce[TStream](super.typ) -} + +final case class StreamZipJoinProducers( + contexts: IR, ctxName: String, makeProducer: IR, key: IndexedSeq[String], + curKey: String, curVals: String, joinF: IR +) extends TypedIR[TStream] /** * The StreamZipJoin node assumes that input streams have distinct keys. If input streams * do not have distinct keys, the key that is included in the result is undefined, but * is likely the last. 
*/ -final case class StreamZipJoin(as: IndexedSeq[IR], key: IndexedSeq[String], curKey: String, curVals: String, joinF: IR) extends IR { - override def typ: TStream = tcoerce[TStream](super.typ) -} -final case class StreamFilter(a: IR, name: String, cond: IR) extends IR { - override def typ: TStream = tcoerce[TStream](super.typ) -} -final case class StreamFlatMap(a: IR, name: String, body: IR) extends IR { - override def typ: TStream = tcoerce[TStream](super.typ) -} +final case class StreamZipJoin( + as: IndexedSeq[IR], key: IndexedSeq[String], curKey: String, curVals: String, joinF: IR +) extends TypedIR[TStream] + +final case class StreamFilter(a: IR, name: String, cond: IR) extends TypedIR[TStream] +final case class StreamFlatMap(a: IR, name: String, body: IR) extends TypedIR[TStream] final case class StreamFold(a: IR, zero: IR, accumName: String, valueName: String, body: IR) extends IR @@ -661,10 +661,7 @@ final case class SelectFields(old: IR, fields: IndexedSeq[String]) extends IR object InsertFields { def apply(old: IR, fields: Seq[(String, IR)]): InsertFields = InsertFields(old, fields, None) } -final case class InsertFields(old: IR, fields: Seq[(String, IR)], fieldOrder: Option[IndexedSeq[String]]) extends IR { - - override def typ: TStruct = tcoerce[TStruct](super.typ) -} +final case class InsertFields(old: IR, fields: Seq[(String, IR)], fieldOrder: Option[IndexedSeq[String]]) extends TypedIR[TStruct] object GetFieldByIdx { def apply(s: IR, field: Int): IR = { @@ -715,7 +712,7 @@ final case class Trap(child: IR) extends IR final case class Die(message: IR, _typ: Type, errorId: Int) extends IR final case class ConsoleLog(message: IR, result: IR) extends IR -final case class ApplyIR(function: String, typeArgs: Seq[Type], args: Seq[IR], errorID: Int) extends IR { +final case class ApplyIR(function: String, typeArgs: Seq[Type], args: Seq[IR], returnType: Type, errorID: Int) extends IR { var conversion: (Seq[Type], Seq[IR], Int) => IR = _ var inline: Boolean = _ @@ -725,7 +722,9 @@ final case class ApplyIR(function: String, typeArgs: Seq[Type], args: Seq[IR], e lazy val explicitNode: IR = { // foldRight because arg1 should be at the top so it is evaluated first - refs.zip(args).foldRight(body) { case ((ref, arg), bodyIR) => Let(ref.name, arg, bodyIR) } + val ir = refs.zip(args).foldRight(body) { case ((ref, arg), bodyIR) => Let(ref.name, arg, bodyIR) } + assert(ir.typ == returnType) + ir } } @@ -756,22 +755,14 @@ final case class MatrixAggregate(child: MatrixIR, query: IR) extends IR final case class TableWrite(child: TableIR, writer: TableWriter) extends IR -final case class TableMultiWrite(_children: IndexedSeq[TableIR], writer: WrappedMatrixNativeMultiWriter) extends IR { - private val t = _children.head.typ - require(_children.forall(_.typ == t)) -} +final case class TableMultiWrite(_children: IndexedSeq[TableIR], writer: WrappedMatrixNativeMultiWriter) extends IR final case class TableGetGlobals(child: TableIR) extends IR final case class TableCollect(child: TableIR) extends IR final case class MatrixWrite(child: MatrixIR, writer: MatrixWriter) extends IR -final case class MatrixMultiWrite(_children: IndexedSeq[MatrixIR], writer: MatrixNativeMultiWriter) extends IR { - private val t = _children.head.typ - assert(!t.rowType.hasField(MatrixReader.rowUIDFieldName) && - !t.colType.hasField(MatrixReader.colUIDFieldName), t) - require(_children.forall(_.typ == t)) -} +final case class MatrixMultiWrite(_children: IndexedSeq[MatrixIR], writer: MatrixNativeMultiWriter) extends IR final 
case class TableToValueApply(child: TableIR, function: TableToValueFunction) extends IR final case class MatrixToValueApply(child: MatrixIR, function: MatrixToValueFunction) extends IR @@ -960,10 +951,7 @@ final case class SimpleMetadataWriter(val annotationType: Type) extends Metadata writeAnnotations.consume(cb, {}, {_ => ()}) } -final case class ReadPartition(context: IR, rowType: TStruct, reader: PartitionReader) extends IR { - assert(context.typ == reader.contextType, s"context: ${context.typ}, expected: ${reader.contextType}") - assert(PruneDeadFields.isSupertype(rowType, reader.fullRowType), s"requested type: $rowType, full type: ${reader.fullRowType}") -} +final case class ReadPartition(context: IR, rowType: TStruct, reader: PartitionReader) extends IR final case class WritePartition(value: IR, writeCtx: IR, writer: PartitionWriter) extends IR final case class WriteMetadata(writeAnnotations: IR, writer: MetadataWriter) extends IR diff --git a/hail/src/main/scala/is/hail/expr/ir/InTailPosition.scala b/hail/src/main/scala/is/hail/expr/ir/InTailPosition.scala index 2a83dc673b7..108e3082f7e 100644 --- a/hail/src/main/scala/is/hail/expr/ir/InTailPosition.scala +++ b/hail/src/main/scala/is/hail/expr/ir/InTailPosition.scala @@ -5,7 +5,7 @@ object InTailPosition { case Let(_, _, _) => i == 1 case If(_, _, _) => i != 0 case _: Switch => i != 0 - case TailLoop(_, params, _) => i == params.length + case TailLoop(_, params, _, _) => i == params.length case _ => false } } diff --git a/hail/src/main/scala/is/hail/expr/ir/InferType.scala b/hail/src/main/scala/is/hail/expr/ir/InferType.scala index 7c7b3f78d40..528577b4384 100644 --- a/hail/src/main/scala/is/hail/expr/ir/InferType.scala +++ b/hail/src/main/scala/is/hail/expr/ir/InferType.scala @@ -66,8 +66,8 @@ object InferType { body.typ case AggLet(name, value, body, _) => body.typ - case TailLoop(_, _, body) => - body.typ + case TailLoop(_, _, resultType, _) => + resultType case Recur(_, _, typ) => typ case ApplyBinaryPrimOp(op, l, r) => @@ -80,7 +80,7 @@ object InferType { case _: Compare => TInt32 case _ => TBoolean } - case a: ApplyIR => a.explicitNode.typ + case a: ApplyIR => a.returnType case a: AbstractApplyNode[_] => val typeArgs = a.typeArgs val argTypes = a.args.map(_.typ) diff --git a/hail/src/main/scala/is/hail/expr/ir/Interpret.scala b/hail/src/main/scala/is/hail/expr/ir/Interpret.scala index 98979a60db0..e6f7127865e 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Interpret.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Interpret.scala @@ -790,7 +790,7 @@ object Interpret { val message_ = interpret(message).asInstanceOf[String] info(message_) interpret(result) - case ir@ApplyIR(function, _, functionArgs, _) => + case ir@ApplyIR(function, _, _, functionArgs, _) => interpret(ir.explicitNode, env, args) case ApplySpecial("lor", _, Seq(left_, right_), _, _) => val left = interpret(left_) diff --git a/hail/src/main/scala/is/hail/expr/ir/MatrixIR.scala b/hail/src/main/scala/is/hail/expr/ir/MatrixIR.scala index bce01b0b994..7e08faa9901 100644 --- a/hail/src/main/scala/is/hail/expr/ir/MatrixIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/MatrixIR.scala @@ -51,6 +51,8 @@ abstract sealed class MatrixIR extends BaseIR { } def pyUnpersist(): MatrixIR = unpersist() + + def typecheck(): Unit = {} } object MatrixLiteral { @@ -482,7 +484,7 @@ case class MatrixFilterCols(child: MatrixIR, pred: IR) extends MatrixIR { MatrixFilterCols(newChildren(0).asInstanceOf[MatrixIR], newChildren(1).asInstanceOf[IR]) } - val typ: MatrixType = child.typ + def 
typ: MatrixType = child.typ override def partitionCounts: Option[IndexedSeq[Long]] = child.partitionCounts @@ -513,7 +515,7 @@ case class MatrixChooseCols(child: MatrixIR, oldIndices: IndexedSeq[Int]) extend MatrixChooseCols(newChildren(0).asInstanceOf[MatrixIR], oldIndices) } - val typ: MatrixType = child.typ + def typ: MatrixType = child.typ override def partitionCounts: Option[IndexedSeq[Long]] = child.partitionCounts @@ -530,7 +532,7 @@ case class MatrixCollectColsByKey(child: MatrixIR) extends MatrixIR { MatrixCollectColsByKey(newChildren(0).asInstanceOf[MatrixIR]) } - val typ: MatrixType = { + lazy val typ: MatrixType = { val newColValueType = TStruct(child.typ.colValueStruct.fields.map(f => f.copy(typ = TArray(f.typ)))) val newColType = child.typ.colKeyStruct ++ newColValueType val newEntryType = TStruct(child.typ.entryType.fields.map(f => f.copy(typ = TArray(f.typ)))) @@ -544,7 +546,9 @@ case class MatrixCollectColsByKey(child: MatrixIR) extends MatrixIR { } case class MatrixAggregateRowsByKey(child: MatrixIR, entryExpr: IR, rowExpr: IR) extends MatrixIR { - require(child.typ.rowKey.nonEmpty) + override def typecheck(): Unit = { + assert(child.typ.rowKey.nonEmpty) + } lazy val childrenSeq: IndexedSeq[BaseIR] = Array(child, entryExpr, rowExpr) @@ -553,7 +557,7 @@ case class MatrixAggregateRowsByKey(child: MatrixIR, entryExpr: IR, rowExpr: IR) MatrixAggregateRowsByKey(newChild, newEntryExpr, newRowExpr) } - val typ: MatrixType = child.typ.copy( + lazy val typ: MatrixType = child.typ.copy( rowType = child.typ.rowKeyStruct ++ tcoerce[TStruct](rowExpr.typ), entryType = tcoerce[TStruct](entryExpr.typ) ) @@ -564,7 +568,9 @@ case class MatrixAggregateRowsByKey(child: MatrixIR, entryExpr: IR, rowExpr: IR) } case class MatrixAggregateColsByKey(child: MatrixIR, entryExpr: IR, colExpr: IR) extends MatrixIR { - require(child.typ.colKey.nonEmpty) + override def typecheck(): Unit = { + assert(child.typ.colKey.nonEmpty) + } lazy val childrenSeq: IndexedSeq[BaseIR] = Array(child, entryExpr, colExpr) @@ -573,7 +579,7 @@ case class MatrixAggregateColsByKey(child: MatrixIR, entryExpr: IR, colExpr: IR) MatrixAggregateColsByKey(newChild, newEntryExpr, newColExpr) } - val typ = child.typ.copy( + lazy val typ = child.typ.copy( entryType = tcoerce[TStruct](entryExpr.typ), colType = child.typ.colKeyStruct ++ tcoerce[TStruct](colExpr.typ)) @@ -584,7 +590,12 @@ case class MatrixAggregateColsByKey(child: MatrixIR, entryExpr: IR, colExpr: IR) case class MatrixUnionCols(left: MatrixIR, right: MatrixIR, joinType: String) extends MatrixIR { require(joinType == "inner" || joinType == "outer") - require(left.typ.rowKeyStruct isIsomorphicTo right.typ.rowKeyStruct) + + override def typecheck(): Unit = { + assert(left.typ.rowKeyStruct == right.typ.rowKeyStruct, s"${left.typ.rowKeyStruct} != ${right.typ.rowKeyStruct}") + assert(left.typ.colType == right.typ.colType, s"${left.typ.colType} != ${right.typ.colType}") + assert(left.typ.entryType == right.typ.entryType, s"${left.typ.entryType} != ${right.typ.entryType}") + } lazy val childrenSeq: IndexedSeq[BaseIR] = Array(left, right) @@ -593,7 +604,7 @@ case class MatrixUnionCols(left: MatrixIR, right: MatrixIR, joinType: String) ex MatrixUnionCols(newChildren(0).asInstanceOf[MatrixIR], newChildren(1).asInstanceOf[MatrixIR], joinType) } - private val newRowType = { + private def newRowType = { val leftKeyType = left.typ.rowKeyStruct val leftValueType = left.typ.rowValueStruct val rightValueType = right.typ.rowValueStruct @@ -605,7 +616,7 @@ case class MatrixUnionCols(left: 
MatrixIR, right: MatrixIR, joinType: String) ex leftKeyType ++ leftValueType ++ rightValueType } - val typ: MatrixType = if (joinType == "inner") + lazy val typ: MatrixType = if (joinType == "inner") left.typ.copy(rowType = newRowType) else left.typ.copy( @@ -632,7 +643,7 @@ case class MatrixMapEntries(child: MatrixIR, newEntries: IR) extends MatrixIR { MatrixMapEntries(newChildren(0).asInstanceOf[MatrixIR], newChildren(1).asInstanceOf[IR]) } - val typ: MatrixType = + lazy val typ: MatrixType = child.typ.copy(entryType = tcoerce[TStruct](newEntries.typ)) override def partitionCounts: Option[IndexedSeq[Long]] = child.partitionCounts @@ -643,12 +654,14 @@ case class MatrixMapEntries(child: MatrixIR, newEntries: IR) extends MatrixIR { } case class MatrixKeyRowsBy(child: MatrixIR, keys: IndexedSeq[String], isSorted: Boolean = false) extends MatrixIR { - private val fields = child.typ.rowType.fieldNames.toSet - assert(keys.forall(fields.contains), s"${ keys.filter(k => !fields.contains(k)).mkString(", ") }") + override def typecheck(): Unit = { + val fields = child.typ.rowType.fieldNames.toSet + assert(keys.forall(fields.contains), s"${keys.filter(k => !fields.contains(k)).mkString(", ")}") + } val childrenSeq: IndexedSeq[BaseIR] = Array(child) - val typ: MatrixType = child.typ.copy(rowKey = keys) + lazy val typ: MatrixType = child.typ.copy(rowKey = keys) def copy(newChildren: IndexedSeq[BaseIR]): MatrixKeyRowsBy = { assert(newChildren.length == 1) @@ -669,7 +682,7 @@ case class MatrixMapRows(child: MatrixIR, newRow: IR) extends MatrixIR { MatrixMapRows(newChildren(0).asInstanceOf[MatrixIR], newChildren(1).asInstanceOf[IR]) } - val typ: MatrixType = { + lazy val typ: MatrixType = { child.typ.copy(rowType = newRow.typ.asInstanceOf[TStruct]) } @@ -688,7 +701,7 @@ case class MatrixMapCols(child: MatrixIR, newCol: IR, newKey: Option[IndexedSeq[ MatrixMapCols(newChildren(0).asInstanceOf[MatrixIR], newChildren(1).asInstanceOf[IR], newKey) } - val typ: MatrixType = { + lazy val typ: MatrixType = { val newColType = newCol.typ.asInstanceOf[TStruct] val newColKey = newKey.getOrElse(child.typ.colKey) child.typ.copy(colKey = newColKey, colType = newColType) @@ -704,7 +717,7 @@ case class MatrixMapCols(child: MatrixIR, newCol: IR, newKey: Option[IndexedSeq[ case class MatrixMapGlobals(child: MatrixIR, newGlobals: IR) extends MatrixIR { val childrenSeq: IndexedSeq[BaseIR] = Array(child, newGlobals) - val typ: MatrixType = + lazy val typ: MatrixType = child.typ.copy(globalType = newGlobals.typ.asInstanceOf[TStruct]) def copy(newChildren: IndexedSeq[BaseIR]): MatrixMapGlobals = { @@ -727,7 +740,7 @@ case class MatrixFilterEntries(child: MatrixIR, pred: IR) extends MatrixIR { MatrixFilterEntries(newChildren(0).asInstanceOf[MatrixIR], newChildren(1).asInstanceOf[IR]) } - val typ: MatrixType = child.typ + def typ: MatrixType = child.typ override def partitionCounts: Option[IndexedSeq[Long]] = child.partitionCounts @@ -739,8 +752,11 @@ case class MatrixFilterEntries(child: MatrixIR, pred: IR) extends MatrixIR { case class MatrixAnnotateColsTable( child: MatrixIR, table: TableIR, - root: String) extends MatrixIR { - require(child.typ.colType.fieldOption(root).isEmpty) + root: String +) extends MatrixIR { + override def typecheck(): Unit = { + assert(child.typ.colType.fieldOption(root).isEmpty) + } lazy val childrenSeq: IndexedSeq[BaseIR] = FastSeq(child, table) @@ -748,8 +764,8 @@ case class MatrixAnnotateColsTable( override def partitionCounts: Option[IndexedSeq[Long]] = child.partitionCounts - private val 
(colType, inserter) = child.typ.colType.structInsert(table.typ.valueType, List(root)) - val typ: MatrixType = child.typ.copy(colType = colType) + lazy val typ: MatrixType = child.typ.copy( + colType = child.typ.colType.structInsert(table.typ.valueType, List(root))) def copy(newChildren: IndexedSeq[BaseIR]): MatrixAnnotateColsTable = { MatrixAnnotateColsTable( @@ -767,9 +783,12 @@ case class MatrixAnnotateRowsTable( root: String, product: Boolean ) extends MatrixIR { - require((!product && table.typ.keyType.isPrefixOf(child.typ.rowKeyStruct)) || - (table.typ.keyType.size == 1 && table.typ.keyType.types(0) == TInterval(child.typ.rowKeyStruct.types(0))), - s"\n L: ${ child.typ }\n R: ${ table.typ }") + override def typecheck(): Unit = { + assert( + (!product && table.typ.keyType.isPrefixOf(child.typ.rowKeyStruct)) || + (table.typ.keyType.size == 1 && table.typ.keyType.types(0) == TInterval(child.typ.rowKeyStruct.types(0))), + s"\n L: ${child.typ}\n R: ${table.typ}") + } lazy val childrenSeq: IndexedSeq[BaseIR] = FastSeq(child, table) @@ -779,13 +798,13 @@ case class MatrixAnnotateRowsTable( lazy val rowCountUpperBound: Option[Long] = child.rowCountUpperBound - private val annotationType = + private def annotationType = if (product) TArray(table.typ.valueType) else table.typ.valueType - val typ: MatrixType = + lazy val typ: MatrixType = child.typ.copy(rowType = child.typ.rowType.appendKey(root, annotationType)) def copy(newChildren: IndexedSeq[BaseIR]): MatrixAnnotateRowsTable = { @@ -808,27 +827,28 @@ case class MatrixExplodeRows(child: MatrixIR, path: IndexedSeq[String]) extends override def columnCount: Option[Int] = child.columnCount - val idx = Ref(genUID(), TInt32) - - val newRow: InsertFields = { - val refs = path.init.scanLeft(Ref("va", child.typ.rowType))((struct, name) => - Ref(genUID(), tcoerce[TStruct](struct.typ).field(name).typ)) + lazy val typ: MatrixType = { + // FIXME: compute row type directly + val newRow: InsertFields = { + val refs = path.init.scanLeft(Ref("va", child.typ.rowType))((struct, name) => + Ref(genUID(), tcoerce[TStruct](struct.typ).field(name).typ)) + + path.zip(refs).zipWithIndex.foldRight[IR](Ref(genUID(), TInt32)) { + case (((field, ref), i), arg) => + InsertFields(ref, FastSeq(field -> + (if (i == refs.length - 1) + ArrayRef(ToArray(ToStream(GetField(ref, field))), arg) + else + Let(refs(i + 1).name, GetField(ref, field), arg)))) + }.asInstanceOf[InsertFields] + } - path.zip(refs).zipWithIndex.foldRight[IR](idx) { - case (((field, ref), i), arg) => - InsertFields(ref, FastSeq(field -> - (if (i == refs.length - 1) - ArrayRef(ToArray(ToStream(GetField(ref, field))), arg) - else - Let(refs(i + 1).name, GetField(ref, field), arg)))) - }.asInstanceOf[InsertFields] + child.typ.copy(rowType = newRow.typ) } - - val typ: MatrixType = child.typ.copy(rowType = newRow.typ) } case class MatrixRepartition(child: MatrixIR, n: Int, strategy: Int) extends MatrixIR { - val typ: MatrixType = child.typ + def typ: MatrixType = child.typ lazy val childrenSeq: IndexedSeq[BaseIR] = FastSeq(child) @@ -844,8 +864,12 @@ case class MatrixRepartition(child: MatrixIR, n: Int, strategy: Int) extends Mat case class MatrixUnionRows(childrenSeq: IndexedSeq[MatrixIR]) extends MatrixIR { require(childrenSeq.length > 1) - require(childrenSeq.tail.forall(c => compatible(c.typ, childrenSeq.head.typ)), childrenSeq.map(_.typ)) - val typ: MatrixType = childrenSeq.head.typ + + override def typecheck(): Unit = { + assert(childrenSeq.tail.forall(c => compatible(c.typ, childrenSeq.head.typ)), 
childrenSeq.map(_.typ)) + } + + def typ: MatrixType = childrenSeq.head.typ def compatible(t1: MatrixType, t2: MatrixType): Boolean = { t1.colKeyStruct == t2.colKeyStruct && @@ -873,8 +897,7 @@ case class MatrixUnionRows(childrenSeq: IndexedSeq[MatrixIR]) extends MatrixIR { } case class MatrixDistinctByRow(child: MatrixIR) extends MatrixIR { - - val typ: MatrixType = child.typ + def typ: MatrixType = child.typ lazy val childrenSeq: IndexedSeq[BaseIR] = FastSeq(child) @@ -890,7 +913,7 @@ case class MatrixDistinctByRow(child: MatrixIR) extends MatrixIR { case class MatrixRowsHead(child: MatrixIR, n: Long) extends MatrixIR { require(n >= 0) - val typ: MatrixType = child.typ + def typ: MatrixType = child.typ override lazy val partitionCounts: Option[IndexedSeq[Long]] = child.partitionCounts.map { pc => val prefixSums = pc.iterator.scanLeft(0L)(_ + _) @@ -919,7 +942,7 @@ case class MatrixRowsHead(child: MatrixIR, n: Long) extends MatrixIR { case class MatrixColsHead(child: MatrixIR, n: Int) extends MatrixIR { require(n >= 0) - val typ: MatrixType = child.typ + def typ: MatrixType = child.typ lazy val childrenSeq: IndexedSeq[BaseIR] = Array(child) @@ -937,7 +960,7 @@ case class MatrixColsHead(child: MatrixIR, n: Int) extends MatrixIR { case class MatrixRowsTail(child: MatrixIR, n: Long) extends MatrixIR { require(n >= 0) - val typ: MatrixType = child.typ + def typ: MatrixType = child.typ lazy val childrenSeq: IndexedSeq[BaseIR] = Array(child) @@ -956,7 +979,7 @@ case class MatrixRowsTail(child: MatrixIR, n: Long) extends MatrixIR { case class MatrixColsTail(child: MatrixIR, n: Int) extends MatrixIR { require(n >= 0) - val typ: MatrixType = child.typ + def typ: MatrixType = child.typ lazy val childrenSeq: IndexedSeq[BaseIR] = Array(child) @@ -987,13 +1010,12 @@ case class MatrixExplodeCols(child: MatrixIR, path: IndexedSeq[String]) extends lazy val rowCountUpperBound: Option[Long] = child.rowCountUpperBound - private val (keysType, querier) = child.typ.colType.queryTyped(path.toList) - private val keyType = keysType match { - case TArray(e) => e - case TSet(e) => e + lazy val typ: MatrixType = { + val (keysType, _) = child.typ.colType.queryTyped(path.toList) + val keyType = keysType.asInstanceOf[TContainer].elementType + child.typ.copy( + colType = child.typ.colType.structInsert(keyType, path.toList)) } - val (newColType, inserter) = child.typ.colType.structInsert(keyType, path.toList) - val typ: MatrixType = child.typ.copy(colType = newColType) } /** Create a MatrixTable from a Table, where the column values are stored in a @@ -1006,13 +1028,14 @@ case class CastTableToMatrix( colsFieldName: String, colKey: IndexedSeq[String] ) extends MatrixIR { - - child.typ.rowType.fieldType(entriesFieldName) match { - case TArray(TStruct(_)) => - case t => fatal(s"expected entry field to be an array of structs, found $t") + override def typecheck(): Unit = { + child.typ.rowType.fieldType(entriesFieldName) match { + case TArray(TStruct(_)) => + case t => fatal(s"expected entry field to be an array of structs, found $t") + } } - val typ: MatrixType = MatrixType.fromTableType(child.typ, colsFieldName, entriesFieldName, colKey) + lazy val typ: MatrixType = MatrixType.fromTableType(child.typ, colsFieldName, entriesFieldName, colKey) lazy val childrenSeq: IndexedSeq[BaseIR] = Array(child) @@ -1046,12 +1069,19 @@ case class MatrixToMatrixApply(child: MatrixIR, function: MatrixToMatrixFunction lazy val rowCountUpperBound: Option[Long] = if (function.preservesPartitionCounts) child.rowCountUpperBound else None } -case 
class MatrixRename(child: MatrixIR, - globalMap: Map[String, String], colMap: Map[String, String], rowMap: Map[String, String], entryMap: Map[String, String]) extends MatrixIR { - require(globalMap.keys.forall(child.typ.globalType.hasField)) - require(colMap.keys.forall(child.typ.colType.hasField)) - require(rowMap.keys.forall(child.typ.rowType.hasField)) - require(entryMap.keys.forall(child.typ.entryType.hasField)) +case class MatrixRename( + child: MatrixIR, + globalMap: Map[String, String], + colMap: Map[String, String], + rowMap: Map[String, String], + entryMap: Map[String, String] +) extends MatrixIR { + override def typecheck(): Unit = { + assert(globalMap.keys.forall(child.typ.globalType.hasField)) + assert(colMap.keys.forall(child.typ.colType.hasField)) + assert(rowMap.keys.forall(child.typ.rowType.hasField)) + assert(entryMap.keys.forall(child.typ.entryType.hasField)) + } lazy val typ: MatrixType = MatrixType( globalType = child.typ.globalType.rename(globalMap), @@ -1083,7 +1113,7 @@ case class MatrixFilterIntervals(child: MatrixIR, intervals: IndexedSeq[Interval MatrixFilterIntervals(newChild, intervals, keep) } - override lazy val typ: MatrixType = child.typ + override def typ: MatrixType = child.typ override def columnCount: Option[Int] = child.columnCount diff --git a/hail/src/main/scala/is/hail/expr/ir/NestingDepth.scala b/hail/src/main/scala/is/hail/expr/ir/NestingDepth.scala index ee1b92cd449..b3c71f57345 100644 --- a/hail/src/main/scala/is/hail/expr/ir/NestingDepth.scala +++ b/hail/src/main/scala/is/hail/expr/ir/NestingDepth.scala @@ -99,7 +99,7 @@ object NestingDepth { computeIR(left, depth) computeIR(right, depth) computeIR(joinF, depth.incrementEval) - case TailLoop(_, params, body) => + case TailLoop(_, params, _, body) => params.foreach { case (_, p) => computeIR(p, depth) } computeIR(body, depth.incrementEval) case NDArrayMap(nd, _, body) => diff --git a/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala b/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala index 6be2c8f42a0..fab2531f5a6 100644 --- a/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala +++ b/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala @@ -63,14 +63,14 @@ class NormalizeNames(normFunction: Int => String, allowFreeVariables: Boolean = newValue <- normalize(value, valueEnv) newBody <- normalize(body, bodyEnv) } yield AggLet(newName, newValue, newBody, isScan) - case TailLoop(name, args, body) => + case TailLoop(name, args, resultType, body) => val newFName = gen() val newNames = Array.tabulate(args.length)(i => gen()) val (names, values) = args.unzip for { newValues <- values.mapRecur(v => normalize(v)) newBody <- normalize(body, env.copy(eval = env.eval.bind(names.zip(newNames) :+ name -> newFName: _*))) - } yield TailLoop(newFName, newNames.zip(newValues), newBody) + } yield TailLoop(newFName, newNames.zip(newValues), resultType, newBody) case ArraySort(a, left, right, lessThan) => val newLeft = gen() val newRight = gen() diff --git a/hail/src/main/scala/is/hail/expr/ir/Parser.scala b/hail/src/main/scala/is/hail/expr/ir/Parser.scala index 915c5eafe1e..668ca0d7f20 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Parser.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Parser.scala @@ -3,7 +3,7 @@ package is.hail.expr.ir import is.hail.HailContext import is.hail.backend.ExecuteContext import is.hail.expr.ir.agg._ -import is.hail.expr.ir.functions.RelationalFunctions +import is.hail.expr.ir.functions.{IRFunctionRegistry, RelationalFunctions} import 
is.hail.expr.{JSONAnnotationImpex, Nat, ParserUtils} import is.hail.io.{BufferSpec, TypedCodecSpec} import is.hail.rvd.{RVDPartitioner, RVDType} @@ -128,70 +128,7 @@ object IRLexer extends JavaTokenParsers { case class IRParserEnvironment( ctx: ExecuteContext, - refMap: BindingEnv[Type] = BindingEnv.empty[Type], - irMap: Map[Int, BaseIR] = Map.empty, -) { - - def promoteAgg: IRParserEnvironment = copy(refMap = refMap.promoteAgg) - - def promoteScan: IRParserEnvironment = copy(refMap = refMap.promoteScan) - - def promoteAggScan(isScan: Boolean): IRParserEnvironment = - if (isScan) promoteScan else promoteAgg - - def noAgg: IRParserEnvironment = copy(refMap = refMap.noAgg) - - def noScan: IRParserEnvironment = copy(refMap = refMap.noScan) - - def noAggScan(isScan: Boolean): IRParserEnvironment = - if (isScan) noScan else noAgg - - def createAgg: IRParserEnvironment = copy(refMap = refMap.createAgg) - - def createScan: IRParserEnvironment = copy(refMap = refMap.createScan) - - def onlyRelational: IRParserEnvironment = { - if (refMap.eval.isEmpty && refMap.agg.isEmpty && refMap.scan.isEmpty) - this - else - copy(refMap = refMap.onlyRelational) - } - - def empty: IRParserEnvironment = copy(refMap = BindingEnv.empty) - - def bindEval(name: String, t: Type): IRParserEnvironment = - copy(refMap = refMap.bindEval(name, t)) - - def bindEval(bindings: (String, Type)*): IRParserEnvironment = - copy(refMap = refMap.bindEval(bindings: _*)) - - def bindEval(bindings: Env[Type]): IRParserEnvironment = - copy(refMap = refMap.bindEval(bindings.m.toSeq: _*)) - - def bindAggScan(isScan: Boolean, bindings: (String, Type)*): IRParserEnvironment = - copy(refMap = if (isScan) refMap.bindScan(bindings: _*) else refMap.bindAgg(bindings: _*)) - - def bindAgg(name: String, t: Type): IRParserEnvironment = - copy(refMap = refMap.bindAgg(name, t)) - - def bindAgg(bindings: (String, Type)*): IRParserEnvironment = - copy(refMap = refMap.bindAgg(bindings: _*)) - - def bindAgg(bindings: Env[Type]): IRParserEnvironment = - copy(refMap = refMap.bindAgg(bindings.m.toSeq: _*)) - - def bindScan(name: String, t: Type): IRParserEnvironment = - copy(refMap = refMap.bindScan(name, t)) - - def bindScan(bindings: (String, Type)*): IRParserEnvironment = - copy(refMap = refMap.bindScan(bindings: _*)) - - def bindScan(bindings: Env[Type]): IRParserEnvironment = - copy(refMap = refMap.bindScan(bindings.m.toSeq: _*)) - - def bindRelational(name: String, t: Type): IRParserEnvironment = - copy(refMap = refMap.bindRelational(name, t)) -} + irMap: Map[Int, BaseIR] = Map.empty) object IRParser { def error(t: Token, msg: String): Nothing = ParserUtils.error(t.pos, msg) @@ -743,7 +680,7 @@ object IRParser { val vtwr = vtwr_expr(it) val accumName = identifier(it) val otherAccumName = identifier(it) - val combIR = ir_value_expr(env.empty.bindEval(accumName -> vtwr.t, otherAccumName -> vtwr.t))(it).run() + val combIR = ir_value_expr(env)(it).run() FoldStateSig(vtwr.canonicalEmitType, accumName, otherAccumName, combIR) } punctuation(it, ")") @@ -829,6 +766,16 @@ object IRParser { } yield ir } + def apply_like(env: IRParserEnvironment, cons: (String, Seq[Type], Seq[IR], Type, Int) => IR)(it: TokenIterator): StackFrame[IR] = { + val errorID = int32_literal(it) + val function = identifier(it) + val typeArgs = type_exprs(it) + val rt = type_expr(it) + ir_value_children(env)(it).map { args => + cons(function, typeArgs, args, rt, errorID) + } + } + def ir_value_expr_1(env: IRParserEnvironment)(it: TokenIterator): StackFrame[IR] = { identifier(it) match { 
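With the refMap and its bind* helpers gone from IRParserEnvironment, the parser no longer tracks the types of bound names, so any node whose type previously came from that environment has to be typed after parsing instead. The env-aware traversals added to BaseIR at the top of this patch (forEachChildWithEnv and mapChildrenWithEnv, both driven by ChildBindings) are the natural hook for such a pass. A minimal sketch follows, assuming it lives in is.hail.expr.ir; the PR's actual annotation pass may differ in name and shape, and the env.eval.lookup call is an assumed helper:

    import is.hail.types.virtual.Type

    // Illustrative only: visit each child with the BindingEnv that ChildBindings
    // computes for it, and fill in Ref types the parser now leaves unset (null).
    object AnnotateRefTypes {
      def apply(ir: BaseIR, env: BindingEnv[Type]): Unit = {
        ir match {
          case r: Ref if r._typ == null =>
            r._typ = env.eval.lookup(r.name) // assumed lookup helper; adjust to the real API
          case _ =>
        }
        // Children are visited in order, so by the time ChildBindings queries the type
        // of an earlier sibling (index < i), that sibling has already been annotated.
        ir.forEachChildWithEnv(env)((child, childEnv) => apply(child, childEnv))
      }
    }
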
case "I32" => done(I32(int32_literal(it))) @@ -881,33 +828,32 @@ object IRParser { val name = identifier(it) for { value <- ir_value_expr(env)(it) - body <- ir_value_expr(env.bindEval(name, value.typ))(it) + body <- ir_value_expr(env)(it) } yield Let(name, value, body) case "AggLet" => val name = identifier(it) val isScan = boolean_literal(it) for { - value <- ir_value_expr(env.promoteAggScan(isScan))(it) - body <- ir_value_expr(env.bindAggScan(isScan, name -> value.typ))(it) + value <- ir_value_expr(env)(it) + body <- ir_value_expr(env)(it) } yield AggLet(name, value, body, isScan) case "TailLoop" => val name = identifier(it) val paramNames = identifiers(it) + val resultType = type_expr(it) for { paramIRs <- fillArray(paramNames.length)(ir_value_expr(env)(it)) params = paramNames.zip(paramIRs) - bodyEnv = env.bindEval(params.map { case (n, v) => n -> v.typ}: _*) - body <- ir_value_expr(bodyEnv)(it) - } yield TailLoop(name, params, body) + body <- ir_value_expr(env)(it) + } yield TailLoop(name, params, resultType, body) case "Recur" => val name = identifier(it) - val typ = type_expr(it) ir_value_children(env)(it).map { args => - Recur(name, args, typ) + Recur(name, args, null) } case "Ref" => val id = identifier(it) - done(Ref(id, env.refMap.eval(id))) + done(Ref(id, null)) case "RelationalRef" => val id = identifier(it) val t = type_expr(it) @@ -915,8 +861,8 @@ object IRParser { case "RelationalLet" => val name = identifier(it) for { - value <- ir_value_expr(env.onlyRelational)(it) - body <- ir_value_expr(env.noAgg.noScan.bindRelational(name, value.typ))(it) + value <- ir_value_expr(env)(it) + body <- ir_value_expr(env)(it) } yield RelationalLet(name, value, body) case "ApplyBinaryPrimOp" => val op = BinaryOp.fromString(identifier(it)) @@ -932,11 +878,11 @@ object IRParser { for { l <- ir_value_expr(env)(it) r <- ir_value_expr(env)(it) - } yield ApplyComparisonOp(ComparisonOp.fromStringAndTypes((opName, l.typ, r.typ)), l, r) + } yield ApplyComparisonOp(ComparisonOp.fromString(opName), l, r) case "MakeArray" => val typ = opt(it, type_expr).map(_.asInstanceOf[TArray]).orNull ir_value_children(env)(it).map { args => - MakeArray.unify(env.ctx, args, typ) + MakeArray(args, typ) } case "MakeStream" => val typ = opt(it, type_expr).map(_.asInstanceOf[TStream]).orNull @@ -990,8 +936,7 @@ object IRParser { val r = identifier(it) for { a <- ir_value_expr(env)(it) - elt = tcoerce[TStream](a.typ).elementType - lessThan <- ir_value_expr(env.bindEval(l -> elt, r -> elt))(it) + lessThan <- ir_value_expr(env)(it) } yield ArraySort(a, l, r, lessThan) case "ArrayMaximalIndependentSet" => val hasTieBreaker = boolean_literal(it) @@ -999,10 +944,8 @@ object IRParser { for { edges <- ir_value_expr(env)(it) tieBreaker <- if (hasTieBreaker) { - val eltType = tcoerce[TArray](edges.typ).elementType.asInstanceOf[TBaseStruct].types.head - val tbType = TTuple(eltType) val Some((left, right)) = bindings - ir_value_expr(IRParserEnvironment(env.ctx, BindingEnv.eval(left -> tbType, right -> tbType)))(it).map(tbf => Some((left, right, tbf))) + ir_value_expr(env)(it).map(tbf => Some((left, right, tbf))) } else { done(None) } @@ -1030,7 +973,7 @@ object IRParser { val name = identifier(it) for { nd <- ir_value_expr(env)(it) - body <- ir_value_expr(env.bindEval(name, tcoerce[TNDArray](nd.typ).elementType))(it) + body <- ir_value_expr(env)(it) } yield NDArrayMap(nd, name, body) case "NDArrayMap2" => val errorID = int32_literal(it) @@ -1039,10 +982,7 @@ object IRParser { for { l <- ir_value_expr(env)(it) r <- 
ir_value_expr(env)(it) - body_env = env.bindEval( - lName -> tcoerce[TNDArray](l.typ).elementType, - rName -> tcoerce[TNDArray](r.typ).elementType) - body <- ir_value_expr(body_env)(it) + body <- ir_value_expr(env)(it) } yield NDArrayMap2(l, r, lName, rName, body, errorID) case "NDArrayReindex" => val indexExpr = int32_literals(it) @@ -1068,7 +1008,7 @@ object IRParser { case "NDArrayFilter" => for { nd <- ir_value_expr(env)(it) - filters <- fillArray(tcoerce[TNDArray](nd.typ).nDims)(ir_value_expr(env)(it)) + filters <- repUntil(it, ir_value_expr(env), PunctuationToken(")")) } yield NDArrayFilter(nd, filters.toFastSeq) case "NDArrayMatMul" => val errorID = int32_literal(it) @@ -1123,7 +1063,7 @@ object IRParser { val name = identifier(it) for { a <- ir_value_expr(env)(it) - body <- ir_value_expr(env.bindEval(name, tcoerce[TStream](a.typ).elementType))(it) + body <- ir_value_expr(env)(it) } yield StreamMap(a, name, body) case "StreamTake" => for { @@ -1146,7 +1086,7 @@ object IRParser { val names = identifiers(it) for { as <- names.mapRecur(_ => ir_value_expr(env)(it)) - body <- ir_value_expr(env.bindEval(names.zip(as.map(a => tcoerce[TStream](a.typ).elementType)): _*))(it) + body <- ir_value_expr(env)(it) } yield StreamZip(as, names, body, behavior, errorID) case "StreamZipJoinProducers" => val key = identifiers(it) @@ -1155,10 +1095,9 @@ object IRParser { val curVals = identifier(it) for { ctxs <- ir_value_expr(env)(it) - makeProducer <- ir_value_expr(env.bindEval(ctxName, TIterable.elementType(ctxs.typ)))(it) + makeProducer <- ir_value_expr(env)(it) body <- { - val structType = TIterable.elementType(makeProducer.typ).asInstanceOf[TStruct] - ir_value_expr(env.bindEval((curKey, structType.typeAfterSelectNames(key)), (curVals, TArray(structType))))(it) + ir_value_expr(env)(it) } } yield StreamZipJoinProducers(ctxs, ctxName, makeProducer, key, curKey, curVals, body) case "StreamZipJoin" => @@ -1169,8 +1108,7 @@ object IRParser { for { streams <- (0 until nStreams).mapRecur(_ => ir_value_expr(env)(it)) body <- { - val structType = streams.head.typ.asInstanceOf[TStream].elementType.asInstanceOf[TStruct] - ir_value_expr(env.bindEval((curKey, structType.typeAfterSelectNames(key)), (curVals, TArray(structType))))(it) + ir_value_expr(env)(it) } } yield StreamZipJoin(streams, key, curKey, curVals, body) case "StreamMultiMerge" => @@ -1182,25 +1120,25 @@ object IRParser { val name = identifier(it) for { a <- ir_value_expr(env)(it) - body <- ir_value_expr(env.bindEval(name, tcoerce[TStream](a.typ).elementType))(it) + body <- ir_value_expr(env)(it) } yield StreamFilter(a, name, body) case "StreamTakeWhile" => val name = identifier(it) for { a <- ir_value_expr(env)(it) - body <- ir_value_expr(env.bindEval(name, tcoerce[TStream](a.typ).elementType))(it) + body <- ir_value_expr(env)(it) } yield StreamTakeWhile(a, name, body) case "StreamDropWhile" => val name = identifier(it) for { a <- ir_value_expr(env)(it) - body <- ir_value_expr(env.bindEval(name, tcoerce[TStream](a.typ).elementType))(it) + body <- ir_value_expr(env)(it) } yield StreamDropWhile(a, name, body) case "StreamFlatMap" => val name = identifier(it) for { a <- ir_value_expr(env)(it) - body <- ir_value_expr(env.bindEval(name, tcoerce[TStream](a.typ).elementType))(it) + body <- ir_value_expr(env)(it) } yield StreamFlatMap(a, name, body) case "StreamFold" => val accumName = identifier(it) @@ -1208,8 +1146,7 @@ object IRParser { for { a <- ir_value_expr(env)(it) zero <- ir_value_expr(env)(it) - eltType = tcoerce[TStream](a.typ).elementType - 
body <- ir_value_expr(env.bindEval(accumName -> zero.typ, valueName -> eltType))(it) + body <- ir_value_expr(env)(it) } yield StreamFold(a, zero, accumName, valueName, body) case "StreamFold2" => val accumNames = identifiers(it) @@ -1218,11 +1155,8 @@ object IRParser { a <- ir_value_expr(env)(it) accIRs <- fillArray(accumNames.length)(ir_value_expr(env)(it)) accs = accumNames.zip(accIRs) - eltType = tcoerce[TStream](a.typ).elementType - resultEnv = env.bindEval(accs.map { case (name, value) => (name, value.typ) }: _*) - seqEnv = resultEnv.bindEval(valueName, eltType) - seqs <- fillArray(accs.length)(ir_value_expr(seqEnv)(it)) - res <- ir_value_expr(resultEnv)(it) + seqs <- fillArray(accs.length)(ir_value_expr(env)(it)) + res <- ir_value_expr(env)(it) } yield StreamFold2(a, accs, valueName, seqs, res) case "StreamScan" => val accumName = identifier(it) @@ -1230,8 +1164,7 @@ object IRParser { for { a <- ir_value_expr(env)(it) zero <- ir_value_expr(env)(it) - eltType = tcoerce[TStream](a.typ).elementType - body <- ir_value_expr(env.bindEval(accumName -> zero.typ, valueName -> eltType))(it) + body <- ir_value_expr(env)(it) } yield StreamScan(a, zero, accumName, valueName, body) case "StreamWhiten" => val newChunk = identifier(it) @@ -1253,27 +1186,25 @@ object IRParser { for { left <- ir_value_expr(env)(it) right <- ir_value_expr(env)(it) - lelt = tcoerce[TStream](left.typ).elementType - relt = tcoerce[TStream](right.typ).elementType - join <- ir_value_expr(env.bindEval(l -> lelt, r -> relt))(it) + join <- ir_value_expr(env)(it) } yield StreamJoinRightDistinct(left, right, lKey, rKey, l, r, join, joinType) case "StreamFor" => val name = identifier(it) for { a <- ir_value_expr(env)(it) - body <- ir_value_expr(env.bindEval(name, tcoerce[TStream](a.typ).elementType))(it) + body <- ir_value_expr(env)(it) } yield StreamFor(a, name, body) case "StreamAgg" => val name = identifier(it) for { a <- ir_value_expr(env)(it) - query <- ir_value_expr(env.createAgg.bindAgg(name, tcoerce[TStream](a.typ).elementType))(it) + query <- ir_value_expr(env)(it) } yield StreamAgg(a, name, query) case "StreamAggScan" => val name = identifier(it) for { a <- ir_value_expr(env)(it) - query <- ir_value_expr(env.createScan.bindScan(name, tcoerce[TStream](a.typ).elementType))(it) + query <- ir_value_expr(env)(it) } yield StreamAggScan(a, name, query) case "RunAgg" => val signatures = agg_state_signatures(env)(it) @@ -1286,28 +1217,27 @@ object IRParser { val signatures = agg_state_signatures(env)(it) for { array <- ir_value_expr(env)(it) - newE = env.bindEval(name, tcoerce[TStream](array.typ).elementType) init <- ir_value_expr(env)(it) - seq <- ir_value_expr(newE)(it) - result <- ir_value_expr(newE)(it) + seq <- ir_value_expr(env)(it) + result <- ir_value_expr(env)(it) } yield RunAggScan(array, name, init, seq, result, signatures) case "AggFilter" => val isScan = boolean_literal(it) for { - cond <- ir_value_expr(env.promoteAggScan(isScan))(it) + cond <- ir_value_expr(env)(it) aggIR <- ir_value_expr(env)(it) } yield AggFilter(cond, aggIR, isScan) case "AggExplode" => val name = identifier(it) val isScan = boolean_literal(it) for { - a <- ir_value_expr(env.promoteAggScan(isScan))(it) - aggBody <- ir_value_expr(env.bindAggScan(isScan, name -> tcoerce[TStream](a.typ).elementType))(it) + a <- ir_value_expr(env)(it) + aggBody <- ir_value_expr(env)(it) } yield AggExplode(a, name, aggBody, isScan) case "AggGroupBy" => val isScan = boolean_literal(it) for { - key <- ir_value_expr(env.promoteAggScan(isScan))(it) + key <- 
ir_value_expr(env)(it) aggIR <- ir_value_expr(env)(it) } yield AggGroupBy(key, aggIR, isScan) case "AggArrayPerElement" => @@ -1316,39 +1246,32 @@ object IRParser { val isScan = boolean_literal(it) val hasKnownLength = boolean_literal(it) for { - a <- ir_value_expr(env.promoteAggScan(isScan))(it) - aggBody <- ir_value_expr(env - .bindEval(indexName, TInt32) - .bindAggScan(isScan, indexName -> TInt32, elementName -> tcoerce[TArray](a.typ).elementType))(it) + a <- ir_value_expr(env)(it) + aggBody <- ir_value_expr(env)(it) knownLength <- if (hasKnownLength) ir_value_expr(env)(it).map(Some(_)) else done(None) } yield AggArrayPerElement(a, elementName, indexName, aggBody, knownLength, isScan) case "ApplyAggOp" => val aggOp = agg_op(it) for { - initOpArgs <- ir_value_exprs(env.noAgg)(it) - seqOpArgs <- ir_value_exprs(env.promoteAgg)(it) - aggSig = AggSignature(aggOp, initOpArgs.map(arg => arg.typ), seqOpArgs.map(arg => arg.typ)) + initOpArgs <- ir_value_exprs(env)(it) + seqOpArgs <- ir_value_exprs(env)(it) + aggSig = AggSignature(aggOp, null, null) } yield ApplyAggOp(initOpArgs, seqOpArgs, aggSig) case "ApplyScanOp" => val aggOp = agg_op(it) for { - initOpArgs <- ir_value_exprs(env.noScan)(it) - seqOpArgs <- ir_value_exprs(env.promoteScan)(it) - aggSig = AggSignature(aggOp, initOpArgs.map(arg => arg.typ), seqOpArgs.map(arg => arg.typ)) + initOpArgs <- ir_value_exprs(env)(it) + seqOpArgs <- ir_value_exprs(env)(it) + aggSig = AggSignature(aggOp, null, null) } yield ApplyScanOp(initOpArgs, seqOpArgs, aggSig) case "AggFold" => val accumName = identifier(it) val otherAccumName = identifier(it) val isScan = boolean_literal(it) for { - zero <- ir_value_expr(env.noAggScan(isScan))(it) - seqOp <- ir_value_expr(env.promoteAggScan(isScan).bindEval(accumName, zero.typ))(it) - combEnv = (if (isScan) - env.copy(refMap = env.refMap.copy(eval = Env.empty, scan = None)) - else - env.copy(refMap = env.refMap.copy(eval = Env.empty, agg = None)) - ).bindEval(accumName -> zero.typ, otherAccumName -> zero.typ) - combOp <- ir_value_expr(combEnv)(it) + zero <- ir_value_expr(env)(it) + seqOp <- ir_value_expr(env)(it) + combOp <- ir_value_expr(env)(it) } yield AggFold(zero, seqOp, combOp, accumName, otherAccumName, isScan) case "InitOp" => val i = int32_literal(it) @@ -1450,14 +1373,12 @@ object IRParser { rngState <- ir_value_expr(env)(it) args <- ir_value_children(env)(it) } yield ApplySeeded(function, args, rngState, staticUID, rt) - case "ApplyIR" | "ApplySpecial" | "Apply" => - val errorID = int32_literal(it) - val function = identifier(it) - val typeArgs = type_exprs(it) - val rt = type_expr(it) - ir_value_children(env)(it).map { args => - invoke(function, rt, typeArgs, errorID, args: _*) - } + case "ApplyIR" => + apply_like(env, ApplyIR)(it) + case "ApplySpecial" => + apply_like(env, ApplySpecial)(it) + case "Apply" => + apply_like(env, Apply)(it) case "MatrixCount" => matrix_ir(env)(it).map(MatrixCount) case "TableCount" => @@ -1468,17 +1389,17 @@ object IRParser { table_ir(env)(it).map(TableCollect) case "TableAggregate" => for { - child <- table_ir(env.onlyRelational)(it) - query <- ir_value_expr(env.onlyRelational.createAgg.bindEval(child.typ.globalEnv).bindAgg(child.typ.rowEnv))(it) + child <- table_ir(env)(it) + query <- ir_value_expr(env)(it) } yield TableAggregate(child, query) case "TableToValueApply" => val config = string_literal(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => TableToValueApply(child, RelationalFunctions.lookupTableToValue(env.ctx, config)) } 
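For orientation, several of the parser cases above now build nodes without their types and leave typing to a later pass: Ref(id, null), Recur(name, args, null), AggSignature(aggOp, null, null), and ApplyComparisonOp(ComparisonOp.fromString(opName), l, r); the Apply variants instead read an explicit return type via the shared apply_like helper. The untyped comparison path can be exercised directly together with the new ComparisonOp.copy from this patch; a small illustration, with operand types chosen arbitrarily and imports assumed from the surrounding project:

    import is.hail.expr.ir.ComparisonOp
    import is.hail.types.virtual.TInt32

    // fromString yields an op with null operand types; copy fills them in later.
    val untyped = ComparisonOp.fromString("GT")   // GT(null, null)
    val typed   = untyped.copy(TInt32, TInt32)    // GT(TInt32, TInt32)
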
case "MatrixToValueApply" => val config = string_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixToValueApply(child, RelationalFunctions.lookupMatrixToValue(env.ctx, config)) } case "BlockMatrixToValueApply" => @@ -1502,8 +1423,8 @@ object IRParser { } case "MatrixAggregate" => for { - child <- matrix_ir(env.onlyRelational)(it) - query <- ir_value_expr(env.onlyRelational.createAgg.bindEval(child.typ.globalEnv).bindAgg(child.typ.entryEnv))(it) + child <- matrix_ir(env)(it) + query <- ir_value_expr(env)(it) } yield MatrixAggregate(child, query) case "MatrixWrite" => val writerStr = string_literal(it) @@ -1540,7 +1461,7 @@ object IRParser { for { ctxs <- ir_value_expr(env)(it) globals <- ir_value_expr(env)(it) - body <- ir_value_expr(env.onlyRelational.bindEval(cname -> tcoerce[TStream](ctxs.typ).elementType, gname -> globals.typ))(it) + body <- ir_value_expr(env)(it) dynamicID <- ir_value_expr(env)(it) } yield CollectDistributedArray(ctxs, globals, cname, gname, body, dynamicID, staticID) case "JavaIR" => @@ -1624,14 +1545,14 @@ object IRParser { case "TableKeyBy" => val keys = identifiers(it) val isSorted = boolean_literal(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => TableKeyBy(child, keys, isSorted) } - case "TableDistinct" => table_ir(env.onlyRelational)(it).map(TableDistinct) + case "TableDistinct" => table_ir(env)(it).map(TableDistinct) case "TableFilter" => for { - child <- table_ir(env.onlyRelational)(it) - pred <- ir_value_expr(env.onlyRelational.bindEval(child.typ.rowEnv))(it) + child <- table_ir(env)(it) + pred <- ir_value_expr(env)(it) } yield TableFilter(child, pred) case "TableRead" => val requestedTypeRaw = it.head match { @@ -1650,123 +1571,123 @@ object IRParser { case Right(t) => t } done(TableRead(requestedType, dropRows, reader)) - case "MatrixColsTable" => matrix_ir(env.onlyRelational)(it).map(MatrixColsTable) - case "MatrixRowsTable" => matrix_ir(env.onlyRelational)(it).map(MatrixRowsTable) - case "MatrixEntriesTable" => matrix_ir(env.onlyRelational)(it).map(MatrixEntriesTable) + case "MatrixColsTable" => matrix_ir(env)(it).map(MatrixColsTable) + case "MatrixRowsTable" => matrix_ir(env)(it).map(MatrixRowsTable) + case "MatrixEntriesTable" => matrix_ir(env)(it).map(MatrixEntriesTable) case "TableAggregateByKey" => for { - child <- table_ir(env.onlyRelational)(it) - expr <- ir_value_expr(env.onlyRelational.createAgg.bindEval(child.typ.globalEnv).bindAgg(child.typ.rowEnv))(it) + child <- table_ir(env)(it) + expr <- ir_value_expr(env)(it) } yield TableAggregateByKey(child, expr) case "TableKeyByAndAggregate" => val nPartitions = opt(it, int32_literal) val bufferSize = int32_literal(it) for { - child <- table_ir(env.onlyRelational)(it) - expr <- ir_value_expr(env.onlyRelational.createAgg.bindEval(child.typ.globalEnv).bindAgg(child.typ.rowEnv))(it) - newKey <- ir_value_expr(env.onlyRelational.bindEval(child.typ.rowEnv))(it) + child <- table_ir(env)(it) + expr <- ir_value_expr(env)(it) + newKey <- ir_value_expr(env)(it) } yield TableKeyByAndAggregate(child, expr, newKey, nPartitions, bufferSize) case "TableRepartition" => val n = int32_literal(it) val strategy = int32_literal(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => TableRepartition(child, n, strategy) } case "TableHead" => val n = int64_literal(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => TableHead(child, n) } case "TableTail" => val n = 
int64_literal(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => TableTail(child, n) } case "TableJoin" => val joinType = identifier(it) val joinKey = int32_literal(it) for { - left <- table_ir(env.onlyRelational)(it) - right <- table_ir(env.onlyRelational)(it) + left <- table_ir(env)(it) + right <- table_ir(env)(it) } yield TableJoin(left, right, joinType, joinKey) case "TableLeftJoinRightDistinct" => val root = identifier(it) for { - left <- table_ir(env.onlyRelational)(it) - right <- table_ir(env.onlyRelational)(it) + left <- table_ir(env)(it) + right <- table_ir(env)(it) } yield TableLeftJoinRightDistinct(left, right, root) case "TableIntervalJoin" => val root = identifier(it) val product = boolean_literal(it) for { - left <- table_ir(env.onlyRelational)(it) - right <- table_ir(env.onlyRelational)(it) + left <- table_ir(env)(it) + right <- table_ir(env)(it) } yield TableIntervalJoin(left, right, root, product) case "TableMultiWayZipJoin" => val dataName = string_literal(it) val globalsName = string_literal(it) - table_ir_children(env.onlyRelational)(it).map { children => + table_ir_children(env)(it).map { children => TableMultiWayZipJoin(children, dataName, globalsName) } case "TableParallelize" => val nPartitions = opt(it, int32_literal) - ir_value_expr(env.onlyRelational)(it).map { rowsAndGlobal => + ir_value_expr(env)(it).map { rowsAndGlobal => TableParallelize(rowsAndGlobal, nPartitions) } case "TableMapRows" => for { - child <- table_ir(env.onlyRelational)(it) - newRow <- ir_value_expr(env.onlyRelational.createScan.bindEval(child.typ.rowEnv).bindScan(child.typ.rowEnv))(it) + child <- table_ir(env)(it) + newRow <- ir_value_expr(env)(it) } yield TableMapRows(child, newRow) case "TableMapGlobals" => for { - child <- table_ir(env.onlyRelational)(it) - newRow <- ir_value_expr(env.onlyRelational.bindEval(child.typ.globalEnv))(it) + child <- table_ir(env)(it) + newRow <- ir_value_expr(env)(it) } yield TableMapGlobals(child, newRow) case "TableRange" => val n = int32_literal(it) val nPartitions = opt(it, int32_literal) done(TableRange(n, nPartitions.getOrElse(HailContext.backend.defaultParallelism))) - case "TableUnion" => table_ir_children(env.onlyRelational)(it).map(TableUnion(_)) + case "TableUnion" => table_ir_children(env)(it).map(TableUnion(_)) case "TableOrderBy" => val sortFields = sort_fields(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => TableOrderBy(child, sortFields) } case "TableExplode" => val path = string_literals(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => TableExplode(child, path) } case "CastMatrixToTable" => val entriesField = string_literal(it) val colsField = string_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => CastMatrixToTable(child, entriesField, colsField) } case "MatrixToTableApply" => val config = string_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixToTableApply(child, RelationalFunctions.lookupMatrixToTable(env.ctx, config)) } case "TableToTableApply" => val config = string_literal(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => TableToTableApply(child, RelationalFunctions.lookupTableToTable(env.ctx, config)) } case "BlockMatrixToTableApply" => val config = string_literal(it) for { - bm <- blockmatrix_ir(env.onlyRelational)(it) - aux <- ir_value_expr(env.onlyRelational)(it) + bm <- 
blockmatrix_ir(env)(it) + aux <- ir_value_expr(env)(it) } yield BlockMatrixToTableApply(bm, aux, RelationalFunctions.lookupBlockMatrixToTable(env.ctx, config)) - case "BlockMatrixToTable" => blockmatrix_ir(env.onlyRelational)(it).map(BlockMatrixToTable) + case "BlockMatrixToTable" => blockmatrix_ir(env)(it).map(BlockMatrixToTable) case "TableRename" => val rowK = string_literals(it) val rowV = string_literals(it) val globalK = string_literals(it) val globalV = string_literals(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => TableRename(child, rowK.zip(rowV).toMap, globalK.zip(globalV).toMap) } @@ -1776,21 +1697,19 @@ object IRParser { val partitioner = between(punctuation(_, "("), punctuation(_, ")"), partitioner_literal(env))(it) val errorId = int32_literal(it) for { - contexts <- ir_value_expr(env.onlyRelational)(it) - globals <- ir_value_expr(env.onlyRelational)(it) - body <- ir_value_expr(env.onlyRelational.bindEval( - cname -> TIterable.elementType(contexts.typ), - gname -> globals.typ - ))(it) + contexts <- ir_value_expr(env)(it) + globals <- ir_value_expr(env)(it) + body <- ir_value_expr(env)(it) } yield TableGen(contexts, globals, cname, gname, body, partitioner, errorId) case "TableFilterIntervals" => + val keyType = type_expr(it) val intervals = string_literal(it) val keep = boolean_literal(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => TableFilterIntervals(child, JSONAnnotationImpex.importAnnotation(JsonMethods.parse(intervals), - TArray(TInterval(child.typ.keyType)), + TArray(TInterval(keyType)), padNulls = false).asInstanceOf[IndexedSeq[Interval]], keep) } @@ -1800,14 +1719,14 @@ object IRParser { val requestedKey = int32_literal(it) val allowedOverlap = int32_literal(it) for { - child <- table_ir(env.onlyRelational)(it) - body <- ir_value_expr(env.onlyRelational.bindEval(globalsName -> child.typ.globalType, partitionStreamName -> TStream(child.typ.rowType)))(it) + child <- table_ir(env)(it) + body <- ir_value_expr(env)(it) } yield TableMapPartitions(child, globalsName, partitionStreamName, body, requestedKey, allowedOverlap) case "RelationalLetTable" => val name = identifier(it) for { - value <- ir_value_expr(env.onlyRelational)(it) - body <- table_ir(env.onlyRelational.bindRelational(name, value.typ))(it) + value <- ir_value_expr(env)(it) + body <- table_ir(env)(it) } yield RelationalLetTable(name, value, body) case "JavaTable" => val id = int32_literal(it) @@ -1830,69 +1749,63 @@ object IRParser { identifier(it) match { case "MatrixFilterCols" => for { - child <- matrix_ir(env.onlyRelational)(it) - pred <- ir_value_expr(env.onlyRelational.bindEval(child.typ.colEnv))(it) + child <- matrix_ir(env)(it) + pred <- ir_value_expr(env)(it) } yield MatrixFilterCols(child, pred) case "MatrixFilterRows" => for { - child <- matrix_ir(env.onlyRelational)(it) - pred <- ir_value_expr(env.onlyRelational.bindEval(child.typ.rowEnv))(it) + child <- matrix_ir(env)(it) + pred <- ir_value_expr(env)(it) } yield MatrixFilterRows(child, pred) case "MatrixFilterEntries" => for { - child <- matrix_ir(env.onlyRelational)(it) - pred <- ir_value_expr(env.onlyRelational.bindEval(child.typ.entryEnv))(it) + child <- matrix_ir(env)(it) + pred <- ir_value_expr(env)(it) } yield MatrixFilterEntries(child, pred) case "MatrixMapCols" => val newKey = opt(it, string_literals) for { - child <- matrix_ir(env.onlyRelational)(it) - newEnv = env.onlyRelational.createAgg.createScan - .bindEval(child.typ.colEnv).bindEval("n_rows", TInt64) - 
.bindAgg(child.typ.entryEnv).bindScan(child.typ.colEnv) - newCol <- ir_value_expr(newEnv)(it) + child <- matrix_ir(env)(it) + newCol <- ir_value_expr(env)(it) } yield MatrixMapCols(child, newCol, newKey.map(_.toFastSeq)) case "MatrixKeyRowsBy" => val key = identifiers(it) val isSorted = boolean_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixKeyRowsBy(child, key, isSorted) } case "MatrixMapRows" => for { - child <- matrix_ir(env.onlyRelational)(it) - newEnv = env.onlyRelational.createAgg.createScan - .bindEval(child.typ.rowEnv).bindEval("n_cols", TInt32) - .bindAgg(child.typ.entryEnv).bindScan(child.typ.rowEnv) - newRow <- ir_value_expr(newEnv)(it) + child <- matrix_ir(env)(it) + newRow <- ir_value_expr(env)(it) } yield MatrixMapRows(child, newRow) case "MatrixMapEntries" => for { child <- matrix_ir(env)(it) - newEntry <- ir_value_expr(env.onlyRelational.bindEval(child.typ.entryEnv))(it) + newEntry <- ir_value_expr(env)(it) } yield MatrixMapEntries(child, newEntry) case "MatrixUnionCols" => val joinType = identifier(it) for { - left <- matrix_ir(env.onlyRelational)(it) - right <- matrix_ir(env.onlyRelational)(it) + left <- matrix_ir(env)(it) + right <- matrix_ir(env)(it) } yield MatrixUnionCols(left, right, joinType) case "MatrixMapGlobals" => for { - child <- matrix_ir(env.onlyRelational)(it) - newGlobals <- ir_value_expr(env.onlyRelational.bindEval(child.typ.globalEnv))(it) + child <- matrix_ir(env)(it) + newGlobals <- ir_value_expr(env)(it) } yield MatrixMapGlobals(child, newGlobals) case "MatrixAggregateColsByKey" => for { - child <- matrix_ir(env.onlyRelational)(it) - entryExpr <- ir_value_expr(env.onlyRelational.createAgg.bindEval(child.typ.rowEnv).bindAgg(child.typ.entryEnv))(it) - colExpr <- ir_value_expr(env.onlyRelational.createAgg.bindEval(child.typ.globalEnv).bindAgg(child.typ.colEnv))(it) + child <- matrix_ir(env)(it) + entryExpr <- ir_value_expr(env)(it) + colExpr <- ir_value_expr(env)(it) } yield MatrixAggregateColsByKey(child, entryExpr, colExpr) case "MatrixAggregateRowsByKey" => for { - child <- matrix_ir(env.onlyRelational)(it) - entryExpr <- ir_value_expr(env.onlyRelational.createAgg.bindEval(child.typ.colEnv).bindAgg(child.typ.entryEnv))(it) - rowExpr <- ir_value_expr(env.onlyRelational.createAgg.bindEval(child.typ.globalEnv).bindAgg(child.typ.rowEnv))(it) + child <- matrix_ir(env)(it) + entryExpr <- ir_value_expr(env)(it) + rowExpr <- ir_value_expr(env)(it) } yield MatrixAggregateRowsByKey(child, entryExpr, rowExpr) case "MatrixRead" => val requestedTypeRaw = it.head match { @@ -1905,7 +1818,7 @@ object IRParser { val dropCols = boolean_literal(it) val dropRows = boolean_literal(it) val readerStr = string_literal(it) - val reader = MatrixReader.fromJson(env.onlyRelational, JsonMethods.parse(readerStr).asInstanceOf[JObject]) + val reader = MatrixReader.fromJson(env, JsonMethods.parse(readerStr).asInstanceOf[JObject]) val fullType = reader.fullMatrixType val requestedType = requestedTypeRaw match { case Left("None") => fullType @@ -1923,70 +1836,70 @@ object IRParser { val root = string_literal(it) val product = boolean_literal(it) for { - child <- matrix_ir(env.onlyRelational)(it) - table <- table_ir(env.onlyRelational)(it) + child <- matrix_ir(env)(it) + table <- table_ir(env)(it) } yield MatrixAnnotateRowsTable(child, table, root, product) case "MatrixAnnotateColsTable" => val root = string_literal(it) for { - child <- matrix_ir(env.onlyRelational)(it) - table <- table_ir(env.onlyRelational)(it) + child <- 
matrix_ir(env)(it) + table <- table_ir(env)(it) } yield MatrixAnnotateColsTable(child, table, root) case "MatrixExplodeRows" => val path = identifiers(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixExplodeRows(child, path) } case "MatrixExplodeCols" => val path = identifiers(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixExplodeCols(child, path) } case "MatrixChooseCols" => val oldIndices = int32_literals(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixChooseCols(child, oldIndices) } case "MatrixCollectColsByKey" => - matrix_ir(env.onlyRelational)(it).map(MatrixCollectColsByKey) + matrix_ir(env)(it).map(MatrixCollectColsByKey) case "MatrixRepartition" => val n = int32_literal(it) val strategy = int32_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixRepartition(child, n, strategy) } - case "MatrixUnionRows" => matrix_ir_children(env.onlyRelational)(it).map(MatrixUnionRows(_)) - case "MatrixDistinctByRow" => matrix_ir(env.onlyRelational)(it).map(MatrixDistinctByRow) + case "MatrixUnionRows" => matrix_ir_children(env)(it).map(MatrixUnionRows(_)) + case "MatrixDistinctByRow" => matrix_ir(env)(it).map(MatrixDistinctByRow) case "MatrixRowsHead" => val n = int64_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixRowsHead(child, n) } case "MatrixColsHead" => val n = int32_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixColsHead(child, n) } case "MatrixRowsTail" => val n = int64_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixRowsTail(child, n) } case "MatrixColsTail" => val n = int32_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixColsTail(child, n) } case "CastTableToMatrix" => val entriesField = identifier(it) val colsField = identifier(it) val colKey = identifiers(it) - table_ir(env.onlyRelational)(it).map { child => + table_ir(env)(it).map { child => CastTableToMatrix(child, entriesField, colsField, colKey) } case "MatrixToMatrixApply" => val config = string_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixToMatrixApply(child, RelationalFunctions.lookupMatrixToMatrix(env.ctx, config)) } case "MatrixRename" => @@ -1998,24 +1911,25 @@ object IRParser { val rowV = string_literals(it) val entryK = string_literals(it) val entryV = string_literals(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixRename(child, globalK.zip(globalV).toMap, colK.zip(colV).toMap, rowK.zip(rowV).toMap, entryK.zip(entryV).toMap) } case "MatrixFilterIntervals" => + val keyType = type_expr(it) val intervals = string_literal(it) val keep = boolean_literal(it) - matrix_ir(env.onlyRelational)(it).map { child => + matrix_ir(env)(it).map { child => MatrixFilterIntervals(child, JSONAnnotationImpex.importAnnotation(JsonMethods.parse(intervals), - TArray(TInterval(child.typ.rowKeyStruct)), + TArray(TInterval(keyType)), padNulls = false).asInstanceOf[IndexedSeq[Interval]], keep) } case "RelationalLetMatrixTable" => val name = identifier(it) for { - value <- ir_value_expr(env.onlyRelational)(it) - body <- matrix_ir(env.onlyRelational.bindRelational(name, value.typ))(it) + value <- ir_value_expr(env)(it) + body 
<- matrix_ir(env)(it) } yield RelationalLetMatrixTable(name, value, body) } } @@ -2026,7 +1940,8 @@ object IRParser { case "PyRowIntervalSparsifier" => val blocksOnly = boolean_literal(it) punctuation(it, ")") - ir_value_expr(env)(it).map { ir => + ir_value_expr(env)(it).map { ir_ => + val ir = annotateTypes(env.ctx, ir_, BindingEnv.empty).asInstanceOf[IR] val Row(starts: IndexedSeq[Long @unchecked], stops: IndexedSeq[Long @unchecked]) = CompileAndEvaluate[Row](env.ctx, ir) RowIntervalSparsifier(blocksOnly, starts, stops) @@ -2034,20 +1949,23 @@ object IRParser { case "PyBandSparsifier" => val blocksOnly = boolean_literal(it) punctuation(it, ")") - ir_value_expr(env)(it).map { ir => + ir_value_expr(env)(it).map { ir_ => + val ir = annotateTypes(env.ctx, ir_, BindingEnv.empty).asInstanceOf[IR] val Row(l: Long, u: Long) = CompileAndEvaluate[Row](env.ctx, ir) BandSparsifier(blocksOnly, l, u) } case "PyPerBlockSparsifier" => punctuation(it, ")") - ir_value_expr(env)(it).map { ir => + ir_value_expr(env)(it).map { ir_ => + val ir = annotateTypes(env.ctx, ir_, BindingEnv.empty).asInstanceOf[IR] val indices: IndexedSeq[Int] = CompileAndEvaluate[IndexedSeq[Int]](env.ctx, ir) PerBlockSparsifier(indices) } case "PyRectangleSparsifier" => punctuation(it, ")") - ir_value_expr(env)(it).map { ir => + ir_value_expr(env)(it).map { ir_ => + val ir = annotateTypes(env.ctx, ir_, BindingEnv.empty).asInstanceOf[IR] val rectangles: IndexedSeq[Long] = CompileAndEvaluate[IndexedSeq[Long]](env.ctx, ir) RectangleSparsifier(rectangles.grouped(4).toIndexedSeq) @@ -2089,56 +2007,56 @@ object IRParser { val name = identifier(it) val needs_dense = boolean_literal(it) for { - child <- blockmatrix_ir(env.onlyRelational)(it) - f <- ir_value_expr(env.onlyRelational.bindEval(name, child.typ.elementType))(it) + child <- blockmatrix_ir(env)(it) + f <- ir_value_expr(env)(it) } yield BlockMatrixMap(child, name, f, needs_dense) case "BlockMatrixMap2" => val lName = identifier(it) val rName = identifier(it) val sparsityStrategy = SparsityStrategy.fromString(identifier(it)) for { - left <- blockmatrix_ir(env.onlyRelational)(it) - right <- blockmatrix_ir(env.onlyRelational)(it) - f <- ir_value_expr(env.onlyRelational.bindEval(lName -> left.typ.elementType, rName -> right.typ.elementType))(it) + left <- blockmatrix_ir(env)(it) + right <- blockmatrix_ir(env)(it) + f <- ir_value_expr(env)(it) } yield BlockMatrixMap2(left, right, lName, rName, f, sparsityStrategy) case "BlockMatrixDot" => for { - left <- blockmatrix_ir(env.onlyRelational)(it) - right <- blockmatrix_ir(env.onlyRelational)(it) + left <- blockmatrix_ir(env)(it) + right <- blockmatrix_ir(env)(it) } yield BlockMatrixDot(left, right) case "BlockMatrixBroadcast" => val inIndexExpr = int32_literals(it) val shape = int64_literals(it) val blockSize = int32_literal(it) - blockmatrix_ir(env.onlyRelational)(it).map { child => + blockmatrix_ir(env)(it).map { child => BlockMatrixBroadcast(child, inIndexExpr, shape, blockSize) } case "BlockMatrixAgg" => val outIndexExpr = int32_literals(it) - blockmatrix_ir(env.onlyRelational)(it).map { child => + blockmatrix_ir(env)(it).map { child => BlockMatrixAgg(child, outIndexExpr) } case "BlockMatrixFilter" => val indices = literals(literals(int64_literal))(it) - blockmatrix_ir(env.onlyRelational)(it).map { child => + blockmatrix_ir(env)(it).map { child => BlockMatrixFilter(child, indices) } case "BlockMatrixDensify" => - blockmatrix_ir(env.onlyRelational)(it).map(BlockMatrixDensify) + blockmatrix_ir(env)(it).map(BlockMatrixDensify) case 
"BlockMatrixSparsify" => for { - sparsifier <- blockmatrix_sparsifier(env.onlyRelational)(it) - child <- blockmatrix_ir(env.onlyRelational)(it) + sparsifier <- blockmatrix_sparsifier(env)(it) + child <- blockmatrix_ir(env)(it) } yield BlockMatrixSparsify(child, sparsifier) case "BlockMatrixSlice" => val slices = literals(literals(int64_literal))(it) - blockmatrix_ir(env.onlyRelational)(it).map { child => + blockmatrix_ir(env)(it).map { child => BlockMatrixSlice(child, slices.map(_.toFastSeq).toFastSeq) } case "ValueToBlockMatrix" => val shape = int64_literals(it) val blockSize = int32_literal(it) - ir_value_expr(env.onlyRelational)(it).map { child => + ir_value_expr(env)(it).map { child => ValueToBlockMatrix(child, shape, blockSize) } case "BlockMatrixRandom" => @@ -2150,20 +2068,68 @@ object IRParser { case "RelationalLetBlockMatrix" => val name = identifier(it) for { - value <- ir_value_expr(env.onlyRelational)(it) - body <- blockmatrix_ir(env.onlyRelational.bindRelational(name, value.typ))(it) + value <- ir_value_expr(env)(it) + body <- blockmatrix_ir(env)(it) } yield RelationalLetBlockMatrix(name, value, body) } } + def annotateTypes(ctx: ExecuteContext, ir: BaseIR, env: BindingEnv[Type]): BaseIR = { + def run(ir: BaseIR, env: BindingEnv[Type]): BaseIR = { + val rw = ir.mapChildrenWithEnv(env)(run) + rw match { + case x: Ref => + x._typ = env.eval(x.name) + x + case x: Recur => + val TTuple(IndexedSeq(_, TupleField(_, rt))) = env.eval.lookup(x.name) + x._typ = rt + x + case x: ApplyAggOp => + x.aggSig.initOpArgs = x.initOpArgs.map(_.typ) + x.aggSig.seqOpArgs = x.seqOpArgs.map(_.typ) + x + case x: ApplyScanOp => + x.aggSig.initOpArgs = x.initOpArgs.map(_.typ) + x.aggSig.seqOpArgs = x.seqOpArgs.map(_.typ) + x + case x: ApplyComparisonOp => + x.op = x.op.copy(x.l.typ, x.r.typ) + x + case MakeArray(args, typ) => + MakeArray.unify(ctx, args, typ) + case x@InitOp(_, _, BasicPhysicalAggSig(_, FoldStateSig(t, accumName, otherAccumName, combIR))) => + run(combIR, BindingEnv.empty.bindEval(accumName -> t.virtualType, otherAccumName -> t.virtualType)) + x + case x@SeqOp(_, _, BasicPhysicalAggSig(_, FoldStateSig(t, accumName, otherAccumName, combIR))) => + run(combIR, BindingEnv.empty.bindEval(accumName -> t.virtualType, otherAccumName -> t.virtualType)) + x + case x@CombOp(_, _, BasicPhysicalAggSig(_, FoldStateSig(t, accumName, otherAccumName, combIR))) => + run(combIR, BindingEnv.empty.bindEval(accumName -> t.virtualType, otherAccumName -> t.virtualType)) + x + case x@ResultOp(_, BasicPhysicalAggSig(_, FoldStateSig(t, accumName, otherAccumName, combIR))) => + run(combIR, BindingEnv.empty.bindEval(accumName -> t.virtualType, otherAccumName -> t.virtualType)) + x + case Apply(name, typeArgs, args, rt, errorID) => + invoke(name, rt, typeArgs, errorID, args: _*) + case _ => + rw + } + } + + run(ir, env) + } + def parse[T](s: String, f: (TokenIterator) => T): T = { - val t = System.nanoTime() val it = IRLexer.parse(s).toIterator.buffered f(it) } - def parse_value_ir(s: String, env: IRParserEnvironment): IR = { - parse(s, ir_value_expr(env)(_).run()) + def parse_value_ir(s: String, env: IRParserEnvironment, typeEnv: BindingEnv[Type] = BindingEnv.empty): IR = { + var ir = parse(s, ir_value_expr(env)(_).run()) + ir = annotateTypes(env.ctx, ir, typeEnv).asInstanceOf[IR] + TypeCheck(env.ctx, ir, typeEnv) + ir } def parse_value_ir(ctx: ExecuteContext, s: String): IR = { @@ -2172,13 +2138,28 @@ object IRParser { def parse_table_ir(ctx: ExecuteContext, s: String): TableIR = parse_table_ir(s, 
IRParserEnvironment(ctx)) - def parse_table_ir(s: String, env: IRParserEnvironment): TableIR = parse(s, table_ir(env)(_).run()) + def parse_table_ir(s: String, env: IRParserEnvironment): TableIR = { + var ir = parse(s, table_ir(env)(_).run()) + ir = annotateTypes(env.ctx, ir, BindingEnv.empty).asInstanceOf[TableIR] + TypeCheck(env.ctx, ir) + ir + } - def parse_matrix_ir(s: String, env: IRParserEnvironment): MatrixIR = parse(s, matrix_ir(env)(_).run()) + def parse_matrix_ir(s: String, env: IRParserEnvironment): MatrixIR = { + var ir = parse(s, matrix_ir(env)(_).run()) + ir = annotateTypes(env.ctx, ir, BindingEnv.empty).asInstanceOf[MatrixIR] + TypeCheck(env.ctx, ir) + ir + } def parse_matrix_ir(ctx: ExecuteContext, s: String): MatrixIR = parse_matrix_ir(s, IRParserEnvironment(ctx)) - def parse_blockmatrix_ir(s: String, env: IRParserEnvironment): BlockMatrixIR = parse(s, blockmatrix_ir(env)(_).run()) + def parse_blockmatrix_ir(s: String, env: IRParserEnvironment): BlockMatrixIR = { + var ir = parse(s, blockmatrix_ir(env)(_).run()) + ir = annotateTypes(env.ctx, ir, BindingEnv.empty).asInstanceOf[BlockMatrixIR] + TypeCheck(env.ctx, ir) + ir + } def parse_blockmatrix_ir(ctx: ExecuteContext, s: String): BlockMatrixIR = parse_blockmatrix_ir(s, IRParserEnvironment(ctx)) diff --git a/hail/src/main/scala/is/hail/expr/ir/Pretty.scala b/hail/src/main/scala/is/hail/expr/ir/Pretty.scala index 353b2d1760f..c2bb7922ea9 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Pretty.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Pretty.scala @@ -183,12 +183,10 @@ class Pretty(width: Int, ribbonWidth: Int, elideLiterals: Boolean, maxLen: Int, single(Pretty.prettyBooleanLiteral(isScan)) else FastSeq(prettyIdentifier(name), Pretty.prettyBooleanLiteral(isScan)) - case TailLoop(name, args, _) if !elideBindings => - FastSeq(prettyIdentifier(name), prettyIdentifiers(args.map(_._1).toFastSeq)) - case Recur(name, _, t) => if (elideBindings) - single(t.parsableString()) - else - FastSeq(prettyIdentifier(name), t.parsableString()) + case TailLoop(name, args, returnType, _) if !elideBindings => + FastSeq(prettyIdentifier(name), prettyIdentifiers(args.map(_._1).toFastSeq), returnType.parsableString()) + case Recur(name, _, t) if !elideBindings => + FastSeq(prettyIdentifier(name)) // case Ref(name, t) if t != null => FastSeq(prettyIdentifier(name), t.parsableString()) // For debug purposes case Ref(name, _) => single(prettyIdentifier(name)) case RelationalRef(name, t) => if (elideBindings) @@ -276,7 +274,7 @@ class Pretty(width: Int, ribbonWidth: Int, elideLiterals: Boolean, maxLen: Int, case NDArrayInv(_, errorID) => single(s"$errorID") case ArraySort(_, l, r, _) if !elideBindings => FastSeq(prettyIdentifier(l), prettyIdentifier(r)) case ArrayRef(_,_, errorID) => single(s"$errorID") - case ApplyIR(function, typeArgs, _, errorID) => FastSeq(s"$errorID", prettyIdentifier(function), prettyTypes(typeArgs), ir.typ.parsableString()) + case ApplyIR(function, typeArgs, _, _, errorID) => FastSeq(s"$errorID", prettyIdentifier(function), prettyTypes(typeArgs), ir.typ.parsableString()) case Apply(function, typeArgs, _, t, errorID) => FastSeq(s"$errorID", prettyIdentifier(function), prettyTypes(typeArgs), t.parsableString()) case ApplySeeded(function, _, rngState, staticUID, t) => FastSeq(prettyIdentifier(function), staticUID.toString, t.parsableString()) case ApplySpecial(function, typeArgs, _, t, errorID) => FastSeq(s"$errorID", prettyIdentifier(function), prettyTypes(typeArgs), t.parsableString()) @@ -414,12 +412,14 @@ class 
Pretty(width: Int, ribbonWidth: Int, elideLiterals: Boolean, maxLen: Int, prettyStrings(entryKV.map(_._1)), prettyStrings(entryKV.map(_._2))) case TableFilterIntervals(child, intervals, keep) => FastSeq( + child.typ.keyType.parsableString(), prettyStringLiteral(Serialization.write( JSONAnnotationImpex.exportAnnotation(intervals, TArray(TInterval(child.typ.keyType))) )(RelationalSpec.formats)), Pretty.prettyBooleanLiteral(keep)) case MatrixFilterIntervals(child, intervals, keep) => FastSeq( + child.typ.rowType.parsableString(), prettyStringLiteral(Serialization.write( JSONAnnotationImpex.exportAnnotation(intervals, TArray(TInterval(child.typ.rowKeyStruct))) )(RelationalSpec.formats)), @@ -494,7 +494,7 @@ class Pretty(width: Int, ribbonWidth: Int, elideLiterals: Boolean, maxLen: Int, if (i > 0) Some(FastSeq()) else None case _: Switch => if (i > 0) Some(FastSeq()) else None - case TailLoop(name, args, body) => if (i == args.length) + case TailLoop(name, args, _, body) => if (i == args.length) Some(args.map { case (name, ir) => name -> "loopvar" } :+ name -> "loop") else None case StreamMap(a, name, _) => diff --git a/hail/src/main/scala/is/hail/expr/ir/PruneDeadFields.scala b/hail/src/main/scala/is/hail/expr/ir/PruneDeadFields.scala index 4bf3dd0ee76..eea3d832126 100644 --- a/hail/src/main/scala/is/hail/expr/ir/PruneDeadFields.scala +++ b/hail/src/main/scala/is/hail/expr/ir/PruneDeadFields.scala @@ -1511,7 +1511,7 @@ object PruneDeadFields { ) memoizeMatrixIR(ctx, child, dep, memo) BindingEnv.empty - case TailLoop(name, params, body) => + case TailLoop(name, params, _, body) => val bodyEnv = memoizeValueIR(ctx, body, body.typ, memo) val paramTypes = params.map{ case (paramName, paramIR) => unifySeq(paramIR.typ, uses(paramName, bodyEnv.eval)) diff --git a/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala b/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala index b8ee0de9978..e2c6b0c317e 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala @@ -158,7 +158,7 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: ExecuteContext) { case Let(name, value, body) => addBinding(name, value) case RelationalLet(name, value, body) => addBinding(name, value) case RelationalLetTable(name, value, body) => addBinding(name, value) - case TailLoop(loopName, params, body) => + case TailLoop(loopName, params, _, body) => addBinding(loopName, body) val argDefs = Array.fill(params.length)(new BoxedArrayBuilder[IR]()) refMap.getOrElse(loopName, FastSeq()).map(_.t).foreach { case Recur(_, args, _) => @@ -173,7 +173,7 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: ExecuteContext) { i += 1 } states.bind(node, s) - case x@ApplyIR(_, _, args, _) => + case x@ApplyIR(_, _, args, _, _) => x.refIdx.foreach { case (n, i) => addBinding(n, args(i)) } case ArraySort(a, l, r, c) => addElementBinding(l, a, makeRequired = true) @@ -543,7 +543,7 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: ExecuteContext) { requiredness.unionFrom(lookup(body)) case RelationalLet(name, value, body) => requiredness.unionFrom(lookup(body)) - case TailLoop(name, params, body) => + case TailLoop(name, params, _, body) => requiredness.unionFrom(lookup(body)) case x: BaseRef => requiredness.unionFrom(defs(node).map(tcoerce[TypeWithRequiredness])) diff --git a/hail/src/main/scala/is/hail/expr/ir/Simplify.scala b/hail/src/main/scala/is/hail/expr/ir/Simplify.scala index 492510e0707..a963f36b350 100644 --- 
a/hail/src/main/scala/is/hail/expr/ir/Simplify.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Simplify.scala @@ -276,15 +276,15 @@ object Simplify { case CastRename(x, t) if x.typ == t => x case CastRename(CastRename(x, _), t) => CastRename(x, t) - case ApplyIR("indexArray", _, Seq(a, i@I32(v)), errorID) if v >= 0 => + case ApplyIR("indexArray", _, Seq(a, i@I32(v)), _, errorID) if v >= 0 => ArrayRef(a, i, errorID) - case ApplyIR("contains", _, Seq(CastToArray(x), element), _) if x.typ.isInstanceOf[TSet] => invoke("contains", TBoolean, x, element) + case ApplyIR("contains", _, Seq(CastToArray(x), element), _, _) if x.typ.isInstanceOf[TSet] => invoke("contains", TBoolean, x, element) - case ApplyIR("contains", _, Seq(Literal(t, v), element), _) if t.isInstanceOf[TArray] => + case ApplyIR("contains", _, Seq(Literal(t, v), element), _, _) if t.isInstanceOf[TArray] => invoke("contains", TBoolean, Literal(TSet(t.asInstanceOf[TArray].elementType), v.asInstanceOf[IndexedSeq[_]].toSet), element) - case ApplyIR("contains", _, Seq(ToSet(x), element), _) if x.typ.isInstanceOf[TArray] => invoke("contains", TBoolean, x, element) + case ApplyIR("contains", _, Seq(ToSet(x), element), _, _) if x.typ.isInstanceOf[TArray] => invoke("contains", TBoolean, x, element) case x: ApplyIR if x.inline || x.body.size < 10 => x.explicitNode @@ -635,7 +635,7 @@ object Simplify { // ArrayAgg(GetField(Ref(uid, rowsAndGlobal.typ), "rows"), "row", query))) // } - case ApplyIR("annotate", _, Seq(s, MakeStruct(fields)), _) => + case ApplyIR("annotate", _, Seq(s, MakeStruct(fields)), _, _) => InsertFields(s, fields) // simplify Boolean equality diff --git a/hail/src/main/scala/is/hail/expr/ir/TableIR.scala b/hail/src/main/scala/is/hail/expr/ir/TableIR.scala index cf38e3b00aa..fa1e40fbc08 100644 --- a/hail/src/main/scala/is/hail/expr/ir/TableIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/TableIR.scala @@ -8,7 +8,7 @@ import is.hail.backend.{ExecuteContext, HailStateManager, HailTaskContext, TaskF import is.hail.expr.ir import is.hail.expr.ir.functions.{BlockMatrixToTableFunction, IntervalFunctions, MatrixToTableFunction, TableToTableFunction} import is.hail.expr.ir.lowering._ -import is.hail.expr.ir.streams.StreamProducer +import is.hail.expr.ir.streams.{StreamProducer, StreamUtils} import is.hail.io._ import is.hail.io.avro.AvroTableReader import is.hail.io.fs.FS @@ -72,6 +72,8 @@ abstract sealed class TableIR extends BaseIR { } def pyUnpersist(): TableIR = unpersist() + + def typecheck(): Unit = {} } object TableLiteral { @@ -1684,15 +1686,14 @@ case class TableRead(typ: TableType, dropRows: Boolean, tr: TableReader) extends } case class TableParallelize(rowsAndGlobal: IR, nPartitions: Option[Int] = None) extends TableIR { - require(rowsAndGlobal.typ.isInstanceOf[TStruct]) - require(rowsAndGlobal.typ.asInstanceOf[TStruct].fieldNames.sameElements(Array("rows", "global"))) - require(nPartitions.forall(_ > 0)) + override def typecheck(): Unit = { + assert(rowsAndGlobal.typ.isInstanceOf[TStruct]) + assert(rowsAndGlobal.typ.asInstanceOf[TStruct].fieldNames.sameElements(Array("rows", "global"))) + assert(nPartitions.forall(_ > 0)) + } lazy val rowCountUpperBound: Option[Long] = None - private val rowsType = rowsAndGlobal.typ.asInstanceOf[TStruct].fieldType("rows").asInstanceOf[TArray] - private val globalsType = rowsAndGlobal.typ.asInstanceOf[TStruct].fieldType("global").asInstanceOf[TStruct] - val childrenSeq: IndexedSeq[BaseIR] = FastSeq(rowsAndGlobal) def copy(newChildren: IndexedSeq[BaseIR]): TableParallelize = { @@ 
-1700,10 +1701,14 @@ case class TableParallelize(rowsAndGlobal: IR, nPartitions: Option[Int] = None) TableParallelize(newrowsAndGlobal, nPartitions) } - val typ: TableType = TableType( - rowsType.elementType.asInstanceOf[TStruct], - FastSeq(), - globalsType) + lazy val typ: TableType = { + def rowsType = rowsAndGlobal.typ.asInstanceOf[TStruct].fieldType("rows").asInstanceOf[TArray] + def globalsType = rowsAndGlobal.typ.asInstanceOf[TStruct].fieldType("global").asInstanceOf[TStruct] + TableType( + rowsType.elementType.asInstanceOf[TStruct], + FastSeq(), + globalsType) + } protected[ir] override def execute(ctx: ExecuteContext, r: LoweringAnalyses): TableExecuteIntermediate = { val (ptype: PStruct, res) = CompileAndEvaluate._apply(ctx, rowsAndGlobal, optimize = false) match { @@ -1776,14 +1781,16 @@ case class TableParallelize(rowsAndGlobal: IR, nPartitions: Option[Int] = None) * - Otherwise, if 'isSorted' is false and n < 'keys.length', then shuffle. */ case class TableKeyBy(child: TableIR, keys: IndexedSeq[String], isSorted: Boolean = false) extends TableIR { - private val fields = child.typ.rowType.fieldNames.toSet - assert(keys.forall(fields.contains), s"${ keys.filter(k => !fields.contains(k)).mkString(", ") }") + override def typecheck(): Unit = { + val fields = child.typ.rowType.fieldNames.toSet + assert(keys.forall(fields.contains), s"${keys.filter(k => !fields.contains(k)).mkString(", ")}") + } lazy val rowCountUpperBound: Option[Long] = child.rowCountUpperBound val childrenSeq: IndexedSeq[BaseIR] = Array(child) - val typ: TableType = child.typ.copy(key = keys) + lazy val typ: TableType = child.typ.copy(key = keys) def definitelyDoesNotShuffle: Boolean = child.typ.key.startsWith(keys) || isSorted @@ -1794,7 +1801,7 @@ case class TableKeyBy(child: TableIR, keys: IndexedSeq[String], isSorted: Boolea protected[ir] override def execute(ctx: ExecuteContext, r: LoweringAnalyses): TableExecuteIntermediate = { val tv = child.execute(ctx, r).asTableValue(ctx) - new TableValueIntermediate(tv.copy(typ = typ, rvd = tv.rvd.enforceKey(ctx, keys, isSorted))) + TableValueIntermediate(tv.copy(typ = typ, rvd = tv.rvd.enforceKey(ctx, keys, isSorted))) } } @@ -1820,24 +1827,29 @@ case class TableGen(contexts: IR, errorId: Int = ErrorIDs.NO_ERROR ) extends TableIR { - TypeCheck.coerce[TStream]("contexts", contexts.typ) + override def typecheck(): Unit = { + TypeCheck.coerce[TStream]("contexts", contexts.typ) + TypeCheck.coerce[TStruct]("globals", globals.typ) + val bodyType = TypeCheck.coerce[TStream]("body", body.typ) + val rowType = TypeCheck.coerce[TStruct]("body.elementType", bodyType.elementType) + + if (!partitioner.kType.isSubsetOf(rowType)) + throw new IllegalArgumentException( + s"""'partitioner': key type contains fields absent from row type + | Key type: ${partitioner.kType} + | Row type: $rowType""".stripMargin + ) + } - private val globalType = + private def globalType = TypeCheck.coerce[TStruct]("globals", globals.typ) - private val rowType = { + private def rowType = { val bodyType = TypeCheck.coerce[TStream]( "body", body.typ) TypeCheck.coerce[TStruct]( "body.elementType", bodyType.elementType) } - if (!partitioner.kType.isSubsetOf(rowType)) - throw new IllegalArgumentException( - s"""'partitioner': key type contains fields absent from row type - | Key type: ${partitioner.kType} - | Row type: $rowType""".stripMargin - ) - - override def typ: TableType = + override lazy val typ: TableType = TableType(rowType, partitioner.kType.fieldNames, globalType) override val rowCountUpperBound: 
Option[Long] = @@ -1910,7 +1922,7 @@ case class TableRange(n: Int, nPartitions: Int) extends TableIR { case class TableFilter(child: TableIR, pred: IR) extends TableIR { val childrenSeq: IndexedSeq[BaseIR] = Array(child, pred) - val typ: TableType = child.typ + def typ: TableType = child.typ lazy val rowCountUpperBound: Option[Long] = child.rowCountUpperBound @@ -2048,22 +2060,26 @@ case class TableJoin(left: TableIR, right: TableIR, joinType: String, joinKey: I extends TableIR { require(joinKey >= 0) - require(left.typ.key.length >= joinKey) - require(right.typ.key.length >= joinKey) - require(left.typ.keyType.truncate(joinKey) isIsomorphicTo right.typ.keyType.truncate(joinKey)) - require(left.typ.globalType.fieldNames.toSet - .intersect(right.typ.globalType.fieldNames.toSet) - .isEmpty) require(joinType == "inner" || joinType == "left" || joinType == "right" || joinType == "outer") + override def typecheck(): Unit = { + assert(left.typ.key.length >= joinKey) + assert(right.typ.key.length >= joinKey) + assert(left.typ.keyType.truncate(joinKey) isIsomorphicTo right.typ.keyType.truncate(joinKey)) + assert( + left.typ.globalType.fieldNames.toSet + .intersect(right.typ.globalType.fieldNames.toSet) + .isEmpty) + } + val childrenSeq: IndexedSeq[BaseIR] = Array(left, right) lazy val rowCountUpperBound: Option[Long] = None - private val newRowType = { + lazy val typ: TableType = { val leftRowType = left.typ.rowType val rightRowType = right.typ.rowType val leftKey = left.typ.key.take(joinKey) @@ -2077,14 +2093,12 @@ case class TableJoin(left: TableIR, right: TableIR, joinType: String, joinKey: I .nonEmpty) throw new RuntimeException(s"invalid join: \n left value: $leftValueType\n right value: $rightValueType") - leftKeyType ++ leftValueType ++ rightValueType - } - - private val newGlobalType = left.typ.globalType ++ right.typ.globalType - - private val newKey = left.typ.key ++ right.typ.key.drop(joinKey) + val newRowType = leftKeyType ++ leftValueType ++ rightValueType + val newGlobalType = left.typ.globalType ++ right.typ.globalType + val newKey = left.typ.key ++ right.typ.key.drop(joinKey) - val typ: TableType = TableType(newRowType, newKey, newGlobalType) + TableType(newRowType, newKey, newGlobalType) + } def copy(newChildren: IndexedSeq[BaseIR]): TableJoin = { assert(newChildren.length == 2) @@ -2112,8 +2126,10 @@ case class TableIntervalJoin( lazy val rowCountUpperBound: Option[Long] = left.rowCountUpperBound - val rightType: Type = if (product) TArray(right.typ.valueType) else right.typ.valueType - val typ: TableType = left.typ.copy(rowType = left.typ.rowType.appendKey(root, rightType)) + lazy val typ: TableType = { + val rightType: Type = if (product) TArray(right.typ.valueType) else right.typ.valueType + left.typ.copy(rowType = left.typ.rowType.appendKey(root, rightType)) + } override def copy(newChildren: IndexedSeq[BaseIR]): TableIR = TableIntervalJoin(newChildren(0).asInstanceOf[TableIR], newChildren(1).asInstanceOf[TableIR], root, product) @@ -2194,21 +2210,25 @@ case class TableIntervalJoin( * is likely the last. 
*/ case class TableMultiWayZipJoin(childrenSeq: IndexedSeq[TableIR], fieldName: String, globalName: String) extends TableIR { - require(childrenSeq.length > 0, "there must be at least one table as an argument") + require(childrenSeq.nonEmpty, "there must be at least one table as an argument") - private val first = childrenSeq.head - private val rest = childrenSeq.tail + override def typecheck(): Unit = { + val first = childrenSeq.head + val rest = childrenSeq.tail + assert(rest.forall(e => e.typ.rowType == first.typ.rowType), "all rows must have the same type") + assert(rest.forall(e => e.typ.key == first.typ.key), "all keys must be the same") + assert( + rest.forall(e => e.typ.globalType == first.typ.globalType), + "all globals must have the same type") + } - lazy val rowCountUpperBound: Option[Long] = None + private def first = childrenSeq.head - require(rest.forall(e => e.typ.rowType == first.typ.rowType), "all rows must have the same type") - require(rest.forall(e => e.typ.key == first.typ.key), "all keys must be the same") - require(rest.forall(e => e.typ.globalType == first.typ.globalType), - "all globals must have the same type") + lazy val rowCountUpperBound: Option[Long] = None - private val newGlobalType = TStruct(globalName -> TArray(first.typ.globalType)) - private val newValueType = TStruct(fieldName -> TArray(first.typ.valueType)) - private val newRowType = first.typ.keyType ++ newValueType + private def newGlobalType = TStruct(globalName -> TArray(first.typ.globalType)) + private def newValueType = TStruct(fieldName -> TArray(first.typ.valueType)) + private def newRowType = first.typ.keyType ++ newValueType lazy val typ: TableType = first.typ.copy( rowType = newRowType, @@ -2294,15 +2314,18 @@ case class TableMultiWayZipJoin(childrenSeq: IndexedSeq[TableIR], fieldName: Str } case class TableLeftJoinRightDistinct(left: TableIR, right: TableIR, root: String) extends TableIR { - require(right.typ.keyType isPrefixOf left.typ.keyType, - s"\n L: ${ left.typ }\n R: ${ right.typ }") + override def typecheck(): Unit = { + assert( + right.typ.keyType isPrefixOf left.typ.keyType, + s"\n L: ${left.typ}\n R: ${right.typ}") + } lazy val rowCountUpperBound: Option[Long] = left.rowCountUpperBound lazy val childrenSeq: IndexedSeq[BaseIR] = Array(left, right) - private val newRowType = left.typ.rowType.structInsert(right.typ.valueType, List(root))._1 - val typ: TableType = left.typ.copy(rowType = newRowType) + lazy val typ: TableType = left.typ.copy( + rowType = left.typ.rowType.structInsert(right.typ.valueType, List(root))) override def partitionCounts: Option[IndexedSeq[Long]] = left.partitionCounts @@ -2337,11 +2360,18 @@ case class TableMapPartitions(child: TableIR, requestedKey: Int, allowedOverlap: Int ) extends TableIR { - assert(body.typ.isInstanceOf[TStream], s"${ body.typ }") - assert(allowedOverlap >= -1 && allowedOverlap <= child.typ.key.size) - assert(requestedKey >= 0 && requestedKey <= child.typ.key.size) + override def typecheck(): Unit = { + assert(body.typ.isInstanceOf[TStream], s"${body.typ}") + assert(allowedOverlap >= -1) + assert(allowedOverlap <= child.typ.key.size) + assert(requestedKey >= 0) + assert(requestedKey <= child.typ.key.size) + assert(StreamUtils.isIterationLinear(body, partitionStreamName), "must iterate over the partition exactly once") + val newRowType = body.typ.asInstanceOf[TStream].elementType.asInstanceOf[TStruct] + child.typ.key.foreach { k => if (!newRowType.hasField(k)) throw new RuntimeException(s"prev key: ${child.typ.key}, new row: 
${newRowType}") } + } - lazy val typ = child.typ.copy( + lazy val typ: TableType = child.typ.copy( rowType = body.typ.asInstanceOf[TStream].elementType.asInstanceOf[TStruct]) lazy val childrenSeq: IndexedSeq[BaseIR] = Array(child, body) @@ -2406,11 +2436,16 @@ case class TableMapPartitions(child: TableIR, // Must leave key fields unchanged. case class TableMapRows(child: TableIR, newRow: IR) extends TableIR { + override def typecheck(): Unit = { + val newFieldSet = newRow.typ.asInstanceOf[TStruct].fieldNames.toSet + assert(child.typ.key.forall(newFieldSet.contains)) + } + val childrenSeq: IndexedSeq[BaseIR] = Array(child, newRow) lazy val rowCountUpperBound: Option[Long] = child.rowCountUpperBound - val typ: TableType = child.typ.copy(rowType = newRow.typ.asInstanceOf[TStruct]) + lazy val typ: TableType = child.typ.copy(rowType = newRow.typ.asInstanceOf[TStruct]) def copy(newChildren: IndexedSeq[BaseIR]): TableMapRows = { assert(newChildren.length == 2) @@ -2729,7 +2764,7 @@ case class TableMapGlobals(child: TableIR, newGlobals: IR) extends TableIR { lazy val rowCountUpperBound: Option[Long] = child.rowCountUpperBound - val typ: TableType = + lazy val typ: TableType = child.typ.copy(globalType = newGlobals.typ.asInstanceOf[TStruct]) def copy(newChildren: IndexedSeq[BaseIR]): TableMapGlobals = { @@ -2758,24 +2793,19 @@ case class TableMapGlobals(child: TableIR, newGlobals: IR) extends TableIR { case class TableExplode(child: TableIR, path: IndexedSeq[String]) extends TableIR { assert(path.nonEmpty) - assert(!child.typ.key.contains(path.head)) + + override def typecheck(): Unit = { + assert(!child.typ.key.contains(path.head)) + } lazy val rowCountUpperBound: Option[Long] = None lazy val childrenSeq: IndexedSeq[BaseIR] = Array(child) - private val childRowType = child.typ.rowType + private def childRowType = child.typ.rowType - private val length: IR = { - Coalesce(FastSeq( - ArrayLen(CastToArray( - path.foldLeft[IR](Ref("row", childRowType))((struct, field) => - GetField(struct, field)))), - 0)) - } - - val idx = Ref(genUID(), TInt32) - val newRow: InsertFields = { + private[this] lazy val idx = Ref(genUID(), TInt32) + private[this] lazy val newRow: InsertFields = { val refs = path.init.scanLeft(Ref("row", childRowType))((struct, name) => Ref(genUID(), tcoerce[TStruct](struct.typ).field(name).typ)) @@ -2789,7 +2819,7 @@ case class TableExplode(child: TableIR, path: IndexedSeq[String]) extends TableI }.asInstanceOf[InsertFields] } - val typ: TableType = child.typ.copy(rowType = newRow.typ) + lazy val typ: TableType = child.typ.copy(rowType = newRow.typ) def copy(newChildren: IndexedSeq[BaseIR]): TableExplode = { assert(newChildren.length == 1) @@ -2799,6 +2829,14 @@ case class TableExplode(child: TableIR, path: IndexedSeq[String]) extends TableI protected[ir] override def execute(ctx: ExecuteContext, r: LoweringAnalyses): TableExecuteIntermediate = { val prev = child.execute(ctx, r).asTableValue(ctx) + val length: IR = + Coalesce(FastSeq( + ArrayLen(CastToArray( + path.foldLeft[IR](Ref("row", childRowType)) { (struct, field) => + GetField(struct, field) + })), + 0)) + val (len, l) = Compile[AsmFunction2RegionLongInt](ctx, FastSeq(("row", SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(prev.rvd.rowPType)))), FastSeq(classInfo[Region], LongInfo), IntInfo, @@ -2816,7 +2854,7 @@ case class TableExplode(child: TableIR, path: IndexedSeq[String]) extends TableI prev.rvd.typ.key.takeWhile(_ != path.head) ) val fsBc = ctx.fsBc - new TableValueIntermediate( + TableValueIntermediate( 
TableValue(ctx, typ, prev.globals, prev.rvd.boundary.mapPartitionsWithIndex(rvdType) { (i, ctx, it) => @@ -2844,8 +2882,11 @@ case class TableExplode(child: TableIR, path: IndexedSeq[String]) extends TableI case class TableUnion(childrenSeq: IndexedSeq[TableIR]) extends TableIR { assert(childrenSeq.nonEmpty) - assert(childrenSeq.tail.forall(_.typ.rowType == childrenSeq(0).typ.rowType)) - assert(childrenSeq.tail.forall(_.typ.key == childrenSeq(0).typ.key)) + + override def typecheck(): Unit = { + assert(childrenSeq.tail.forall(_.typ.rowType == childrenSeq(0).typ.rowType)) + assert(childrenSeq.tail.forall(_.typ.key == childrenSeq(0).typ.key)) + } lazy val rowCountUpperBound: Option[Long] = { val definedChildren = childrenSeq.flatMap(_.rowCountUpperBound) @@ -2859,11 +2900,11 @@ case class TableUnion(childrenSeq: IndexedSeq[TableIR]) extends TableIR { TableUnion(newChildren.map(_.asInstanceOf[TableIR])) } - val typ: TableType = childrenSeq(0).typ + def typ: TableType = childrenSeq(0).typ protected[ir] override def execute(ctx: ExecuteContext, r: LoweringAnalyses): TableExecuteIntermediate = { val tvs = childrenSeq.map(_.execute(ctx, r).asTableValue(ctx)) - new TableValueIntermediate( + TableValueIntermediate( tvs(0).copy( rvd = RVD.union(RVD.unify(ctx, tvs.map(_.rvd)), tvs(0).typ.key.length, ctx))) } @@ -2881,7 +2922,7 @@ case class MatrixRowsTable(child: MatrixIR) extends TableIR { MatrixRowsTable(newChildren(0).asInstanceOf[MatrixIR]) } - val typ: TableType = child.typ.rowsTableType + def typ: TableType = child.typ.rowsTableType } case class MatrixColsTable(child: MatrixIR) extends TableIR { @@ -2894,7 +2935,7 @@ case class MatrixColsTable(child: MatrixIR) extends TableIR { MatrixColsTable(newChildren(0).asInstanceOf[MatrixIR]) } - val typ: TableType = child.typ.colsTableType + def typ: TableType = child.typ.colsTableType } case class MatrixEntriesTable(child: MatrixIR) extends TableIR { @@ -2907,7 +2948,7 @@ case class MatrixEntriesTable(child: MatrixIR) extends TableIR { MatrixEntriesTable(newChildren(0).asInstanceOf[MatrixIR]) } - val typ: TableType = child.typ.entriesTableType + def typ: TableType = child.typ.entriesTableType } case class TableDistinct(child: TableIR) extends TableIR { @@ -2920,7 +2961,7 @@ case class TableDistinct(child: TableIR) extends TableIR { TableDistinct(newChild.asInstanceOf[TableIR]) } - val typ: TableType = child.typ + def typ: TableType = child.typ protected[ir] override def execute(ctx: ExecuteContext, r: LoweringAnalyses): TableExecuteIntermediate = { val prev = child.execute(ctx, r).asTableValue(ctx) @@ -2933,10 +2974,14 @@ case class TableKeyByAndAggregate( expr: IR, newKey: IR, nPartitions: Option[Int] = None, - bufferSize: Int) extends TableIR { - require(expr.typ.isInstanceOf[TStruct]) - require(newKey.typ.isInstanceOf[TStruct]) - require(bufferSize > 0) + bufferSize: Int +) extends TableIR { + assert(bufferSize > 0) + + override def typecheck(): Unit = { + assert(expr.typ.isInstanceOf[TStruct]) + assert(newKey.typ.isInstanceOf[TStruct]) + } lazy val childrenSeq: IndexedSeq[BaseIR] = Array(child, expr, newKey) @@ -2947,8 +2992,9 @@ case class TableKeyByAndAggregate( TableKeyByAndAggregate(newChild, newExpr, newNewKey, nPartitions, bufferSize) } - private val keyType = newKey.typ.asInstanceOf[TStruct] - val typ: TableType = TableType(rowType = keyType ++ tcoerce[TStruct](expr.typ), + private lazy val keyType = newKey.typ.asInstanceOf[TStruct] + lazy val typ: TableType = TableType( + rowType = keyType ++ tcoerce[TStruct](expr.typ), globalType = 
child.typ.globalType, key = keyType.fieldNames ) @@ -3094,7 +3140,9 @@ case class TableKeyByAndAggregate( // follows key_by non-empty key case class TableAggregateByKey(child: TableIR, expr: IR) extends TableIR { - require(child.typ.key.nonEmpty) + override def typecheck(): Unit = { + assert(child.typ.key.nonEmpty) + } lazy val rowCountUpperBound: Option[Long] = child.rowCountUpperBound @@ -3106,7 +3154,7 @@ case class TableAggregateByKey(child: TableIR, expr: IR) extends TableIR { TableAggregateByKey(newChild, newExpr) } - val typ: TableType = child.typ.copy(rowType = child.typ.keyType ++ tcoerce[TStruct](expr.typ)) + lazy val typ: TableType = child.typ.copy(rowType = child.typ.keyType ++ tcoerce[TStruct](expr.typ)) protected[ir] override def execute(ctx: ExecuteContext, r: LoweringAnalyses): TableExecuteIntermediate = { val prev = child.execute(ctx, r).asTableValue(ctx) @@ -3236,7 +3284,7 @@ case class TableOrderBy(child: TableIR, sortFields: IndexedSeq[SortField]) exten TableOrderBy(newChild.asInstanceOf[TableIR], sortFields) } - val typ: TableType = child.typ.copy(key = FastSeq()) + lazy val typ: TableType = child.typ.copy(key = FastSeq()) protected[ir] override def execute(ctx: ExecuteContext, r: LoweringAnalyses): TableExecuteIntermediate = { val prev = child.execute(ctx, r).asTableValue(ctx) @@ -3289,15 +3337,15 @@ case class CastMatrixToTable( } case class TableRename(child: TableIR, rowMap: Map[String, String], globalMap: Map[String, String]) extends TableIR { - require(rowMap.keys.forall(child.typ.rowType.hasField)) - require(globalMap.keys.forall(child.typ.globalType.hasField)) + override def typecheck(): Unit = { + assert(rowMap.keys.forall(child.typ.rowType.hasField)) + assert(globalMap.keys.forall(child.typ.globalType.hasField)) + } lazy val rowCountUpperBound: Option[Long] = child.rowCountUpperBound def rowF(old: String): String = rowMap.getOrElse(old, old) - def globalF(old: String): String = globalMap.getOrElse(old, old) - lazy val typ: TableType = child.typ.copy( rowType = child.typ.rowType.rename(rowMap), globalType = child.typ.globalType.rename(globalMap), @@ -3314,7 +3362,7 @@ case class TableRename(child: TableIR, rowMap: Map[String, String], globalMap: M } protected[ir] override def execute(ctx: ExecuteContext, r: LoweringAnalyses): TableExecuteIntermediate = - new TableValueIntermediate( + TableValueIntermediate( child.execute(ctx, r).asTableValue(ctx).rename(globalMap, rowMap)) } @@ -3328,7 +3376,7 @@ case class TableFilterIntervals(child: TableIR, intervals: IndexedSeq[Interval], TableFilterIntervals(newChild, intervals, keep) } - override lazy val typ: TableType = child.typ + override def typ: TableType = child.typ protected[ir] override def execute(ctx: ExecuteContext, r: LoweringAnalyses): TableExecuteIntermediate = { val tv = child.execute(ctx, r).asTableValue(ctx) @@ -3381,7 +3429,8 @@ case class TableToTableApply(child: TableIR, function: TableToTableFunction) ext case class BlockMatrixToTableApply( bm: BlockMatrixIR, aux: IR, - function: BlockMatrixToTableFunction) extends TableIR { + function: BlockMatrixToTableFunction +) extends TableIR { override lazy val childrenSeq: IndexedSeq[BaseIR] = Array(bm, aux) @@ -3418,7 +3467,7 @@ case class BlockMatrixToTable(child: BlockMatrixIR) extends TableIR { } protected[ir] override def execute(ctx: ExecuteContext, r: LoweringAnalyses): TableExecuteIntermediate = { - new TableValueIntermediate(child.execute(ctx).entriesTable(ctx)) + TableValueIntermediate(child.execute(ctx).entriesTable(ctx)) } } diff --git 
a/hail/src/main/scala/is/hail/expr/ir/TypeCheck.scala b/hail/src/main/scala/is/hail/expr/ir/TypeCheck.scala
index 566be722765..582baeb089a 100644
--- a/hail/src/main/scala/is/hail/expr/ir/TypeCheck.scala
+++ b/hail/src/main/scala/is/hail/expr/ir/TypeCheck.scala
@@ -29,17 +29,15 @@ object TypeCheck {
   def check(ctx: ExecuteContext, ir: BaseIR, env: BindingEnv[Type]): StackFrame[Unit] = {
     for {
-      _ <- ir.children
-        .zipWithIndex
-        .foreachRecur { case (child, i) =>
-          for {
-            _ <- call(check(ctx, child, ChildBindings(ir, i, env)))
-          } yield {
-            if (child.typ == TVoid) {
-              checkVoidTypedChild(ctx, ir, i, env)
-            } else ()
-          }
+      _ <- ir.forEachChildWithEnvStackSafe(env) { (child, i, childEnv) =>
+        for {
+          _ <- call(check(ctx, child, childEnv))
+        } yield {
+          if (child.typ == TVoid) {
+            checkVoidTypedChild(ctx, ir, i, env)
+          } else ()
         }
+      }
     } yield checkSingleNode(ctx, ir, env)
   }
@@ -118,17 +116,18 @@
       case None => throw new RuntimeException(s"RelationalRef not found in env: $name")
     }
-    case x@TailLoop(name, _, body) =>
-      assert(x.typ == body.typ)
+    case x@TailLoop(name, _, rt, body) =>
+      assert(x.typ == rt)
+      assert(body.typ == rt)
       def recurInTail(node: IR, tailPosition: Boolean): Boolean = node match {
         case x: Recur => x.name != name || tailPosition
         case _ => node.children.zipWithIndex
           .forall {
-            case (c: IR, i) => recurInTail(c, tailPosition && InTailPosition(node, i))
+            case (c: IR, i) => recurInTail(c, tailPosition && InTailPosition(node, i))
             case _ => true
-          }
+          }
       }
       assert(recurInTail(body, tailPosition = true))
     case x@Recur(name, args, typ) =>
@@ -142,6 +141,7 @@
     case x@ApplyComparisonOp(op, l, r) =>
       assert(op.t1 == l.typ)
       assert(op.t2 == r.typ)
+      ComparisonOp.checkCompatible(op.t1, op.t2)
       op match {
         case _: Compare => assert(x.typ == TInt32)
         case _ => assert(x.typ == TBoolean)
@@ -283,7 +283,7 @@
       assert(a.typ.isInstanceOf[TStream])
       assert(lessThan.typ == TBoolean)
     case x@ToSet(a) =>
-      assert(a.typ.isInstanceOf[TStream])
+      assert(a.typ.isInstanceOf[TStream], a.typ)
     case x@ToDict(a) =>
       assert(a.typ.isInstanceOf[TStream])
       assert(tcoerce[TBaseStruct](tcoerce[TStream](a.typ).elementType).size == 2)
@@ -360,7 +360,7 @@
       assert(key.forall(eltType.hasField))
     case x@StreamFilter(a, name, cond) =>
      assert(a.typ.asInstanceOf[TStream].elementType.isRealizable)
-      assert(cond.typ == TBoolean)
+      assert(cond.typ == TBoolean, cond.typ)
       assert(x.typ == a.typ)
     case x@StreamTakeWhile(a, name, cond) =>
       assert(a.typ.asInstanceOf[TStream].elementType.isRealizable)
@@ -520,18 +520,26 @@
       assert(msg.typ == TString)
     case Trap(child) =>
     case ConsoleLog(msg, _) => assert(msg.typ == TString)
-    case x@ApplyIR(fn, typeArgs, args, _) =>
+    case x@ApplyIR(fn, _, typeArgs, args, _) =>
     case x: AbstractApplyNode[_] =>
       assert(x.implementation.unify(x.typeArgs, x.args.map(_.typ), x.returnType))
     case MatrixWrite(_, _) =>
-    case MatrixMultiWrite(_, _) => // do nothing
+    case MatrixMultiWrite(children, _) =>
+      val t = children.head.typ
+      assert(
+        !t.rowType.hasField(MatrixReader.rowUIDFieldName) &&
+          !t.colType.hasField(MatrixReader.colUIDFieldName), t
+      )
+      assert(children.forall(_.typ == t))
     case x@TableAggregate(child, query) =>
       assert(x.typ == query.typ)
     case x@MatrixAggregate(child, query) =>
       assert(x.typ == query.typ)
    case RelationalLet(_, _, _) =>
    case TableWrite(_, _) =>
-    case TableMultiWrite(_, _) =>
+    case TableMultiWrite(children, _) =>
+      val t = children.head.typ
+      assert(children.forall(_.typ == t))
    case TableCount(_) =>
    case 
MatrixCount(_) => case TableGetGlobals(_) => @@ -543,8 +551,6 @@ object TypeCheck { case BlockMatrixCollect(_) => case BlockMatrixWrite(_, writer) => writer.loweredTyp case BlockMatrixMultiWrite(_, _) => - case ValueToBlockMatrix(child, _, _) => - assert(child.typ.isInstanceOf[TArray] || child.typ.isInstanceOf[TNDArray] || child.typ == TFloat64) case CollectDistributedArray(ctxs, globals, cname, gname, body, dynamicID, _, _) => assert(ctxs.typ.isInstanceOf[TStream]) assert(dynamicID.typ == TString) @@ -571,21 +577,10 @@ object TypeCheck { assert(stagingFile.forall(_.typ == TString)) case LiftMeOut(_) => case Consume(_) => - case TableMapRows(child, newRow) => - val newFieldSet = newRow.typ.asInstanceOf[TStruct].fieldNames.toSet - assert(child.typ.key.forall(newFieldSet.contains)) - case TableMapPartitions(child, globalName, partitionStreamName, body, requestedKey, allowedOverlap) => - assert(StreamUtils.isIterationLinear(body, partitionStreamName), "must iterate over the partition exactly once") - val newRowType = body.typ.asInstanceOf[TStream].elementType.asInstanceOf[TStruct] - child.typ.key.foreach { k => if (!newRowType.hasField(k)) throw new RuntimeException(s"prev key: ${child.typ.key}, new row: ${newRowType}")} - case MatrixUnionCols(left, right, joinType) => - assert(left.typ.rowKeyStruct == right.typ.rowKeyStruct, s"${left.typ.rowKeyStruct} != ${right.typ.rowKeyStruct}") - assert(left.typ.colType == right.typ.colType, s"${left.typ.colType} != ${right.typ.colType}") - assert(left.typ.entryType == right.typ.entryType, s"${left.typ.entryType} != ${right.typ.entryType}") - case _: TableIR => - case _: MatrixIR => - case _: BlockMatrixIR => + case x: TableIR => x.typecheck() + case x: MatrixIR => x.typecheck() + case x: BlockMatrixIR => x.typecheck() } } diff --git a/hail/src/main/scala/is/hail/expr/ir/analyses/ControlFlowPreventsSplit.scala b/hail/src/main/scala/is/hail/expr/ir/analyses/ControlFlowPreventsSplit.scala index afee062cd8e..6796e4fef72 100644 --- a/hail/src/main/scala/is/hail/expr/ir/analyses/ControlFlowPreventsSplit.scala +++ b/hail/src/main/scala/is/hail/expr/ir/analyses/ControlFlowPreventsSplit.scala @@ -11,7 +11,7 @@ object ControlFlowPreventsSplit { case r@Recur(name, _, _) => var parent: BaseIR = r while (parent match { - case TailLoop(`name`, _, _) => false + case TailLoop(`name`, _, _, _) => false case _ => true }) { if (!m.contains(parent)) diff --git a/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala b/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala index 39b5e9e6b72..a012c87f560 100644 --- a/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala +++ b/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala @@ -114,7 +114,7 @@ case object SemanticHash extends Logging { case ApplyComparisonOp(op, _, _) => buffer ++= Bytes.fromClass(op.getClass) - case ApplyIR(fname, tyArgs, _, _) => + case ApplyIR(fname, tyArgs, _, _, _) => buffer ++= fname.getBytes tyArgs.foreach(buffer ++= EncodeTypename(_)) diff --git a/hail/src/main/scala/is/hail/expr/ir/functions/Functions.scala b/hail/src/main/scala/is/hail/expr/ir/functions/Functions.scala index 5812d74dac1..1143d680843 100644 --- a/hail/src/main/scala/is/hail/expr/ir/functions/Functions.scala +++ b/hail/src/main/scala/is/hail/expr/ir/functions/Functions.scala @@ -99,8 +99,8 @@ object IRFunctionRegistry { val refMap = BindingEnv.eval(argNames.zip(valueParameterTypes): _*) val body = IRParser.parse_value_ir( bodyStr, - IRParserEnvironment(ctx, refMap, Map()) - ) + 
IRParserEnvironment(ctx, Map()), + refMap) userAddedFunctions += ((name, (body.typ, typeParameters, valueParameterTypes))) addIR( @@ -147,7 +147,7 @@ object IRFunctionRegistry { ): JVMFunction = { jvmRegistry.lift(name) match { case None => - fatal(s"no functions found with the name ${name}") + fatal(s"no functions found with the signature $name(${valueParameterTypes.mkString(", ")}): $returnType") case Some(functions) => functions.filter(t => t.unify(typeParameters, valueParameterTypes, returnType)).toSeq match { case Seq() => @@ -196,7 +196,7 @@ object IRFunctionRegistry { def lookupUnseeded(name: String, returnType: Type, typeParameters: Seq[Type], arguments: Seq[Type]): Option[IRFunctionImplementation] = { val validIR: Option[IRFunctionImplementation] = lookupIR(name, returnType, typeParameters, arguments).map { case ((_, _, _, inline), conversion) => (typeParametersPassed, args, errorID) => - val x = ApplyIR(name, typeParametersPassed, args, errorID) + val x = ApplyIR(name, typeParametersPassed, args, returnType, errorID) x.conversion = conversion x.inline = inline x diff --git a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIR.scala b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIR.scala index 984c7d9a4ed..f0d168dc829 100644 --- a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIR.scala @@ -729,27 +729,31 @@ object LowerTableIR { })) } - bindIR(TailLoop(treeAggFunction, + val loopBody = If( + ArrayLen(currentAggStates) <= I32(branchFactor), + currentAggStates, + Recur( + treeAggFunction, + FastSeq( + CollectDistributedArray( + mapIR(StreamGrouped(ToStream(currentAggStates), I32(branchFactor)))(x => ToArray(x)), + MakeStruct(FastSeq()), + distAggStatesRef.name, + genUID(), + RunAgg( + combineGroup(distAggStatesRef, false), + WriteValue(MakeTuple.ordered(aggs.aggs.zipWithIndex.map { case (sig, i) => AggStateValue(i, sig.state) }), Str(tmpDir) + UUID4(), writer), + aggs.states), + strConcat(Str("iteration="), invoke("str", TString, iterNumber), Str(", n_states="), invoke("str", TString, ArrayLen(currentAggStates))), + "table_tree_aggregate"), + iterNumber + 1), + currentAggStates.typ)) + bindIR(TailLoop( + treeAggFunction, FastSeq[(String, IR)](currentAggStates.name -> collected, iterNumber.name -> I32(0)), - If(ArrayLen(currentAggStates) <= I32(branchFactor), - currentAggStates, - Recur(treeAggFunction, - FastSeq( - CollectDistributedArray( - mapIR(StreamGrouped(ToStream(currentAggStates), I32(branchFactor)))(x => ToArray(x)), - MakeStruct(FastSeq()), - distAggStatesRef.name, - genUID(), - RunAgg( - combineGroup(distAggStatesRef, false), - WriteValue(MakeTuple.ordered(aggs.aggs.zipWithIndex.map { case (sig, i) => AggStateValue(i, sig.state) }), Str(tmpDir) + UUID4(), writer), - aggs.states - ), - strConcat(Str("iteration="), invoke("str", TString, iterNumber), Str(", n_states="), invoke("str", TString, ArrayLen(currentAggStates))), - "table_tree_aggregate"), - iterNumber + 1), - currentAggStates.typ))) - ) { finalParts => + loopBody.typ, + loopBody + )) { finalParts => RunAgg( combineGroup(finalParts, true), Let("global", globals, @@ -1073,17 +1077,25 @@ object LowerTableIR { val howManyPartsToTry = if (targetNumRows == 1L) 1 else 4 val iteration = Ref(genUID(), TInt32) - TailLoop( - partitionSizeArrayFunc, - FastSeq(howManyPartsToTryRef.name -> howManyPartsToTry, iteration.name -> 0), - bindIR(loweredChild.mapContexts(_ => StreamTake(ToStream(childContexts), howManyPartsToTryRef)) { ctx: IR 
=> ctx } - .mapCollect("table_head_recursive_count", + val loopBody = bindIR( + loweredChild + .mapContexts(_ => StreamTake(ToStream(childContexts), howManyPartsToTryRef)) { ctx: IR => ctx } + .mapCollect( + "table_head_recursive_count", strConcat(Str("iteration="), invoke("str", TString, iteration), Str(",nParts="), invoke("str", TString, howManyPartsToTryRef)) - )(streamLenOrMax)) { counts => - If((Cast(streamSumIR(ToStream(counts)), TInt64) >= targetNumRows) || (ArrayLen(childContexts) <= ArrayLen(counts)), + )(streamLenOrMax) + ) { counts => + If( + (Cast(streamSumIR(ToStream(counts)), TInt64) >= targetNumRows) + || (ArrayLen(childContexts) <= ArrayLen(counts)), counts, Recur(partitionSizeArrayFunc, FastSeq(howManyPartsToTryRef * 4, iteration + 1), TArray(TInt32))) - }) + } + TailLoop( + partitionSizeArrayFunc, + FastSeq(howManyPartsToTryRef.name -> howManyPartsToTry, iteration.name -> 0), + loopBody.typ, + loopBody) } } @@ -1094,16 +1106,22 @@ object LowerTableIR { val numLeft = Ref(genUID(), TInt64) def makeAnswer(howManyParts: IR, howManyFromLast: IR) = MakeTuple(FastSeq((0, howManyParts), (1, howManyFromLast))) + val loopBody = If( + (i ceq numPartitions - 1) || ((numLeft - Cast(ArrayRef(partitionSizeArrayRef, i), TInt64)) <= 0L), + makeAnswer(i + 1, numLeft), + Recur( + howManyPartsToKeep, + FastSeq( + i + 1, + numLeft - Cast(ArrayRef(partitionSizeArrayRef, i), TInt64)), + TTuple(TInt32, TInt64))) If(numPartitions ceq 0, makeAnswer(0, 0L), - TailLoop(howManyPartsToKeep, FastSeq(i.name -> 0, numLeft.name -> targetNumRows), - If((i ceq numPartitions - 1) || ((numLeft - Cast(ArrayRef(partitionSizeArrayRef, i), TInt64)) <= 0L), - makeAnswer(i + 1, numLeft), - Recur(howManyPartsToKeep, - FastSeq( - i + 1, - numLeft - Cast(ArrayRef(partitionSizeArrayRef, i), TInt64)), - TTuple(TInt32, TInt64))))) + TailLoop( + howManyPartsToKeep, + FastSeq(i.name -> 0, numLeft.name -> targetNumRows), + loopBody.typ, + loopBody)) } val newCtxs = bindIR(ToArray(loweredChild.contexts)) { childContexts => @@ -1168,19 +1186,24 @@ object LowerTableIR { val iteration = Ref(genUID(), TInt32) + val loopBody = bindIR( + loweredChild + .mapContexts(_ => StreamDrop(ToStream(childContexts), maxIR(totalNumPartitions - howManyPartsToTryRef, 0))) { ctx: IR => ctx } + .mapCollect( + "table_tail_recursive_count", + strConcat(Str("iteration="), invoke("str", TString, iteration), Str(", nParts="), invoke("str", TString, howManyPartsToTryRef)) + )(StreamLen) + ) { counts => + If( + (Cast(streamSumIR(ToStream(counts)), TInt64) >= targetNumRows) || (totalNumPartitions <= ArrayLen(counts)), + counts, + Recur(partitionSizeArrayFunc, FastSeq(howManyPartsToTryRef * 4, iteration + 1), TArray(TInt32))) + } TailLoop( partitionSizeArrayFunc, FastSeq(howManyPartsToTryRef.name -> howManyPartsToTry, iteration.name -> 0), - bindIR( - loweredChild - .mapContexts(_ => StreamDrop(ToStream(childContexts), maxIR(totalNumPartitions - howManyPartsToTryRef, 0))) { ctx: IR => ctx } - .mapCollect("table_tail_recursive_count", - strConcat(Str("iteration="), invoke("str", TString, iteration), Str(", nParts="), invoke("str", TString, howManyPartsToTryRef)))(StreamLen) - ) { counts => - If((Cast(streamSumIR(ToStream(counts)), TInt64) >= targetNumRows) || (totalNumPartitions <= ArrayLen(counts)), - counts, - Recur(partitionSizeArrayFunc, FastSeq(howManyPartsToTryRef * 4, iteration + 1), TArray(TInt32))) - }) + loopBody.typ, + loopBody) } } @@ -1192,19 +1215,22 @@ object LowerTableIR { val nRowsToRight = Ref(genUID(), TInt64) def 
makeAnswer(howManyParts: IR, howManyFromLast: IR) = MakeTuple.ordered(FastSeq(howManyParts, howManyFromLast)) + val loopBody = If( + (i ceq numPartitions) || ((nRowsToRight + Cast(ArrayRef(partitionSizeArrayRef, numPartitions - i), TInt64)) >= targetNumRows), + makeAnswer(i, maxIR(0L, Cast(ArrayRef(partitionSizeArrayRef, numPartitions - i), TInt64) - (I64(targetNumRows) - nRowsToRight)).toI), + Recur( + howManyPartsToDrop, + FastSeq( + i + 1, + nRowsToRight + Cast(ArrayRef(partitionSizeArrayRef, numPartitions - i), TInt64)), + TTuple(TInt32, TInt32))) If(numPartitions ceq 0, makeAnswer(0, 0), TailLoop( howManyPartsToDrop, FastSeq(i.name -> 1, nRowsToRight.name -> 0L), - If((i ceq numPartitions) || ((nRowsToRight + Cast(ArrayRef(partitionSizeArrayRef, numPartitions - i), TInt64)) >= targetNumRows), - makeAnswer(i, maxIR(0L, Cast(ArrayRef(partitionSizeArrayRef, numPartitions - i), TInt64) - (I64(targetNumRows) - nRowsToRight)).toI), - Recur( - howManyPartsToDrop, - FastSeq( - i + 1, - nRowsToRight + Cast(ArrayRef(partitionSizeArrayRef, numPartitions - i), TInt64)), - TTuple(TInt32, TInt32))))) + loopBody.typ, + loopBody)) } } @@ -1325,30 +1351,33 @@ object LowerTableIR { val iteration = Ref(genUID(), TInt32) val loopName = genUID() - TailLoop(loopName, IndexedSeq((aggStack.name, MakeArray(collected)), (iteration.name, I32(0))), - bindIR(ArrayRef(aggStack, (ArrayLen(aggStack) - 1))) { states => - bindIR(ArrayLen(states)) { statesLen => - If(statesLen > branchFactor, - bindIR((statesLen + branchFactor - 1) floorDiv branchFactor) { nCombines => - val contexts = mapIR(rangeIR(nCombines)) { outerIdxRef => - sliceArrayIR(states, outerIdxRef * branchFactor, (outerIdxRef + 1) * branchFactor) - } - val cdaResult = cdaIR(contexts, MakeStruct(FastSeq()), "table_scan_up_pass", - strConcat(Str("iteration="), invoke("str", TString, iteration), Str(", nStates="), invoke("str", TString, statesLen)) - ) { case (contexts, _) => - RunAgg( - combineGroup(contexts), - WriteValue(MakeTuple.ordered(aggs.aggs.zipWithIndex.map { case (sig, i) => AggStateValue(i, sig.state) }), Str(tmpDir) + UUID4(), writer), - aggs.states - ) - } - Recur(loopName, IndexedSeq(invoke("extend", TArray(TArray(TString)), aggStack, MakeArray(cdaResult)), iteration + 1), TArray(TArray(TString))) - }, - aggStack - ) - } + val loopBody = bindIR(ArrayRef(aggStack, (ArrayLen(aggStack) - 1))) { states => + bindIR(ArrayLen(states)) { statesLen => + If( + statesLen > branchFactor, + bindIR((statesLen + branchFactor - 1) floorDiv branchFactor) { nCombines => + val contexts = mapIR(rangeIR(nCombines)) { outerIdxRef => + sliceArrayIR(states, outerIdxRef * branchFactor, (outerIdxRef + 1) * branchFactor) + } + val cdaResult = cdaIR( + contexts, MakeStruct(FastSeq()), "table_scan_up_pass", + strConcat(Str("iteration="), invoke("str", TString, iteration), Str(", nStates="), invoke("str", TString, statesLen)) + ) { case (contexts, _) => + RunAgg( + combineGroup(contexts), + WriteValue(MakeTuple.ordered(aggs.aggs.zipWithIndex.map { case (sig, i) => AggStateValue(i, sig.state) }), Str(tmpDir) + UUID4(), writer), + aggs.states) + } + Recur(loopName, IndexedSeq(invoke("extend", TArray(TArray(TString)), aggStack, MakeArray(cdaResult)), iteration + 1), TArray(TArray(TString))) + }, + aggStack) } - ) + } + TailLoop( + loopName, + IndexedSeq((aggStack.name, MakeArray(collected)), (iteration.name, I32(0))), + loopBody.typ, + loopBody) } // The downward pass traverses the tree from root to leaves, computing partial scan @@ -1365,53 +1394,54 @@ object LowerTableIR 
{ bindIR(WriteValue(initState, Str(tmpDir) + UUID4(), writer)) { freshState => - TailLoop(downPassLoopName, IndexedSeq((level.name, ArrayLen(aggStack) - 1), (last.name, MakeArray(freshState)), (iteration.name, I32(0))), - If(level < 0, - last, - bindIR(ArrayRef(aggStack, level)) { aggsArray => - - val groups = mapIR(zipWithIndex(mapIR(StreamGrouped(ToStream(aggsArray), I32(branchFactor)))(x => ToArray(x)))) { eltAndIdx => - MakeStruct(FastSeq( - ("prev", ArrayRef(last, GetField(eltAndIdx, "idx"))), - ("partialSums", GetField(eltAndIdx, "elt")) - )) - } + val loopBody = If( + level < 0, + last, + bindIR(ArrayRef(aggStack, level)) { aggsArray => + val groups = mapIR(zipWithIndex(mapIR(StreamGrouped(ToStream(aggsArray), I32(branchFactor)))(x => ToArray(x)))) { eltAndIdx => + MakeStruct(FastSeq( + ("prev", ArrayRef(last, GetField(eltAndIdx, "idx"))), + ("partialSums", GetField(eltAndIdx, "elt")))) + } - val results = cdaIR(groups, MakeTuple.ordered(FastSeq()), "table_scan_down_pass", - strConcat(Str("iteration="), invoke("str", TString, iteration), Str(", level="), invoke("str", TString, level)) - ) { case (context, _) => - bindIR(GetField(context, "prev")) { prev => - - val elt = Ref(genUID(), TString) - ToArray(RunAggScan( - ToStream(GetField(context, "partialSums"), requiresMemoryManagementPerElement = true), - elt.name, - bindIR(ReadValue(prev, reader, reader.spec.encodedVirtualType)) { serializedTuple => - Begin( - aggs.aggs.zipWithIndex.map { case (sig, i) => - InitFromSerializedValue(i, GetTupleElement(serializedTuple, i), sig.state) - }) - }, - bindIR(ReadValue(elt, reader, reader.spec.encodedVirtualType)) { serializedTuple => - Begin( - aggs.aggs.zipWithIndex.map { case (sig, i) => - CombOpValue(i, GetTupleElement(serializedTuple, i), sig) - }) - }, - WriteValue(MakeTuple.ordered(aggs.aggs.zipWithIndex.map { case (sig, i) => AggStateValue(i, sig.state) }), Str(tmpDir) + UUID4(), writer), - aggs.states - )) - } + val results = cdaIR( + groups, MakeTuple.ordered(FastSeq()), "table_scan_down_pass", + strConcat(Str("iteration="), invoke("str", TString, iteration), Str(", level="), invoke("str", TString, level)) + ) { case (context, _) => + bindIR(GetField(context, "prev")) { prev => + val elt = Ref(genUID(), TString) + ToArray(RunAggScan( + ToStream(GetField(context, "partialSums"), requiresMemoryManagementPerElement = true), + elt.name, + bindIR(ReadValue(prev, reader, reader.spec.encodedVirtualType)) { serializedTuple => + Begin( + aggs.aggs.zipWithIndex.map { case (sig, i) => + InitFromSerializedValue(i, GetTupleElement(serializedTuple, i), sig.state) + }) + }, + bindIR(ReadValue(elt, reader, reader.spec.encodedVirtualType)) { serializedTuple => + Begin( + aggs.aggs.zipWithIndex.map { case (sig, i) => + CombOpValue(i, GetTupleElement(serializedTuple, i), sig) + }) + }, + WriteValue(MakeTuple.ordered(aggs.aggs.zipWithIndex.map { case (sig, i) => AggStateValue(i, sig.state) }), Str(tmpDir) + UUID4(), writer), + aggs.states)) } - Recur(downPassLoopName, - IndexedSeq( - level - 1, - ToArray(flatten(ToStream(results))), - iteration + 1), - TArray(TString)) } - ) - ) + Recur( + downPassLoopName, + IndexedSeq( + level - 1, + ToArray(flatten(ToStream(results))), + iteration + 1), + TArray(TString)) + }) + TailLoop( + downPassLoopName, + IndexedSeq((level.name, ArrayLen(aggStack) - 1), (last.name, MakeArray(freshState)), (iteration.name, I32(0))), + loopBody.typ, + loopBody) } } } diff --git a/hail/src/main/scala/is/hail/expr/ir/package.scala b/hail/src/main/scala/is/hail/expr/ir/package.scala 
index 6f7d56838fa..4e25e2cc232 100644 --- a/hail/src/main/scala/is/hail/expr/ir/package.scala +++ b/hail/src/main/scala/is/hail/expr/ir/package.scala @@ -37,10 +37,12 @@ package object ir { ir.Ref(pred, TBoolean))) } - def invoke(name: String, rt: Type, typeArgs: Array[Type], errorID: Int, args: IR*): IR = IRFunctionRegistry.lookupUnseeded(name, rt, typeArgs, args.map(_.typ)) match { - case Some(f) => f(typeArgs, args, errorID) - case None => fatal(s"no conversion found for $name(${typeArgs.mkString(", ")}, ${args.map(_.typ).mkString(", ")}) => $rt") - } + def invoke(name: String, rt: Type, typeArgs: Seq[Type], errorID: Int, args: IR*): IR = + IRFunctionRegistry.lookupUnseeded(name, rt, typeArgs, args.map(_.typ)) match { + case Some(f) => f(typeArgs, args, errorID) + case None => fatal(s"no conversion found for $name(${typeArgs.mkString(", ")}, ${args.map(_.typ).mkString(", ")}) => $rt") + } + def invoke(name: String, rt: Type, typeArgs: Array[Type], args: IR*): IR = invoke(name, rt, typeArgs, ErrorIDs.NO_ERROR, args:_*) diff --git a/hail/src/main/scala/is/hail/types/virtual/TStruct.scala b/hail/src/main/scala/is/hail/types/virtual/TStruct.scala index 5e467ec0f4c..a104c2c399d 100644 --- a/hail/src/main/scala/is/hail/types/virtual/TStruct.scala +++ b/hail/src/main/scala/is/hail/types/virtual/TStruct.scala @@ -144,10 +144,10 @@ final case class TStruct(fields: IndexedSeq[Field]) extends TBaseStruct { } } - def structInsert(signature: Type, p: List[String]): (TStruct, Inserter) = { + def structInsert(signature: Type, p: List[String]): TStruct = { require(p.nonEmpty || signature.isInstanceOf[TStruct], s"tried to remap top-level struct to non-struct $signature") val (t, f) = insert(signature, p) - (t.asInstanceOf[TStruct], f) + t.asInstanceOf[TStruct] } def updateKey(key: String, i: Int, sig: Type): TStruct = { diff --git a/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala b/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala index 0beaa71fe6b..c778465f0cd 100644 --- a/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala @@ -35,7 +35,7 @@ class ForwardLetsSuite extends HailSuite { Array( NDArrayMap(In(1, TNDArray(TInt32, Nat(1))), "y", x + y), NDArrayMap2(In(1, TNDArray(TInt32, Nat(1))), In(2, TNDArray(TInt32, Nat(1))), "y", "z", x + y + Ref("z", TInt32), ErrorIDs.NO_ERROR), - TailLoop("f", FastSeq("y" -> I32(0)), If(y < x, Recur("f", FastSeq[IR](y - I32(1)), TInt32), x)) + TailLoop("f", FastSeq("y" -> I32(0)), TInt32, If(y < x, Recur("f", FastSeq[IR](y - I32(1)), TInt32), x)) ).map(ir => Array[IR](Let("x", In(0, TInt32) + In(0, TInt32), ir))) } diff --git a/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala b/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala index 621b56fca5a..f3d83d9507e 100644 --- a/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala @@ -2766,12 +2766,12 @@ class IRSuite extends HailSuite { NDArrayAgg(nd, FastSeq(0)), NDArrayWrite(nd, Str("/path/to/ndarray")), NDArrayMatMul(nd, nd, ErrorIDs.NO_ERROR), - NDArraySlice(nd, MakeTuple.ordered(FastSeq(MakeTuple.ordered(FastSeq(F64(0), F64(2), F64(1))), - MakeTuple.ordered(FastSeq(F64(0), F64(2), F64(1)))))), + NDArraySlice(nd, MakeTuple.ordered(FastSeq(MakeTuple.ordered(FastSeq(I64(0), I64(2), I64(1))), + MakeTuple.ordered(FastSeq(I64(0), I64(2), I64(1)))))), NDArrayFilter(nd, FastSeq(NA(TArray(TInt64)), NA(TArray(TInt64)))), ArrayRef(a, i) -> Array(a), ArrayLen(a) -> Array(a), - 
RNGSplit(rngState, MakeTuple.ordered(FastSeq(I64(1), MakeTuple.ordered(FastSeq(I64(2), I64(3)))))), + RNGSplit(rngState, MakeTuple.ordered(FastSeq(I64(1), I64(2), I64(3)))), StreamLen(st) -> Array(st), StreamRange(I32(0), I32(5), I32(1)), StreamRange(I32(0), I32(5), I32(1)), @@ -2781,8 +2781,8 @@ class IRSuite extends HailSuite { ToArray(st) -> Array(st), CastToArray(NA(TSet(TInt32))), ToStream(a) -> Array(a), - LowerBoundOnOrderedCollection(a, i, onKey = true) -> Array(a), - GroupByKey(da) -> Array(da), + LowerBoundOnOrderedCollection(a, i, onKey = false) -> Array(a), + GroupByKey(std) -> Array(std), StreamTake(st, I32(10)) -> Array(st), StreamDrop(st, I32(10)) -> Array(st), StreamTakeWhile(st, "v", v < I32(5)) -> Array(st), @@ -2794,7 +2794,7 @@ class IRSuite extends HailSuite { StreamFold(st, I32(0), "x", "v", v) -> Array(st), StreamFold2(StreamFold(st, I32(0), "x", "v", v)) -> Array(st), StreamScan(st, I32(0), "x", "v", v) -> Array(st), - StreamWhiten(whitenStream, "newChunk", "prevWindow", 0, 0, 0, 0, false) -> Array(whitenStream), + StreamWhiten(whitenStream, "newChunk", "prevWindow", 1, 1, 1, 1, false) -> Array(whitenStream), StreamJoinRightDistinct( StreamMap(StreamRange(0, 2, 1), "x", MakeStruct(FastSeq("x" -> Ref("x", TInt32)))), StreamMap(StreamRange(0, 3, 1), "x", MakeStruct(FastSeq("x" -> Ref("x", TInt32)))), @@ -2804,12 +2804,12 @@ class IRSuite extends HailSuite { StreamAggScan(st, "x", ApplyScanOp(FastSeq.empty, FastSeq(Cast(Ref("x", TInt32), TInt64)), sumSig)) -> Array(st), RunAgg(Begin(FastSeq( InitOp(0, FastSeq(Begin(FastSeq(InitOp(0, FastSeq(), pSumSig)))), groupSignature), - SeqOp(0, FastSeq(I32(1), SeqOp(0, FastSeq(), pSumSig)), groupSignature))), + SeqOp(0, FastSeq(I32(1), SeqOp(0, FastSeq(I64(1)), pSumSig)), groupSignature))), AggStateValue(0, groupSignature.state), FastSeq(groupSignature.state)), RunAggScan(StreamRange(I32(0), I32(1), I32(1)), "foo", InitOp(0, FastSeq(Begin(FastSeq(InitOp(0, FastSeq(), pSumSig)))), groupSignature), - SeqOp(0, FastSeq(Ref("foo", TInt32), SeqOp(0, FastSeq(), pSumSig)), groupSignature), + SeqOp(0, FastSeq(Ref("foo", TInt32), SeqOp(0, FastSeq(I64(1)), pSumSig)), groupSignature), AggStateValue(0, groupSignature.state), FastSeq(groupSignature.state)), AggFilter(True(), I32(0), false) -> (_.createAgg), @@ -2845,7 +2845,7 @@ class IRSuite extends HailSuite { TableCount(table), MatrixCount(mt), TableGetGlobals(table), - TableCollect(table), + TableCollect(TableKeyBy(table, FastSeq())), TableAggregate(table, MakeStruct(IndexedSeq("foo" -> count))), TableToValueApply(table, ForceCountTable()), MatrixToValueApply(mt, ForceCountMatrixTable()), @@ -2872,14 +2872,14 @@ class IRSuite extends HailSuite { MakeStream(FastSeq(), TStream(TStruct())), NA(TString), PartitionNativeWriter(TypedCodecSpec(PType.canonical(TStruct()), BufferSpec.default), IndexedSeq(), "path", None, None)), WriteMetadata( - NA(TStruct("global" -> TString, "partitions" -> TStruct("filePath" -> TString, "partitionCounts" -> TInt64))), + Begin(FastSeq()), RelationalWriter("path", overwrite = false, None)), ReadValue(Str("foo"), ETypeValueReader(TypedCodecSpec(PCanonicalStruct("foo" -> PInt32(), "bar" -> PCanonicalString()), BufferSpec.default)), TStruct("foo" -> TInt32)), WriteValue(I32(1), Str("foo"), ETypeValueWriter(TypedCodecSpec(PInt32(), BufferSpec.default))), WriteValue(I32(1), Str("foo"), ETypeValueWriter(TypedCodecSpec(PInt32(), BufferSpec.default)), Some(Str("/tmp/uid/part"))), LiftMeOut(I32(1)), RelationalLet("x", I32(0), I32(0)), - TailLoop("y", IndexedSeq("x" -> 
I32(0)), Recur("y", FastSeq(I32(4)), TInt32)) + TailLoop("y", IndexedSeq("x" -> I32(0)), TInt32, Recur("y", FastSeq(I32(4)), TInt32)) ) val emptyEnv = BindingEnv.empty[Type] irs.map { case (ir, bind) => Array(ir, bind(emptyEnv)) } @@ -3092,11 +3092,11 @@ class IRSuite extends HailSuite { @Test(dataProvider = "valueIRs") def testValueIRParser(x: IR, refMap: BindingEnv[Type]) { - val env = IRParserEnvironment(ctx, refMap = refMap) + val env = IRParserEnvironment(ctx) val s = Pretty.sexprStyle(x, elideLiterals = false) - val x2 = IRParser.parse_value_ir(s, env) + val x2 = IRParser.parse_value_ir(s, env, refMap) assert(x2 == x) } @@ -3314,6 +3314,7 @@ class IRSuite extends HailSuite { implicit val execStrats = ExecStrategy.compileOnly val triangleSum: IR = TailLoop("f", FastSeq("x" -> In(0, TInt32), "accum" -> In(1, TInt32)), + TInt32, If(Ref("x", TInt32) <= I32(0), Ref("accum", TInt32), Recur("f", @@ -3331,9 +3332,11 @@ class IRSuite extends HailSuite { implicit val execStrats = ExecStrategy.compileOnly val triangleSum: IR = TailLoop("f1", FastSeq("x" -> In(0, TInt32), "accum" -> I32(0)), + TInt32, If(Ref("x", TInt32) <= I32(0), TailLoop("f2", FastSeq("x2" -> Ref("accum", TInt32), "accum2" -> I32(0)), + TInt32, If(Ref("x2", TInt32) <= I32(0), Ref("accum2", TInt32), Recur("f2", @@ -3357,6 +3360,7 @@ class IRSuite extends HailSuite { val ndSum: IR = TailLoop("f", FastSeq("x" -> In(0, TInt32), "accum" -> In(1, ndType)), + ndType, If(Ref("x", TInt32) <= I32(0), Ref("accum", ndType), Recur("f", diff --git a/hail/src/test/scala/is/hail/expr/ir/MatrixIRSuite.scala b/hail/src/test/scala/is/hail/expr/ir/MatrixIRSuite.scala index 394f673868c..b747c351247 100644 --- a/hail/src/test/scala/is/hail/expr/ir/MatrixIRSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/MatrixIRSuite.scala @@ -265,7 +265,7 @@ class MatrixIRSuite extends HailSuite { // The entry field must be an array interceptFatal("") { - CastTableToMatrix(rowTab, "animal", "__cols", Array("col_idx")) + TypeCheck(ctx, CastTableToMatrix(rowTab, "animal", "__cols", Array("col_idx"))) } val rdata2 = Array( @@ -322,8 +322,8 @@ class MatrixIRSuite extends HailSuite { val range = rangeMatrix(10, 2, None) val path1 = ctx.createTmpPath("test1") val path2 = ctx.createTmpPath("test2") - intercept[java.lang.IllegalArgumentException] { - val ir = MatrixMultiWrite(FastSeq(vcf, range), MatrixNativeMultiWriter(IndexedSeq(path1, path2))) + intercept[HailException] { + TypeCheck(ctx, MatrixMultiWrite(FastSeq(vcf, range), MatrixNativeMultiWriter(IndexedSeq(path1, path2)))) } } } diff --git a/hail/src/test/scala/is/hail/expr/ir/RequirednessSuite.scala b/hail/src/test/scala/is/hail/expr/ir/RequirednessSuite.scala index e50fdc48b39..c75c4aef7dd 100644 --- a/hail/src/test/scala/is/hail/expr/ir/RequirednessSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/RequirednessSuite.scala @@ -200,9 +200,12 @@ class RequirednessSuite extends HailSuite { // TailLoop val param1 = Ref(genUID(), tarray) val param2 = Ref(genUID(), TInt32) - val loop = TailLoop("loop", FastSeq( - param1.name -> array(required, required), - param2.name -> int(required)), + val loop = TailLoop( + "loop", + FastSeq( + param1.name -> array(required, required), + param2.name -> int(required)), + tnestedarray, If(False(), // required MakeArray(FastSeq(param1), tnestedarray), // required If(param2 <= I32(1), // possibly missing diff --git a/hail/src/test/scala/is/hail/expr/ir/table/TableGenSuite.scala b/hail/src/test/scala/is/hail/expr/ir/table/TableGenSuite.scala index 8a86b410c21..ed6b72c9584 
100644 --- a/hail/src/test/scala/is/hail/expr/ir/table/TableGenSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/table/TableGenSuite.scala @@ -20,7 +20,7 @@ class TableGenSuite extends HailSuite { @Test(groups = Array("construction", "typecheck")) def testWithInvalidContextsType: Unit = { val ex = intercept[IllegalArgumentException] { - mkTableGen(contexts = Some(Str("oh noes :'("))) + mkTableGen(contexts = Some(Str("oh noes :'("))).typecheck() } ex.getMessage should include("contexts") @@ -31,7 +31,7 @@ class TableGenSuite extends HailSuite { @Test(groups = Array("construction", "typecheck")) def testWithInvalidGlobalsType: Unit = { val ex = intercept[IllegalArgumentException] { - mkTableGen(globals = Some(Str("oh noes :'(")), body = Some(MakeStream(IndexedSeq(), TStream(TStruct())))) + mkTableGen(globals = Some(Str("oh noes :'(")), body = Some(MakeStream(IndexedSeq(), TStream(TStruct())))).typecheck() } ex.getMessage should include("globals") ex.getMessage should include(s"Expected: ${classOf[TStruct].getName}") @@ -41,7 +41,7 @@ class TableGenSuite extends HailSuite { @Test(groups = Array("construction", "typecheck")) def testWithInvalidBodyType: Unit = { val ex = intercept[IllegalArgumentException] { - mkTableGen(body = Some(Str("oh noes :'("))) + mkTableGen(body = Some(Str("oh noes :'("))).typecheck() } ex.getMessage should include("body") ex.getMessage should include(s"Expected: ${classOf[TStream].getName}") @@ -51,7 +51,7 @@ class TableGenSuite extends HailSuite { @Test(groups = Array("construction", "typecheck")) def testWithInvalidBodyElementType: Unit = { val ex = intercept[IllegalArgumentException] { - mkTableGen(body = Some(MakeStream(IndexedSeq(Str("oh noes :'(")), TStream(TString)))) + mkTableGen(body = Some(MakeStream(IndexedSeq(Str("oh noes :'(")), TStream(TString)))).typecheck() } ex.getMessage should include("body.elementType") ex.getMessage should include(s"Expected: ${classOf[TStruct].getName}") @@ -61,7 +61,7 @@ class TableGenSuite extends HailSuite { @Test(groups = Array("construction", "typecheck")) def testWithInvalidPartitionerKeyType: Unit = { val ex = intercept[IllegalArgumentException] { - mkTableGen(partitioner = Some(RVDPartitioner.empty(ctx.stateManager, TStruct("does-not-exist" -> TInt32)))) + mkTableGen(partitioner = Some(RVDPartitioner.empty(ctx.stateManager, TStruct("does-not-exist" -> TInt32)))).typecheck() } ex.getMessage should include("partitioner") } @@ -69,7 +69,7 @@ class TableGenSuite extends HailSuite { @Test(groups = Array("construction", "typecheck")) def testWithTooLongPartitionerKeyType: Unit = { val ex = intercept[IllegalArgumentException] { - mkTableGen(partitioner = Some(RVDPartitioner.empty(ctx.stateManager, TStruct("does-not-exist" -> TInt32)))) + mkTableGen(partitioner = Some(RVDPartitioner.empty(ctx.stateManager, TStruct("does-not-exist" -> TInt32)))).typecheck() } ex.getMessage should include("partitioner") } From e9e8e1724b3835d1d60f2a7403b2881da267f5c7 Mon Sep 17 00:00:00 2001 From: jigold Date: Wed, 15 Nov 2023 08:59:33 -0500 Subject: [PATCH 05/48] [infra] Turn dry run off on artifact registry cleanup policies (#14001) --- infra/gcp-broad/main.tf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/infra/gcp-broad/main.tf b/infra/gcp-broad/main.tf index a9c495f9af2..30152037024 100644 --- a/infra/gcp-broad/main.tf +++ b/infra/gcp-broad/main.tf @@ -416,10 +416,11 @@ resource "google_artifact_registry_repository" "repository" { format = "DOCKER" repository_id = "hail" location = var.artifact_registry_location + 
cleanup_policy_dry_run = false # https://github.com/hashicorp/terraform-provider-azurerm/issues/7396 lifecycle { - ignore_changes = [cleanup_policies, timeouts, cleanup_policy_dry_run] + ignore_changes = [cleanup_policies, timeouts] } } From 001f93a6432a8b2072c3ccd398aded29e87888ab Mon Sep 17 00:00:00 2001 From: Patrick Schultz Date: Wed, 15 Nov 2023 15:14:39 -0500 Subject: [PATCH 06/48] [compiler] allow relational IR in ExtractIntervals (#14013) Fix bug where relational IR inside the condition of a TableFilter or MatrixFilter causes a ClassCastException. This can happen if, for example, there's a TableAggregate inside the condition. --- .../is/hail/expr/ir/ExtractIntervalFilters.scala | 5 ++++- .../hail/expr/ir/ExtractIntervalFiltersSuite.scala | 13 +++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala b/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala index 4619fe17287..ce317d8618b 100644 --- a/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala +++ b/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala @@ -866,7 +866,10 @@ class ExtractIntervalFilters(ctx: ExecuteContext, keyType: TStruct) { } res = if (res == null) { - val children = x.children.map(child => recur(child.asInstanceOf[IR])).toFastSeq + val children = x.children.map { + case child: IR => recur(child) + case _ => AbstractLattice.top + }.toFastSeq val keyOrConstVal = computeKeyOrConst(x, children) if (x.typ == TBoolean) { if (keyOrConstVal == AbstractLattice.top) diff --git a/hail/src/test/scala/is/hail/expr/ir/ExtractIntervalFiltersSuite.scala b/hail/src/test/scala/is/hail/expr/ir/ExtractIntervalFiltersSuite.scala index 81b01856209..9eb1c93bad7 100644 --- a/hail/src/test/scala/is/hail/expr/ir/ExtractIntervalFiltersSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/ExtractIntervalFiltersSuite.scala @@ -757,6 +757,19 @@ class ExtractIntervalFiltersSuite extends HailSuite { outer => ) } + @Test def testRelationalChildren(): Unit = { + val testRows = FastSeq( + Row(0, 0, true), + Row(0, 10, true), + Row(0, 20, true), + Row(0, null, true)) + + val count = TableAggregate(TableRange(10, 1), ApplyAggOp(FastSeq(), FastSeq(), AggSignature(Count(), FastSeq(), FastSeq()))) + print(count.typ) + val filter = gt(count, Cast(k1, TInt64)) + check(filter, ref1, k1Full, testRows, filter, FastSeq(Interval(Row(), Row(), true, true))) + } + @Test def testIntegration() { hc // force initialization val tab1 = TableRange(10, 5) From eaf41978c8366cd747d917dcdec0cb05f3a62d9d Mon Sep 17 00:00:00 2001 From: Edmund Higham Date: Wed, 15 Nov 2023 19:10:33 -0500 Subject: [PATCH 07/48] [compiler] Minor Requiredness Performance Enchancements (#13991) Main change: add `var mark: Int` to `BaseIR`. On profiling the benchmark `matrix_multi_write_nothing`, I noticed a significant amount of time was spent - iterating through zipped arrays in requiredness - Adding and removing elements from `HashSet`s. In fact, half the time spent in requiredness was removing ir nodes from the `HashSet` set used as the queue! With this change, requiredness runs like a stabbed rat! Explanation of `mark`: This field acts as a flag that analyses can set. For example: - `HasSharing` can use the field to see if it has visited a node before. - `Requiredness` uses this field to tell if a node is currently enqueued. The `nextFlag` method in `IrMetadata` allows for analyses to get a fresh value they can set the `mark` field. 
This removes the need to traverse the IR after analyses to re-zero every `mark` field. --- .../main/scala/is/hail/expr/ir/BaseIR.scala | 12 +++++++- .../main/scala/is/hail/expr/ir/Compile.scala | 18 +++++------ .../src/main/scala/is/hail/expr/ir/Emit.scala | 2 +- .../scala/is/hail/expr/ir/ForwardLets.scala | 6 ++-- .../scala/is/hail/expr/ir/Interpret.scala | 2 +- .../ir/LowerOrInterpretNonCompilable.scala | 2 +- .../is/hail/expr/ir/NormalizeNames.scala | 10 +++++-- .../main/scala/is/hail/expr/ir/Optimize.scala | 4 +-- .../scala/is/hail/expr/ir/RefEquality.scala | 21 ++++++------- .../scala/is/hail/expr/ir/Requiredness.scala | 30 ++++++++++++++++--- .../main/scala/is/hail/expr/ir/Simplify.scala | 13 ++++---- .../hail/expr/ir/analyses/SemanticHash.scala | 2 +- .../ir/lowering/LowerAndExecuteShuffles.scala | 2 +- .../hail/expr/ir/lowering/LoweringPass.scala | 11 +++++-- .../is/hail/types/TypeWithRequiredness.scala | 13 ++++---- .../is/hail/expr/ir/ForwardLetsSuite.scala | 27 ++++++++--------- .../test/scala/is/hail/expr/ir/IRSuite.scala | 4 +-- .../scala/is/hail/expr/ir/SimplifySuite.scala | 2 +- 18 files changed, 110 insertions(+), 71 deletions(-) diff --git a/hail/src/main/scala/is/hail/expr/ir/BaseIR.scala b/hail/src/main/scala/is/hail/expr/ir/BaseIR.scala index b3008b7a6c9..51611bbd13e 100644 --- a/hail/src/main/scala/is/hail/expr/ir/BaseIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/BaseIR.scala @@ -1,5 +1,6 @@ package is.hail.expr.ir +import is.hail.backend.ExecuteContext import is.hail.types.BaseType import is.hail.types.virtual.Type import is.hail.utils.StackSafe._ @@ -16,7 +17,16 @@ abstract class BaseIR { def deepCopy(): this.type = copy(newChildren = childrenSeq.map(_.deepCopy())).asInstanceOf[this.type] - lazy val noSharing: this.type = if (HasIRSharing(this)) this.deepCopy() else this + def noSharing(ctx: ExecuteContext): this.type = + if (HasIRSharing(ctx)(this)) this.deepCopy() else this + + + // For use as a boolean flag by IR passes. Each pass uses a different sentinel value to encode + // "true" (and anything else is false). As long as we maintain the global invariant that no + // two passes use the same sentinel value, this allows us to reuse this field across passes + // without ever having to initialize it at the start of a pass. + // New sentinel values can be obtained by `nextFlag` on `IRMetadata`. 
+ var mark: Int = 0 def mapChildrenWithIndex(f: (BaseIR, Int) => BaseIR): BaseIR = { val newChildren = childrenSeq.view.zipWithIndex.map(f.tupled).toArray diff --git a/hail/src/main/scala/is/hail/expr/ir/Compile.scala b/hail/src/main/scala/is/hail/expr/ir/Compile.scala index fb7d59ccfee..56e127af9b4 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Compile.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Compile.scala @@ -32,9 +32,9 @@ object Compile { print: Option[PrintWriter] = None ): (Option[SingleCodeType], (HailClassLoader, FS, HailTaskContext, Region) => F) = { - val normalizeNames = new NormalizeNames(_.toString) - val normalizedBody = normalizeNames(body, - Env(params.map { case (n, _) => n -> n }: _*)) + val normalizedBody = new NormalizeNames(_.toString)(ctx, body, + Env(params.map { case (n, _) => n -> n }: _*) + ) val k = CodeCacheKey(FastSeq[AggStateSig](), params.map { case (n, pt) => (n, pt) }, normalizedBody) (ctx.backend.lookupOrCompileCachedFunction[F](k) { @@ -42,7 +42,7 @@ object Compile { ir = Subst(ir, BindingEnv(params .zipWithIndex .foldLeft(Env.empty[IR]) { case (e, ((n, t), i)) => e.bind(n, In(i, t)) })) - ir = LoweringPipeline.compileLowerer(optimize).apply(ctx, ir).asInstanceOf[IR].noSharing + ir = LoweringPipeline.compileLowerer(optimize).apply(ctx, ir).asInstanceOf[IR].noSharing(ctx) TypeCheck(ctx, ir, BindingEnv.empty) @@ -85,9 +85,9 @@ object CompileWithAggregators { body: IR, optimize: Boolean = true ): (Option[SingleCodeType], (HailClassLoader, FS, HailTaskContext, Region) => (F with FunctionWithAggRegion)) = { - val normalizeNames = new NormalizeNames(_.toString) - val normalizedBody = normalizeNames(body, - Env(params.map { case (n, _) => n -> n }: _*)) + val normalizedBody = new NormalizeNames(_.toString)(ctx, body, + Env(params.map { case (n, _) => n -> n }: _*) + ) val k = CodeCacheKey(aggSigs, params.map { case (n, pt) => (n, pt) }, normalizedBody) (ctx.backend.lookupOrCompileCachedFunction[F with FunctionWithAggRegion](k) { @@ -95,7 +95,7 @@ object CompileWithAggregators { ir = Subst(ir, BindingEnv(params .zipWithIndex .foldLeft(Env.empty[IR]) { case (e, ((n, t), i)) => e.bind(n, In(i, t)) })) - ir = LoweringPipeline.compileLowerer(optimize).apply(ctx, ir).asInstanceOf[IR].noSharing + ir = LoweringPipeline.compileLowerer(optimize).apply(ctx, ir).asInstanceOf[IR].noSharing(ctx) TypeCheck(ctx, ir, BindingEnv(Env.fromSeq[Type](params.map { case (name, t) => name -> t.virtualType }))) @@ -184,7 +184,7 @@ object CompileIterator { val outerRegion = outerRegionField - val ir = LoweringPipeline.compileLowerer(true)(ctx, body).asInstanceOf[IR].noSharing + val ir = LoweringPipeline.compileLowerer(true)(ctx, body).asInstanceOf[IR].noSharing(ctx) TypeCheck(ctx, ir) var elementAddress: Settable[Long] = null diff --git a/hail/src/main/scala/is/hail/expr/ir/Emit.scala b/hail/src/main/scala/is/hail/expr/ir/Emit.scala index ecfc03297ef..cf9b9b006fe 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Emit.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Emit.scala @@ -30,7 +30,7 @@ object EmitContext { def analyze(ctx: ExecuteContext, ir: IR, pTypeEnv: Env[PType] = Env.empty): EmitContext = { ctx.timer.time("EmitContext.analyze") { val usesAndDefs = ComputeUsesAndDefs(ir, errorIfFreeVariables = false) - val requiredness = Requiredness.apply(ir, usesAndDefs, null, pTypeEnv) + val requiredness = Requiredness(ir, usesAndDefs, ctx, pTypeEnv) val inLoopCriticalPath = ControlFlowPreventsSplit(ir, ParentPointers(ir), usesAndDefs) val methodSplits = ComputeMethodSplits(ctx, ir, 
inLoopCriticalPath) new EmitContext(ctx, requiredness, usesAndDefs, methodSplits, inLoopCriticalPath, Memo.empty[Unit]) diff --git a/hail/src/main/scala/is/hail/expr/ir/ForwardLets.scala b/hail/src/main/scala/is/hail/expr/ir/ForwardLets.scala index 5c9b17232c4..1a5db502a32 100644 --- a/hail/src/main/scala/is/hail/expr/ir/ForwardLets.scala +++ b/hail/src/main/scala/is/hail/expr/ir/ForwardLets.scala @@ -1,12 +1,12 @@ package is.hail.expr.ir -import is.hail.utils._ +import is.hail.backend.ExecuteContext import scala.collection.mutable object ForwardLets { - def apply[T <: BaseIR](ir0: T): T = { - val ir1 = new NormalizeNames(_ => genUID(), allowFreeVariables = true).apply(ir0) + def apply[T <: BaseIR](ctx: ExecuteContext)(ir0: T): T = { + val ir1 = new NormalizeNames(_ => genUID(), allowFreeVariables = true)(ctx, ir0) val UsesAndDefs(uses, defs, _) = ComputeUsesAndDefs(ir1, errorIfFreeVariables = false) val nestingDepth = NestingDepth(ir1) diff --git a/hail/src/main/scala/is/hail/expr/ir/Interpret.scala b/hail/src/main/scala/is/hail/expr/ir/Interpret.scala index e6f7127865e..4b90bd6702c 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Interpret.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Interpret.scala @@ -24,7 +24,7 @@ object Interpret { apply(tir, ctx, optimize = true) def apply(tir: TableIR, ctx: ExecuteContext, optimize: Boolean): TableValue = { - val lowered = LoweringPipeline.legacyRelationalLowerer(optimize)(ctx, tir).asInstanceOf[TableIR].noSharing + val lowered = LoweringPipeline.legacyRelationalLowerer(optimize)(ctx, tir).asInstanceOf[TableIR].noSharing(ctx) lowered.analyzeAndExecute(ctx).asTableValue(ctx) } diff --git a/hail/src/main/scala/is/hail/expr/ir/LowerOrInterpretNonCompilable.scala b/hail/src/main/scala/is/hail/expr/ir/LowerOrInterpretNonCompilable.scala index abbbc58ccd8..f36863d5f2c 100644 --- a/hail/src/main/scala/is/hail/expr/ir/LowerOrInterpretNonCompilable.scala +++ b/hail/src/main/scala/is/hail/expr/ir/LowerOrInterpretNonCompilable.scala @@ -60,6 +60,6 @@ object LowerOrInterpretNonCompilable { } } - rewrite(ir.noSharing, mutable.HashMap.empty) + rewrite(ir.noSharing(ctx), mutable.HashMap.empty) } } diff --git a/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala b/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala index fab2531f5a6..740bf040597 100644 --- a/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala +++ b/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala @@ -1,5 +1,6 @@ package is.hail.expr.ir +import is.hail.backend.ExecuteContext import is.hail.utils.StackSafe._ class NormalizeNames(normFunction: Int => String, allowFreeVariables: Boolean = false) { @@ -10,11 +11,14 @@ class NormalizeNames(normFunction: Int => String, allowFreeVariables: Boolean = normFunction(count) } - def apply(ir: IR, env: Env[String]): IR = apply(ir.noSharing, BindingEnv(env)) + def apply(ctx: ExecuteContext, ir: IR, env: Env[String]): IR = + normalizeIR(ir.noSharing(ctx), BindingEnv(env)).run().asInstanceOf[IR] - def apply(ir: IR, env: BindingEnv[String]): IR = normalizeIR(ir.noSharing, env).run().asInstanceOf[IR] + def apply(ctx: ExecuteContext, ir: IR, env: BindingEnv[String]): IR = + normalizeIR(ir.noSharing(ctx), env).run().asInstanceOf[IR] - def apply(ir: BaseIR): BaseIR = normalizeIR(ir.noSharing, BindingEnv(agg=Some(Env.empty), scan=Some(Env.empty))).run() + def apply(ctx: ExecuteContext, ir: BaseIR): BaseIR = + normalizeIR(ir.noSharing(ctx), BindingEnv(agg=Some(Env.empty), scan=Some(Env.empty))).run() private def normalizeIR(ir: BaseIR, env: 
BindingEnv[String], context: Array[String] = Array()): StackFrame[BaseIR] = { diff --git a/hail/src/main/scala/is/hail/expr/ir/Optimize.scala b/hail/src/main/scala/is/hail/expr/ir/Optimize.scala index b8fff2e726d..45061ba1131 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Optimize.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Optimize.scala @@ -21,9 +21,9 @@ object Optimize { last = ir runOpt(FoldConstants(ctx, _), iter, "FoldConstants") runOpt(ExtractIntervalFilters(ctx, _), iter, "ExtractIntervalFilters") - runOpt(normalizeNames(_), iter, "NormalizeNames") + runOpt(normalizeNames(ctx, _), iter, "NormalizeNames") runOpt(Simplify(ctx, _), iter, "Simplify") - runOpt(ForwardLets(_), iter, "ForwardLets") + runOpt(ForwardLets(ctx), iter, "ForwardLets") runOpt(ForwardRelationalLets(_), iter, "ForwardRelationalLets") runOpt(PruneDeadFields(ctx, _), iter, "PruneDeadFields") diff --git a/hail/src/main/scala/is/hail/expr/ir/RefEquality.scala b/hail/src/main/scala/is/hail/expr/ir/RefEquality.scala index 7fa83bb96da..4a0e0acf5fe 100644 --- a/hail/src/main/scala/is/hail/expr/ir/RefEquality.scala +++ b/hail/src/main/scala/is/hail/expr/ir/RefEquality.scala @@ -1,5 +1,7 @@ package is.hail.expr.ir +import is.hail.backend.ExecuteContext + import scala.collection.mutable object RefEquality { @@ -61,19 +63,14 @@ class Memo[T] private(val m: mutable.HashMap[RefEquality[BaseIR], T]) { object HasIRSharing { - def apply(ir: BaseIR): Boolean = { - val m = mutable.HashSet.empty[RefEquality[BaseIR]] - - def recur(x: BaseIR): Boolean = { - val re = RefEquality(x) - if (m.contains(re)) - true - else { - m.add(re) - x.children.exists(recur) - } + def apply(ctx: ExecuteContext)(ir: BaseIR): Boolean = { + val mark = ctx.irMetadata.nextFlag + + for (node <- IRTraversal.levelOrder(ir)) { + if (node.mark == mark) return true + node.mark = mark } - recur(ir) + false } } diff --git a/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala b/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala index e2c6b0c317e..59f9ebbf519 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala @@ -35,7 +35,7 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: ExecuteContext) { type State = Memo[BaseTypeWithRequiredness] private val cache = Memo.empty[BaseTypeWithRequiredness] private val dependents = Memo.empty[mutable.Set[RefEquality[BaseIR]]] - private val q = mutable.Set[RefEquality[BaseIR]]() + private[this] val q = new Queue(ctx.irMetadata.nextFlag) private val defs = Memo.empty[IndexedSeq[BaseTypeWithRequiredness]] private val states = Memo.empty[IndexedSeq[TypeWithRequiredness]] @@ -90,8 +90,7 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: ExecuteContext) { def run(): Unit = { while (q.nonEmpty) { - val node = q.head - q -= node + val node = q.pop() if (analyze(node.t) && dependents.contains(node)) { q ++= dependents.lookup(node) } @@ -615,7 +614,7 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: ExecuteContext) { val eltType = tcoerce[RIterable](requiredness).elementType eltType.unionFrom(lookup(joinF)) case StreamMultiMerge(as, _) => - requiredness.union(as.forall(lookup(_).required)) + requiredness.union(as.forall(lookup(_).required)) val elt = tcoerce[RStruct](tcoerce[RIterable](requiredness).elementType) as.foreach { a => elt.unionFields(tcoerce[RStruct](tcoerce[RIterable](lookup(a)).elementType)) @@ -828,4 +827,27 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: ExecuteContext) { requiredness.probeChangedAndReset() } + + 
+ final class Queue(val markFlag: Int) { + private[this] val q = mutable.Queue[RefEquality[BaseIR]]() + + def nonEmpty: Boolean = + q.nonEmpty + + def pop(): RefEquality[BaseIR] = { + val n = q.dequeue() + n.t.mark = 0 + n + } + + def +=(re: RefEquality[BaseIR]): Unit = + if (re.t.mark != markFlag) { + re.t.mark = markFlag + q += re + } + + def ++=(res: Iterable[RefEquality[BaseIR]]): Unit = + res.foreach(this += _) + } } diff --git a/hail/src/main/scala/is/hail/expr/ir/Simplify.scala b/hail/src/main/scala/is/hail/expr/ir/Simplify.scala index a963f36b350..1f9d0d3cffd 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Simplify.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Simplify.scala @@ -30,8 +30,9 @@ object Simplify { private[this] def simplifyValue(ctx: ExecuteContext): IR => IR = visitNode( Simplify(ctx, _), - rewriteValueNode, - simplifyValue(ctx)) + rewriteValueNode(ctx), + simplifyValue(ctx) + ) private[this] def simplifyTable(ctx: ExecuteContext)(tir: TableIR): TableIR = visitNode( @@ -55,8 +56,8 @@ object Simplify { )(bmir) } - private[this] def rewriteValueNode(ir: IR): Option[IR] = - valueRules.lift(ir).orElse(numericRules(ir)) + private[this] def rewriteValueNode(ctx: ExecuteContext)(ir: IR): Option[IR] = + valueRules(ctx).lift(ir).orElse(numericRules(ir)) private[this] def rewriteTableNode(ctx: ExecuteContext)(tir: TableIR): Option[TableIR] = tableRules(ctx).lift(tir) @@ -218,7 +219,7 @@ object Simplify { ).reduce((f, g) => ir => f(ir).orElse(g(ir))) } - private[this] def valueRules: PartialFunction[IR, IR] = { + private[this] def valueRules(ctx: ExecuteContext): PartialFunction[IR, IR] = { // propagate NA case x: IR if hasMissingStrictChild(x) => NA(x.typ) @@ -456,7 +457,7 @@ object Simplify { val rw = fieldNames.foldLeft[IR](Let(name, old, rewrite(body))) { case (comb, fieldName) => Let(newFieldRefs(fieldName).name, newFieldMap(fieldName), comb) } - ForwardLets[IR](rw) + ForwardLets(ctx)(rw) case SelectFields(old, fields) if tcoerce[TStruct](old.typ).fieldNames sameElements fields => old diff --git a/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala b/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala index a012c87f560..73812b9860a 100644 --- a/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala +++ b/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala @@ -30,7 +30,7 @@ case object SemanticHash extends Logging { // Running the algorithm on the name-normalised IR // removes sensitivity to compiler-generated names val nameNormalizedIR = ctx.timer.time("NormalizeNames") { - new NormalizeNames(_.toString, allowFreeVariables = true)(root) + new NormalizeNames(_.toString, allowFreeVariables = true)(ctx, root) } val semhash = ctx.timer.time("Hash") { diff --git a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerAndExecuteShuffles.scala b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerAndExecuteShuffles.scala index 78419622694..5ea8fb96ef8 100644 --- a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerAndExecuteShuffles.scala +++ b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerAndExecuteShuffles.scala @@ -53,7 +53,7 @@ object LowerAndExecuteShuffles { StreamBufferedAggregate(Ref(streamName, streamTyp), bindIR(GetField(insGlob, "__initState")) { states => Begin(aggSigs.indices.map { aIdx => InitFromSerializedValue(aIdx, GetTupleElement(states, aIdx), aggSigs(aIdx).state) }) }, newKey, seq, "row", aggSigs, bufferSize)), - 0, 0).noSharing + 0, 0).noSharing(ctx) val analyses = LoweringAnalyses(partiallyAggregated, ctx) diff 
--git a/hail/src/main/scala/is/hail/expr/ir/lowering/LoweringPass.scala b/hail/src/main/scala/is/hail/expr/ir/lowering/LoweringPass.scala index 772197a8e45..351ca79574a 100644 --- a/hail/src/main/scala/is/hail/expr/ir/lowering/LoweringPass.scala +++ b/hail/src/main/scala/is/hail/expr/ir/lowering/LoweringPass.scala @@ -8,11 +8,16 @@ import is.hail.utils._ final case class IrMetadata(semhash: Option[SemanticHash.Type]) { private[this] var hashCounter: Int = 0 + private[this] var markCounter: Int = 0 def nextHash: Option[SemanticHash.Type] = { hashCounter += 1 semhash.map(SemanticHash.extend(_, SemanticHash.Bytes.fromInt(hashCounter))) } + def nextFlag: Int = { + markCounter += 1 + markCounter + } } trait LoweringPass { @@ -102,7 +107,7 @@ case object LowerArrayAggsToRunAggsPass extends LoweringPass { val context: String = "LowerArrayAggsToRunAggs" def transform(ctx: ExecuteContext, ir: BaseIR): BaseIR = { - val x = ir.noSharing + val x = ir.noSharing(ctx) val r = Requiredness(x, ctx) RewriteBottomUp(x, { case x@StreamAgg(a, name, query) => @@ -126,7 +131,7 @@ case object LowerArrayAggsToRunAggsPass extends LoweringPass { if (newNode.typ != x.typ) throw new RuntimeException(s"types differ:\n new: ${newNode.typ}\n old: ${x.typ}") - Some(newNode.noSharing) + Some(newNode.noSharing(ctx)) case x@StreamAggScan(a, name, query) => val res = genUID() val aggs = Extract(query, res, r, isScan=true) @@ -142,7 +147,7 @@ case object LowerArrayAggsToRunAggsPass extends LoweringPass { } if (newNode.typ != x.typ) throw new RuntimeException(s"types differ:\n new: ${ newNode.typ }\n old: ${ x.typ }") - Some(newNode.noSharing) + Some(newNode.noSharing(ctx)) case _ => None }) } diff --git a/hail/src/main/scala/is/hail/types/TypeWithRequiredness.scala b/hail/src/main/scala/is/hail/types/TypeWithRequiredness.scala index fef61cd7e2e..1bfa5aa0a44 100644 --- a/hail/src/main/scala/is/hail/types/TypeWithRequiredness.scala +++ b/hail/src/main/scala/is/hail/types/TypeWithRequiredness.scala @@ -98,8 +98,11 @@ sealed abstract class BaseTypeWithRequiredness { throw new AssertionError( s"children lengths differed ${children.length} ${newChildren.length}. ${children} ${newChildren} ${this}") } - (children, newChildren).zipped.foreach { (r1, r2) => - r1.unionFrom(r2) + + // foreach on zipped seqs is very slow as the implementation + // doesn't know that the seqs are the same length. 
+ for (i <- children.indices) { + children(i).unionFrom(newChildren(i)) } } @@ -500,12 +503,12 @@ object RTable { RTable(rowStruct.fields.map(f => f.name -> f.typ), globStruct.fields.map(f => f.name -> f.typ), key) } - def fromTableStage(ec: ExecuteContext, s: TableStage): RTable = { + def fromTableStage(ctx: ExecuteContext, s: TableStage): RTable = { def virtualTypeWithReq(ir: IR, inputs: Env[PType]): VirtualTypeWithReq = { import is.hail.expr.ir.Requiredness - val ns = ir.noSharing + val ns = ir.noSharing(ctx) val usesAndDefs = ComputeUsesAndDefs(ns, errorIfFreeVariables = false) - val req = Requiredness.apply(ns, usesAndDefs, ec, inputs) + val req = Requiredness.apply(ns, usesAndDefs, ctx, inputs) VirtualTypeWithReq(ir.typ, req.lookup(ns).asInstanceOf[TypeWithRequiredness]) } diff --git a/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala b/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala index c778465f0cd..35534bb6380 100644 --- a/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala @@ -85,45 +85,45 @@ class ForwardLetsSuite extends HailSuite { @Test(dataProvider = "nonForwardingOps") def testNonForwardingOps(ir: IR): Unit = { - val after = ForwardLets(ir) - val normalizedBefore = (new NormalizeNames(_.toString)).apply(ir) - val normalizedAfter = (new NormalizeNames(_.toString)).apply(after) + val after = ForwardLets(ctx)(ir) + val normalizedBefore = (new NormalizeNames(_.toString))(ctx, ir) + val normalizedAfter = (new NormalizeNames(_.toString))(ctx, after) assert(normalizedBefore == normalizedAfter) } @Test(dataProvider = "nonForwardingNonEvalOps") def testNonForwardingNonEvalOps(ir: IR): Unit = { - val after = ForwardLets(ir) + val after = ForwardLets(ctx)(ir) assert(after.isInstanceOf[Let]) } @Test(dataProvider = "nonForwardingAggOps") def testNonForwardingAggOps(ir: IR): Unit = { - val after = ForwardLets(ir) + val after = ForwardLets(ctx)(ir) assert(after.isInstanceOf[AggLet]) } @Test(dataProvider = "forwardingOps") def testForwardingOps(ir: IR): Unit = { - val after = ForwardLets(ir) + val after = ForwardLets(ctx)(ir) assert(!after.isInstanceOf[Let]) assertEvalSame(ir, args = Array(5 -> TInt32)) } @Test(dataProvider = "forwardingAggOps") def testForwardingAggOps(ir: IR): Unit = { - val after = ForwardLets(ir) + val after = ForwardLets(ctx)(ir) assert(!after.isInstanceOf[AggLet]) } @Test def testLetNoMention(): Unit = { val ir = Let("x", I32(1), I32(2)) - assert(ForwardLets[IR](ir) == I32(2)) + assert(ForwardLets[IR](ctx)(ir) == I32(2)) } @Test def testLetRefRewrite(): Unit = { val ir = Let("x", I32(1), Ref("x", TInt32)) - assert(ForwardLets[IR](ir) == I32(1)) + assert(ForwardLets[IR](ctx)(ir) == I32(1)) } @Test def testAggregators(): Unit = { @@ -133,10 +133,7 @@ class ForwardLetsSuite extends HailSuite { })) .apply(aggEnv) - TypeCheck( - ctx, - ForwardLets(ir0).asInstanceOf[IR], - BindingEnv(Env.empty, agg = Some(aggEnv))) + TypeCheck(ctx, ForwardLets(ctx)(ir0), BindingEnv(Env.empty, agg = Some(aggEnv))) } @Test def testNestedBindingOverwrites(): Unit = { @@ -146,7 +143,7 @@ class ForwardLetsSuite extends HailSuite { }(env) TypeCheck(ctx, ir, BindingEnv(env)) - TypeCheck(ctx, ForwardLets(ir).asInstanceOf[IR], BindingEnv(env)) + TypeCheck(ctx, ForwardLets(ctx)(ir), BindingEnv(env)) } @Test def testLetsDoNotForwardInsideArrayAggWithNoOps(): Unit = { @@ -163,6 +160,6 @@ class ForwardLetsSuite extends HailSuite { ))) TypeCheck(ctx, x, BindingEnv(Env("y" -> TInt32))) - TypeCheck(ctx, 
ForwardLets(x).asInstanceOf[IR], BindingEnv(Env("y" -> TInt32))) + TypeCheck(ctx, ForwardLets(ctx)(x), BindingEnv(Env("y" -> TInt32))) } } diff --git a/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala b/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala index f3d83d9507e..519e4f0258c 100644 --- a/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala @@ -3387,8 +3387,8 @@ class IRSuite extends HailSuite { @Test def testHasIRSharing(): Unit = { val r = Ref("x", TInt32) val ir1 = MakeTuple.ordered(FastSeq(I64(1), r, r, I32(1))) - assert(HasIRSharing(ir1)) - assert(!HasIRSharing(ir1.deepCopy())) + assert(HasIRSharing(ctx)(ir1)) + assert(!HasIRSharing(ctx)(ir1.deepCopy())) } @Test def freeVariablesAggScanBindingEnv(): Unit = { diff --git a/hail/src/test/scala/is/hail/expr/ir/SimplifySuite.scala b/hail/src/test/scala/is/hail/expr/ir/SimplifySuite.scala index 87aa463d6ba..b3a7c5c6e56 100644 --- a/hail/src/test/scala/is/hail/expr/ir/SimplifySuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/SimplifySuite.scala @@ -129,7 +129,7 @@ class SimplifySuite extends HailSuite { ) ) ) - val simplified = new NormalizeNames(_.toString, true).apply(Simplify(ctx, l)) + val simplified = new NormalizeNames(_.toString, true)(ctx, Simplify(ctx, l)) val expected = Let("1", I32(1) + Ref("OTHER_1", TInt32), Let("2", I32(1) + Ref("1", TInt32), From c958f74aad2380c350174fc431f9708d898dd429 Mon Sep 17 00:00:00 2001 From: jigold Date: Thu, 16 Nov 2023 11:38:41 -0500 Subject: [PATCH 08/48] [batch] Turn off metrics collecting from ops agent (#14015) --- batch/batch/cloud/gcp/driver/create_instance.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/batch/batch/cloud/gcp/driver/create_instance.py b/batch/batch/cloud/gcp/driver/create_instance.py index 255cb569922..5bddfe9d814 100644 --- a/batch/batch/cloud/gcp/driver/create_instance.py +++ b/batch/batch/cloud/gcp/driver/create_instance.py @@ -246,19 +246,11 @@ def scheduling() -> dict: receivers: [runlog, workerlog, jvmlog] metrics: - receivers: - hostmetrics: - type: hostmetrics - collection_interval: 60s processors: metrics_filter: type: exclude_metrics - metrics_pattern: [] - service: - pipelines: - default_pipeline: - receivers: [hostmetrics] - processors: [metrics_filter] + metrics_pattern: + - agent.googleapis.com/processes/* EOF sudo systemctl restart google-cloud-ops-agent From df46a96be9edd4a0c6848d2b1d8756f9e7e8222f Mon Sep 17 00:00:00 2001 From: Mark Walker Date: Thu, 16 Nov 2023 15:33:36 -0500 Subject: [PATCH 09/48] Minor doc typo (#14017) On the Azure doc page --- hail/python/hail/docs/cloud/azure.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hail/python/hail/docs/cloud/azure.rst b/hail/python/hail/docs/cloud/azure.rst index 3b23ecbfc01..a379694c570 100644 --- a/hail/python/hail/docs/cloud/azure.rst +++ b/hail/python/hail/docs/cloud/azure.rst @@ -1,5 +1,5 @@ =============== -Micorosft Azure +Microsoft Azure =============== ``hailctl hdinsight`` From 84a84191c233be75c2ff3455841e0ea4927853a8 Mon Sep 17 00:00:00 2001 From: Christopher Vittal Date: Mon, 20 Nov 2023 23:17:55 -0600 Subject: [PATCH 10/48] [query] Make test_hail_in_notebook idempotent (#13999) closes #13994 --- hail/python/test/hail/test_hail_in_notebook.ipynb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hail/python/test/hail/test_hail_in_notebook.ipynb b/hail/python/test/hail/test_hail_in_notebook.ipynb index 9512dbd9d6b..e0f186aa4fb 100644 --- 
a/hail/python/test/hail/test_hail_in_notebook.ipynb +++ b/hail/python/test/hail/test_hail_in_notebook.ipynb @@ -9,7 +9,8 @@ "source": [ "import hail as hl\n", "import os\n", - "hl.utils.range_table(1).write(f'{os.environ[\"HAIL_TEST_STORAGE_URI\"]}/test_hail_in_notebook.ht')\n", + "from hailtop.utils import secret_alnum_string\n", + "hl.utils.range_table(1).write(f'{os.environ[\"HAIL_TEST_STORAGE_URI\"]}/test_hail_in_notebook_{secret_alnum_string(10)}.ht')\n", "from helpers import resource\n", "hl.read_table(resource('backward_compatability/1.7.0/table/9.ht')).count()" ] From a3252d67c0396fe72fb8c59798a4ee88ae221d55 Mon Sep 17 00:00:00 2001 From: iris <84595986+iris-garden@users.noreply.github.com> Date: Tue, 21 Nov 2023 14:01:20 -0500 Subject: [PATCH 11/48] [query/plot] removes "chr" prefix for manhattan plot x-axis labels (#13966) Fixes #13952. --- hail/python/hail/plot/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hail/python/hail/plot/plots.py b/hail/python/hail/plot/plots.py index 51b07a0998d..f16ae728551 100644 --- a/hail/python/hail/plot/plots.py +++ b/hail/python/hail/plot/plots.py @@ -1594,7 +1594,7 @@ def manhattan(pvals: 'Float64Expression', assert legend is not None legend.visible = False p.xaxis.ticker = contig_ticks - p.xaxis.major_label_overrides = dict(zip(contig_ticks, observed_contigs)) + p.xaxis.major_label_overrides = dict(zip(contig_ticks, [contig.replace("chr", "") for contig in observed_contigs])) if significance_line is not None: p.renderers.append(Span(location=-math.log10(significance_line), From 5034728451849a8313853656b19138c741ede40b Mon Sep 17 00:00:00 2001 From: Dan King Date: Tue, 21 Nov 2023 15:14:25 -0500 Subject: [PATCH 12/48] [query] fix #13937 which manifests as Google throwing an NPE (#14022) CHANGELOG: Fix #13937 caused by faulty library code in the Google Cloud Storage API Java client library. --- hail/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hail/build.gradle b/hail/build.gradle index 1b65904484f..eebeb531797 100644 --- a/hail/build.gradle +++ b/hail/build.gradle @@ -166,7 +166,7 @@ dependencies { throw new UnsupportedOperationException("Couldn't pick a valid elasticsearch.") } - implementation(group: 'com.google.cloud', name: 'google-cloud-storage', version: '2.27.1') { + implementation(group: 'com.google.cloud', name: 'google-cloud-storage', version: '2.29.1') { exclude group: 'com.fasterxml.jackson.core' } From 8b498aaa364e7d17701fdf6992b8abf0cbeac716 Mon Sep 17 00:00:00 2001 From: Daniel Goldstein Date: Tue, 21 Nov 2023 13:41:09 -0800 Subject: [PATCH 13/48] [batch] Use default credentials for the Azure SAS token test (#13981) A very small PR but here's the background and context behind this change. When talking to either GCP or Azure, hail chooses credentials in the following order from highest priority to lowest priority: 1. An explicit `credential_file` argument passed to the relevant credentials class 2. An environment variable containing the path to the credentials (`GOOGLE_APPLICATION_CREDENTIALS` or `AZURE_APPLICATION_CREDENTIALS`) (from this you can see why the code that was here is totally redundant) 3. The latent credentials present on the machine. This might be `gcloud` or `az` credentials, or the metadata server if you're on a cloud VM. I'm trying to rid the codebase of most explicit providing of credentials file paths, for two reasons: - Quality of life. I'm already signed into the cloud with `gcloud` and `az`. 
I shouldn't need to download some file and provide `AZURE_APPLICATION_CREDENTIALS` to run this test. It should just use the latent credentials. - We are trying to phase out credentials files altogether for security reasons. These files are long-lived secrets that you really don't want to leak and are currently exposed to users in Batch jobs, so they can be easily exfiltrated. Using the latent credentials on a cloud VM (the metadata server) has the benefit of only issuing short-lived access tokens which last for hours not months, so it's basically always better to use the latent credentials when possible. --- hail/python/test/hail/fs/test_worker_driver_fs.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hail/python/test/hail/fs/test_worker_driver_fs.py b/hail/python/test/hail/fs/test_worker_driver_fs.py index 7a2b695546d..43565225f04 100644 --- a/hail/python/test/hail/fs/test_worker_driver_fs.py +++ b/hail/python/test/hail/fs/test_worker_driver_fs.py @@ -141,8 +141,7 @@ def test_qob_can_use_sas_tokens(): sub_id = os.environ['HAIL_AZURE_SUBSCRIPTION_ID'] rg = os.environ['HAIL_AZURE_RESOURCE_GROUP'] - creds_file = os.environ['AZURE_APPLICATION_CREDENTIALS'] - sas_token = asyncio.run(AzureAsyncFS(credential_file=creds_file).generate_sas_token(sub_id, rg, account, "rl")) + sas_token = asyncio.run(AzureAsyncFS().generate_sas_token(sub_id, rg, account, "rl")) mt = hl.import_vcf(f'{vcf}?{sas_token}', min_partitions=4) mt._force_count_rows() From c80078b2ea9ac9b93c96e7745344f3b6dccd701e Mon Sep 17 00:00:00 2001 From: Christopher Vittal Date: Mon, 27 Nov 2023 13:36:56 -0600 Subject: [PATCH 14/48] [query] Deprecate default_reference parameter to hl.init (#13987) CHANGELOG: Deprecate default_reference parameter to hl.init, users should use `default_reference` with an argument to set new default references usually shortly after init. Resolves #13856 --------- Co-authored-by: Dan King --- hail/python/hail/context.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/hail/python/hail/context.py b/hail/python/hail/context.py index bf8c5c26743..af7aa55fe91 100644 --- a/hail/python/hail/context.py +++ b/hail/python/hail/context.py @@ -11,7 +11,7 @@ from pyspark import SparkContext import hail -from hail.genetics.reference_genome import ReferenceGenome +from hail.genetics.reference_genome import ReferenceGenome, reference_genome_type from hail.typecheck import (nullable, typecheck, typecheck_method, enumeration, dictof, oneof, sized_tupleof, sequenceof) from hail.utils import get_env_or_default @@ -142,7 +142,7 @@ def default_reference(self) -> ReferenceGenome: return self._default_ref @default_reference.setter - def set_default_reference(self, value): + def default_reference(self, value): if not isinstance(value, ReferenceGenome): raise TypeError(f'{value} is {type(value)} not a ReferenceGenome') self._default_ref = value @@ -167,7 +167,7 @@ def stop(self): min_block_size=int, branching_factor=int, tmp_dir=nullable(str), - default_reference=enumeration(*BUILTIN_REFERENCES), + default_reference=nullable(enumeration(*BUILTIN_REFERENCES)), idempotent=bool, global_seed=nullable(int), spark_conf=nullable(dictof(str, str)), @@ -192,7 +192,7 @@ def init(sc=None, min_block_size=0, branching_factor=50, tmp_dir=None, - default_reference='GRCh37', + default_reference=None, idempotent=False, global_seed=None, spark_conf=None, @@ -212,11 +212,11 @@ def init(sc=None, This function will be called with default arguments if any Hail functionality is used. 
If you need custom configuration, you must explicitly call this function before using Hail. For - example, to set the default reference genome to GRCh38, import Hail and immediately call + example, to set the global random seed to 0, import Hail and immediately call :func:`.init`: >>> import hail as hl - >>> hl.init(default_reference='GRCh38') # doctest: +SKIP + >>> hl.init(global_seed=0) # doctest: +SKIP Hail has two backends, ``spark`` and ``batch``. Hail selects a backend by consulting, in order, these configuration locations: @@ -289,6 +289,8 @@ def init(sc=None, Networked temporary directory. Must be a network-visible file path. Defaults to /tmp in the default scheme. default_reference : :class:`str` + *Deprecated*. Please use :func:`.default_reference` to set the default reference genome + Default reference genome. Either ``'GRCh37'``, ``'GRCh38'``, ``'GRCm38'``, or ``'CanFam3'``. idempotent : :obj:`bool` @@ -334,6 +336,14 @@ def init(sc=None, warning('Hail has already been initialized. If this call was intended to change configuration,' ' close the session with hl.stop() first.') + if default_reference is not None: + warnings.warn('Using hl.init with a default_reference argument is deprecated. ' + 'To set a default reference genome after initializing hail, ' + 'call `hl.default_reference` with an argument to set the ' + 'default reference genome.') + else: + default_reference = 'GRCh37' + backend = choose_backend(backend) if backend == 'service': @@ -826,7 +836,8 @@ async def _async_current_backend() -> Backend: return (await Env._async_hc())._backend -def default_reference(new_default_reference: Optional[ReferenceGenome] = None) -> Optional[ReferenceGenome]: +@typecheck(new_default_reference=nullable(reference_genome_type)) +def default_reference(new_default_reference=None) -> Optional[ReferenceGenome]: """With no argument, returns the default reference genome (``'GRCh37'`` by default). With an argument, sets the default reference genome to the argument. From d5efa30ef727bab36609d829a8ecee11e400be7f Mon Sep 17 00:00:00 2001 From: Edmund Higham Date: Mon, 27 Nov 2023 18:20:16 -0500 Subject: [PATCH 15/48] `Let` Binds Multiple Values (#13984) This change allows `Let` nodes to bind multiple values. Serialisation is backwards compatible, meaning no changes to existing python code are necessary. This form of `Let` is preferable because it flattens deeply nested IRs, which can help reduce the time and stack space needed to type-check. An extreme example of this is the benchmark [matrix_multi_write_nothing](https://github.com/hail-is/hail/blob/67801dfc66b504a7d49daa53f7ec6d22c1194585/benchmark/python/benchmark_hail/run/matrix_table_benchmarks.py#L369C10-L373), which overflows the stack on type-checking without this change.
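To make the new shape concrete, here is a minimal sketch (not part of the patch itself) of how a chain of single-value bindings can now be written as one flattened node. It only uses constructors that appear in this diff (`Let`, `FastSeq`, `I32`, `Ref`, `TInt32`, `MakeTuple.ordered`); the import paths and the value name `flattened` are my own guesses for illustration:

```scala
import is.hail.expr.ir._
import is.hail.types.virtual.TInt32
import is.hail.utils.FastSeq

// Before this change, every binding was its own Let node, so a long chain of
// bindings produced a deeply nested IR:
//   Let("x", I32(1),
//     Let("y", Ref("x", TInt32),
//       MakeTuple.ordered(FastSeq(Ref("x", TInt32), Ref("y", TInt32)))))
//
// After this change, the same program is a single flat Let node. Bindings are
// scoped left to right, so "y" may refer to "x", and the body sees both.
val flattened: IR =
  Let(
    FastSeq(
      "x" -> I32(1),
      "y" -> Ref("x", TInt32)
    ),
    MakeTuple.ordered(FastSeq(Ref("x", TInt32), Ref("y", TInt32)))
  )
```

The parser change below accepts any number of bound names before the values and body, which is why IRs serialised by existing Python clients (a single binding per `Let`) still parse unchanged.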
--- hail/build.gradle | 6 +- .../main/scala/is/hail/expr/ir/Binds.scala | 5 +- .../main/scala/is/hail/expr/ir/Children.scala | 7 +- .../is/hail/expr/ir/ComputeUsesAndDefs.scala | 7 +- .../src/main/scala/is/hail/expr/ir/Copy.scala | 10 +- .../is/hail/expr/ir/DeprecatedIRBuilder.scala | 12 +- .../src/main/scala/is/hail/expr/ir/Emit.scala | 40 +- hail/src/main/scala/is/hail/expr/ir/Env.scala | 3 +- .../hail/expr/ir/ExtractIntervalFilters.scala | 5 +- .../scala/is/hail/expr/ir/ForwardLets.scala | 42 ++- hail/src/main/scala/is/hail/expr/ir/IR.scala | 24 +- .../scala/is/hail/expr/ir/IRBuilder.scala | 15 +- .../is/hail/expr/ir/InTailPosition.scala | 2 +- .../scala/is/hail/expr/ir/InferType.scala | 2 +- .../scala/is/hail/expr/ir/Interpret.scala | 36 +- .../scala/is/hail/expr/ir/LowerMatrixIR.scala | 157 ++++---- .../main/scala/is/hail/expr/ir/MatrixIR.scala | 58 +-- .../scala/is/hail/expr/ir/MatrixValue.scala | 2 +- .../scala/is/hail/expr/ir/MatrixWriter.scala | 18 +- .../is/hail/expr/ir/NormalizeNames.scala | 21 +- .../main/scala/is/hail/expr/ir/Optimize.scala | 2 +- .../main/scala/is/hail/expr/ir/Parser.scala | 16 +- .../main/scala/is/hail/expr/ir/Pretty.scala | 12 +- .../is/hail/expr/ir/PruneDeadFields.scala | 106 +++--- .../scala/is/hail/expr/ir/Requiredness.scala | 14 +- .../main/scala/is/hail/expr/ir/Simplify.scala | 351 ++++++++++-------- .../main/scala/is/hail/expr/ir/TableIR.scala | 18 +- .../scala/is/hail/expr/ir/TypeCheck.scala | 5 +- .../scala/is/hail/expr/ir/agg/Extract.scala | 19 +- .../hail/expr/ir/analyses/SemanticHash.scala | 6 +- .../expr/ir/functions/ArrayFunctions.scala | 60 +-- .../expr/ir/functions/DictFunctions.scala | 6 +- .../hail/expr/ir/functions/SetFunctions.scala | 8 +- .../expr/ir/functions/StringFunctions.scala | 25 +- .../ir/lowering/LowerAndExecuteShuffles.scala | 47 ++- .../expr/ir/lowering/LowerBlockMatrixIR.scala | 34 +- .../hail/expr/ir/lowering/LowerTableIR.scala | 88 ++--- .../ir/lowering/LowerTableIRHelpers.scala | 6 +- .../hail/expr/ir/lowering/LoweringPass.scala | 25 +- .../expr/ir/lowering/RVDToTableStage.scala | 21 +- .../main/scala/is/hail/expr/ir/package.scala | 28 +- .../is/hail/expr/ir/streams/EmitStream.scala | 12 +- .../scala/is/hail/io/bgen/BgenSettings.scala | 2 +- .../main/scala/is/hail/io/bgen/LoadBgen.scala | 6 +- .../is/hail/io/bgen/StagedBGENReader.scala | 2 +- .../main/scala/is/hail/io/vcf/LoadVCF.scala | 4 +- hail/src/main/scala/is/hail/methods/PCA.scala | 6 +- .../scala/is/hail/types/physical/PType.scala | 2 +- .../scala/is/hail/types/virtual/TStruct.scala | 107 ++++-- .../scala/is/hail/types/virtual/Type.scala | 17 +- hail/src/test/scala/is/hail/HailSuite.scala | 2 +- .../is/hail/expr/ir/Aggregators2Suite.scala | 25 +- .../is/hail/expr/ir/EmitStreamSuite.scala | 12 +- .../is/hail/expr/ir/ForwardLetsSuite.scala | 108 +++++- .../test/scala/is/hail/expr/ir/IRSuite.scala | 69 ++-- .../is/hail/expr/ir/LiftLiteralsSuite.scala | 4 +- .../scala/is/hail/expr/ir/PruneSuite.scala | 47 ++- .../is/hail/expr/ir/RequirednessSuite.scala | 22 +- .../scala/is/hail/expr/ir/SimplifySuite.scala | 171 ++++++--- .../expr/ir/analyses/SemanticHashSuite.scala | 31 +- .../is/hail/types/virtual/TStructSuite.scala | 42 +++ 61 files changed, 1216 insertions(+), 844 deletions(-) diff --git a/hail/build.gradle b/hail/build.gradle index eebeb531797..1312dd1ed6e 100644 --- a/hail/build.gradle +++ b/hail/build.gradle @@ -73,7 +73,10 @@ compileScala { ] if (scalaMajorVersion == "2.12") { - scalaCompileOptions.additionalParameters += "-Xlint:-unused" + 
scalaCompileOptions.additionalParameters += + [ "-Xlint:-unused" + , "-Ypartial-unification" + ] } scalaCompileOptions.forkOptions.with { @@ -108,6 +111,7 @@ configurations { } dependencies { + scalaCompilerPlugins 'com.olegpy:better-monadic-for_2.12:0.3.1' justSpark('org.apache.spark:spark-mllib_' + scalaMajorVersion + ':' + sparkVersion) { exclude group: 'org.scalanlp' } diff --git a/hail/src/main/scala/is/hail/expr/ir/Binds.scala b/hail/src/main/scala/is/hail/expr/ir/Binds.scala index 1c6f3049b5a..60ad922da29 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Binds.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Binds.scala @@ -14,7 +14,10 @@ object Bindings { // A call to Bindings(x, i) may only query the types of children with // index < i def apply(x: BaseIR, i: Int): Iterable[(String, Type)] = x match { - case Let(name, value, _) => if (i == 1) Array(name -> value.typ) else empty + case Let(bindings, _) => + val result = Array.ofDim[(String, Type)](i) + for (k <- 0 until i) result(k) = bindings(k)._1 -> bindings(k)._2.typ + result case TailLoop(name, args, resultType, _) => if (i == args.length) args.map { case (name, ir) => name -> ir.typ } :+ name -> TTuple(TTuple(args.map(_._2.typ): _*), resultType) else empty diff --git a/hail/src/main/scala/is/hail/expr/ir/Children.scala b/hail/src/main/scala/is/hail/expr/ir/Children.scala index d81ab5c764e..2d486940b09 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Children.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Children.scala @@ -34,8 +34,11 @@ object Children { children(1) = default for (i <- cases.indices) children(2 + i) = cases(i) children - case Let(name, value, body) => - Array(value, body) + case Let(bindings, body) => + val children = Array.ofDim[BaseIR](x.size) + for (i <- bindings.indices) children(i) = bindings(i)._2 + children(bindings.size) = body + children case RelationalLet(name, value, body) => Array(value, body) case AggLet(name, value, body, _) => diff --git a/hail/src/main/scala/is/hail/expr/ir/ComputeUsesAndDefs.scala b/hail/src/main/scala/is/hail/expr/ir/ComputeUsesAndDefs.scala index 261e7b32cae..85d3abd8f4e 100644 --- a/hail/src/main/scala/is/hail/expr/ir/ComputeUsesAndDefs.scala +++ b/hail/src/main/scala/is/hail/expr/ir/ComputeUsesAndDefs.scala @@ -2,13 +2,16 @@ package is.hail.expr.ir import scala.collection.mutable -case class UsesAndDefs(uses: Memo[mutable.Set[RefEquality[BaseRef]]], defs: Memo[BaseIR], free: mutable.Set[RefEquality[BaseRef]]) +case class UsesAndDefs(uses: Memo[mutable.Set[RefEquality[BaseRef]]], + defs: Memo[BaseIR], + free: mutable.Set[RefEquality[BaseRef]] + ) object ComputeUsesAndDefs { def apply(ir0: BaseIR, errorIfFreeVariables: Boolean = true): UsesAndDefs = { val uses = Memo.empty[mutable.Set[RefEquality[BaseRef]]] val defs = Memo.empty[BaseIR] - val free = if (errorIfFreeVariables) null else mutable.Set[RefEquality[BaseRef]]() + val free = mutable.Set.empty[RefEquality[BaseRef]] def compute(ir: BaseIR, env: BindingEnv[BaseIR]): Unit = { diff --git a/hail/src/main/scala/is/hail/expr/ir/Copy.scala b/hail/src/main/scala/is/hail/expr/ir/Copy.scala index 93f6ef4913b..7a5ac9d4a74 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Copy.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Copy.scala @@ -34,9 +34,13 @@ object Copy { case s: Switch => assert(s.size == newChildren.size) Switch(newChildren(0).asInstanceOf[IR], newChildren(1).asInstanceOf[IR], newChildren.drop(2).asInstanceOf[IndexedSeq[IR]]) - case Let(name, _, _) => - assert(newChildren.length == 2) - Let(name, 
newChildren(0).asInstanceOf[IR], newChildren(1).asInstanceOf[IR]) + case Let(bindings, _) => + assert(newChildren.length == x.size) + val newBindings = + (bindings, newChildren.init) + .zipped + .map { case ((name, _), ir: IR) => name -> ir } + Let(newBindings, newChildren.last.asInstanceOf[IR]) case AggLet(name, _, _, isScan) => assert(newChildren.length == 2) AggLet(name, newChildren(0).asInstanceOf[IR], newChildren(1).asInstanceOf[IR], isScan) diff --git a/hail/src/main/scala/is/hail/expr/ir/DeprecatedIRBuilder.scala b/hail/src/main/scala/is/hail/expr/ir/DeprecatedIRBuilder.scala index b9c2be4e8a9..837ea5c51b6 100644 --- a/hail/src/main/scala/is/hail/expr/ir/DeprecatedIRBuilder.scala +++ b/hail/src/main/scala/is/hail/expr/ir/DeprecatedIRBuilder.scala @@ -234,13 +234,13 @@ object DeprecatedIRBuilder { def insertStruct(other: IRProxy, ordering: Option[IndexedSeq[String]] = None): IRProxy = (env: E) => { val right = other(env) val sym = genUID() - Let( - sym, - right, + Let(FastSeq(sym -> right), InsertFields( ir(env), right.typ.asInstanceOf[TStruct].fieldNames.map(f => f -> GetField(Ref(sym, right.typ), f)), - ordering)) + ordering + ) + ) } def len: IRProxy = (env: E) => ArrayLen(ir(env)) @@ -250,7 +250,7 @@ object DeprecatedIRBuilder { def orElse(alt: IRProxy): IRProxy = { env: E => val uid = genUID() val eir = ir(env) - Let(uid, eir, If(IsNA(Ref(uid, eir.typ)), alt(env), Ref(uid, eir.typ))) + Let(FastSeq(uid -> eir), If(IsNA(Ref(uid, eir.typ)), alt(env), Ref(uid, eir.typ))) } def filter(pred: LambdaProxy): IRProxy = (env: E) => { @@ -344,7 +344,7 @@ object DeprecatedIRBuilder { val name = sym.name val value = binding(env) scope match { - case Scope.EVAL => Let(name, value, bind(rest, body, env.bind(name -> value.typ), scope)) + case Scope.EVAL => Let(FastSeq(name -> value), bind(rest, body, env.bind(name -> value.typ), scope)) case Scope.AGG => AggLet(name, value, bind(rest, body, env.bind(name -> value.typ), scope), isScan = false) case Scope.SCAN => AggLet(name, value, bind(rest, body, env.bind(name -> value.typ), scope), isScan = true) } diff --git a/hail/src/main/scala/is/hail/expr/ir/Emit.scala b/hail/src/main/scala/is/hail/expr/ir/Emit.scala index cf9b9b006fe..7465aea42e0 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Emit.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Emit.scala @@ -635,7 +635,7 @@ class Emit[C]( def emit(ir: IR, mb: EmitMethodBuilder[C] = mb, region: Value[Region] = region, env: EmitEnv = env, container: Option[AggContainer] = container, loopEnv: Option[Env[LoopRef]] = loopEnv): EmitCode = this.emit(ir, mb, region, env, container, loopEnv) - def emitStream(ir: IR, outerRegion: Value[Region], mb: EmitMethodBuilder[C] = mb): EmitCode = + def emitStream(ir: IR, outerRegion: Value[Region], mb: EmitMethodBuilder[C] = mb, env: EmitEnv = env): EmitCode = EmitCode.fromI(mb)(cb => EmitStream.produce(this, ir, cb, cb.emb, outerRegion, env, container)) def emitVoid(ir: IR, cb: EmitCodeBuilder = cb, region: Value[Region] = region, env: EmitEnv = env, container: Option[AggContainer] = container, loopEnv: Option[Env[LoopRef]] = loopEnv): Unit = @@ -668,12 +668,22 @@ class Emit[C]( emitI(cond).consume(cb, {}, m => cb.if_(m.asBoolean.value, emitVoid(cnsq), emitVoid(altr))) - case Let(name, value, body) => - val xVal = if (value.typ.isInstanceOf[TStream]) emitStream(value, region) else emit(value) - cb.withScopedMaybeStreamValue(xVal, s"let_$name") { ev => - emitVoid(body, env = env.bind(name, ev)) + case Let(bindings, body) => + def go(env: EmitEnv): IndexedSeq[(String, 
IR)] => Unit = { + case (name, value) +: rest => + val xVal = + if (value.typ.isInstanceOf[TStream]) emitStream(value, region, env = env) + else emit(value, env = env) + + cb.withScopedMaybeStreamValue(xVal, s"let_$name") { ev => + go(env.bind(name, ev))(rest) + } + case Seq() => + emitVoid(body, env = env) } + go(env)(bindings) + case StreamFor(a, valueName, body) => emitStream(a, region).toI(cb).consume(cb, {}, @@ -2703,7 +2713,7 @@ class Emit[C]( } } - def emitStream(ir: IR, outerRegion: Value[Region]): EmitCode = + def emitStream(ir: IR, outerRegion: Value[Region], env: EmitEnv = env): EmitCode = EmitCode.fromI(mb)(cb => EmitStream.produce(this, ir, cb, cb.emb, outerRegion, env, container)) // ideally, emit would not be called with void values, but initOp args can be void @@ -2716,12 +2726,22 @@ class Emit[C]( val result: EmitCode = (ir: @unchecked) match { - case Let(name, value, body) => + case Let(bindings, body) => EmitCode.fromI(mb) { cb => - val xVal = if (value.typ.isInstanceOf[TStream]) emitStream(value, region) else emit(value) - cb.withScopedMaybeStreamValue(xVal, s"let_$name") { ev => - emitI(body, cb, env = env.bind(name, ev)) + def go(env: EmitEnv): IndexedSeq[(String, IR)] => IEmitCode = { + case (name, value) +: rest => + val xVal = + if (value.typ.isInstanceOf[TStream]) emitStream(value, region, env = env) + else emit(value, env = env) + + cb.withScopedMaybeStreamValue(xVal, s"let_$name") { ev => + go(env.bind(name, ev))(rest) + } + case Seq() => + emitI(body, cb, env = env) } + + go(env)(bindings) } case Ref(name, t) => diff --git a/hail/src/main/scala/is/hail/expr/ir/Env.scala b/hail/src/main/scala/is/hail/expr/ir/Env.scala index 999b3f378c8..d1f4c9297d2 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Env.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Env.scala @@ -130,8 +130,7 @@ class Env[V] private(val m: Map[Env.K, V]) { def apply(name: String): V = m(name) def lookup(name: String): V = - m.get(name) - .getOrElse(throw new RuntimeException(s"Cannot find $name in $m")) + m.get(name).getOrElse(throw new RuntimeException(s"Cannot find $name in $m")) def lookupOption(name: String): Option[V] = m.get(name) diff --git a/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala b/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala index ce317d8618b..90db484f1dd 100644 --- a/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala +++ b/hail/src/main/scala/is/hail/expr/ir/ExtractIntervalFilters.scala @@ -815,7 +815,10 @@ class ExtractIntervalFilters(ctx: ExecuteContext, keyType: TStruct) { var res: AbstractLattice.Value = if (env.keySet == KeySetLattice.bottom) AbstractLattice.bottom else x match { - case Let(name, value, body) => recur(body, env.bind(name -> recur(value))) + case Let(bindings, body) => + recur(body, bindings.foldLeft(env) { case (env, (name, value)) => + env.bind(name -> recur(value, env)) + }) case Ref(name, _) => env(name) case GetField(o, name) => recur(o).asInstanceOf[StructValue](name) case MakeStruct(fields) => StructValue(fields.view.map { case (name, field) => diff --git a/hail/src/main/scala/is/hail/expr/ir/ForwardLets.scala b/hail/src/main/scala/is/hail/expr/ir/ForwardLets.scala index 1a5db502a32..d78c0630ea3 100644 --- a/hail/src/main/scala/is/hail/expr/ir/ForwardLets.scala +++ b/hail/src/main/scala/is/hail/expr/ir/ForwardLets.scala @@ -1,8 +1,9 @@ package is.hail.expr.ir import is.hail.backend.ExecuteContext +import is.hail.utils.BoxedArrayBuilder -import scala.collection.mutable +import scala.collection.Set 
object ForwardLets { def apply[T <: BaseIR](ctx: ExecuteContext)(ir0: T): T = { @@ -12,7 +13,7 @@ object ForwardLets { def rewrite(ir: BaseIR, env: BindingEnv[IR]): BaseIR = { - def shouldForward(value: IR, refs: mutable.Set[RefEquality[BaseRef]], base: IR): Boolean = { + def shouldForward(value: IR, refs: Set[RefEquality[BaseRef]], base: IR): Boolean = { value.isInstanceOf[Ref] || value.isInstanceOf[In] || (IsConstant(value) && !value.isInstanceOf[Str]) || @@ -24,18 +25,21 @@ object ForwardLets { !ContainsAggIntermediate(value) } - def mapRewrite(): BaseIR = ir.mapChildrenWithIndex { (ir1, i) => - rewrite(ir1, ChildEnvWithoutBindings(ir, i, env)) - } - ir match { - case l@Let(name, value, body) => - val refs = uses.lookup(ir) - val rewriteValue = rewrite(value, env).asInstanceOf[IR] - if (shouldForward(rewriteValue, refs, l)) - rewrite(body, env.bindEval(name -> rewriteValue)) - else - Let(name, rewriteValue, rewrite(body, env).asInstanceOf[IR]) + case l@Let(bindings, body) => + val keep = new BoxedArrayBuilder[(String, IR)] + val refs = uses(ir) + val newEnv = bindings.foldLeft(env) { case (env, (name, value)) => + val rewriteValue = rewrite(value, env).asInstanceOf[IR] + if (shouldForward(rewriteValue, refs.filter(_.t.name == name), l)) + env.bindEval(name -> rewriteValue) + else {keep += (name -> rewriteValue); env} + } + + val newBody = rewrite(body, newEnv).asInstanceOf[IR] + if (keep.isEmpty) newBody + else Let(keep.result(), newBody) + case l@AggLet(name, value, body, isScan) => val refs = uses.lookup(ir) val rewriteValue = rewrite(value, if (isScan) env.promoteScan else env.promoteAgg).asInstanceOf[IR] @@ -46,11 +50,15 @@ object ForwardLets { rewrite(body, env.copy(agg = Some(env.agg.get.bind(name -> rewriteValue)))) else AggLet(name, rewriteValue, rewrite(body, env).asInstanceOf[IR], isScan) - case x@Ref(name, _) => env.eval.lookupOption(name) - .map { forwarded => if (uses.lookup(defs.lookup(x)).size > 1) forwarded.deepCopy() else forwarded } - .getOrElse(x) + case x@Ref(name, _) => + env.eval + .lookupOption(name) + .map { forwarded => if (uses.lookup(defs.lookup(x)).count(_.t.name == name) > 1) forwarded.deepCopy() else forwarded } + .getOrElse(x) case _ => - mapRewrite() + ir.mapChildrenWithIndex { (ir1, i) => + rewrite(ir1, ChildEnvWithoutBindings(ir, i, env)) + } } } diff --git a/hail/src/main/scala/is/hail/expr/ir/IR.scala b/hail/src/main/scala/is/hail/expr/ir/IR.scala index ccaba5925d5..1fcc1fb7ccf 100644 --- a/hail/src/main/scala/is/hail/expr/ir/IR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/IR.scala @@ -189,7 +189,24 @@ final case class Switch(x: IR, default: IR, cases: IndexedSeq[IR]) extends IR { } final case class AggLet(name: String, value: IR, body: IR, isScan: Boolean) extends IR -final case class Let(name: String, value: IR, body: IR) extends IR +final case class Let(bindings: IndexedSeq[(String, IR)], body: IR) extends IR { + override lazy val size: Int = + bindings.length + 1 +} + +object Let { + case class Extract(p: ((String, IR)) => Boolean) { + def unapply(bindings: IndexedSeq[(String, IR)]): + Option[(IndexedSeq[(String, IR)], IndexedSeq[(String, IR)])] = { + val idx = bindings.indexWhere(p) + if (idx == -1) None else Some(bindings.splitAt(idx)) + } + } + + object Nested extends Extract(_._2.isInstanceOf[Let]) + object Insert extends Extract(_._2.isInstanceOf[InsertFields]) + +} sealed abstract class BaseRef extends IR with TrivialIR { def name: String @@ -441,7 +458,7 @@ object StreamJoin { // joined is a stream of {leftElement, rightGroup} 
bindIR(MakeArray(NA(rEltType))) { missingSingleton => flatMapIR(joined) { x => - Let(l, GetField(x, "left"), bindIR(GetField(GetField(x, "rightGroup"), groupField)) { rightElts => + Let(FastSeq(l -> GetField(x, "left")), bindIR(GetField(GetField(x, "rightGroup"), groupField)) { rightElts => joinType match { case "left" | "outer" => StreamMap(ToStream(If(IsNA(rightElts), missingSingleton, rightElts), requiresMemoryManagement), r, joinF) case "right" | "inner" => StreamMap(ToStream(rightElts, requiresMemoryManagement), r, joinF) @@ -721,8 +738,7 @@ final case class ApplyIR(function: String, typeArgs: Seq[Type], args: Seq[IR], r lazy val refIdx: Map[String, Int] = refs.map(_.name).zipWithIndex.toMap lazy val explicitNode: IR = { - // foldRight because arg1 should be at the top so it is evaluated first - val ir = refs.zip(args).foldRight(body) { case ((ref, arg), bodyIR) => Let(ref.name, arg, bodyIR) } + val ir = Let(refs.map(_.name).zip(args), body) assert(ir.typ == returnType) ir } diff --git a/hail/src/main/scala/is/hail/expr/ir/IRBuilder.scala b/hail/src/main/scala/is/hail/expr/ir/IRBuilder.scala index 2e20bda1bbf..d8fcea882e8 100644 --- a/hail/src/main/scala/is/hail/expr/ir/IRBuilder.scala +++ b/hail/src/main/scala/is/hail/expr/ir/IRBuilder.scala @@ -1,17 +1,18 @@ package is.hail.expr.ir -import scala.collection.mutable +import is.hail.utils.BoxedArrayBuilder object IRBuilder { def scoped(f: IRBuilder => IR): IR = { - val ctx = new IRBuilder() - val result = f(ctx) - ctx.wrap(result) + val builder = new IRBuilder() + val result = f(builder) + Let(builder.bindings.result(), result) } } class IRBuilder() { - private val bindings: mutable.ArrayBuffer[(String, IR)] = mutable.ArrayBuffer() + private val bindings: BoxedArrayBuilder[(String, IR)] = + new BoxedArrayBuilder[(String, IR)]() def getBindings: IndexedSeq[(String, IR)] = bindings.result() @@ -27,8 +28,4 @@ class IRBuilder() { bindings += name -> ir Ref(name, ir.typ) } - - def wrap(ir: IR): IR = { - bindings.foldRight[IR](ir) { case ((f, v), accum) => Let(f, v, accum) } - } } diff --git a/hail/src/main/scala/is/hail/expr/ir/InTailPosition.scala b/hail/src/main/scala/is/hail/expr/ir/InTailPosition.scala index 108e3082f7e..6689d2d3941 100644 --- a/hail/src/main/scala/is/hail/expr/ir/InTailPosition.scala +++ b/hail/src/main/scala/is/hail/expr/ir/InTailPosition.scala @@ -2,7 +2,7 @@ package is.hail.expr.ir object InTailPosition { def apply(x: IR, i: Int): Boolean = x match { - case Let(_, _, _) => i == 1 + case Let(bindings, _) => i == bindings.length case If(_, _, _) => i != 0 case _: Switch => i != 0 case TailLoop(_, params, _, _) => i == params.length diff --git a/hail/src/main/scala/is/hail/expr/ir/InferType.scala b/hail/src/main/scala/is/hail/expr/ir/InferType.scala index 528577b4384..dd48deb97e2 100644 --- a/hail/src/main/scala/is/hail/expr/ir/InferType.scala +++ b/hail/src/main/scala/is/hail/expr/ir/InferType.scala @@ -62,7 +62,7 @@ object InferType { cnsq.typ case Switch(_, default, _) => default.typ - case Let(name, value, body) => + case Let(_, body) => body.typ case AggLet(name, value, body, _) => body.typ diff --git a/hail/src/main/scala/is/hail/expr/ir/Interpret.scala b/hail/src/main/scala/is/hail/expr/ir/Interpret.scala index 4b90bd6702c..c1d5e80b215 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Interpret.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Interpret.scala @@ -40,18 +40,16 @@ object Interpret { def apply[T](ctx: ExecuteContext, ir: IR): T = apply(ctx, ir, Env.empty[(Any, Type)], FastSeq[(Any, 
Type)]()).asInstanceOf[T] - def apply[T](ctx: ExecuteContext, + def apply[T]( + ctx: ExecuteContext, ir0: IR, env: Env[(Any, Type)], args: IndexedSeq[(Any, Type)], optimize: Boolean = true ): T = { - val rwIR = env.m.foldLeft[IR](ir0) { case (acc, (k, (value, t))) => Let(k, Literal.coerce(t, value), acc) } - - val lowered = LoweringPipeline.relationalLowerer(optimize).apply(ctx, rwIR).asInstanceOf[IR] - + val bindings = env.m.view.map { case (k, (value, t)) => k -> Literal.coerce(t, value) }.toFastSeq + val lowered = LoweringPipeline.relationalLowerer(optimize).apply(ctx, Let(bindings, ir0)).asInstanceOf[IR] val result = run(ctx, lowered, Env.empty[Any], args, Memo.empty).asInstanceOf[T] - result } @@ -129,9 +127,11 @@ object Interpret { case null => null } - case Let(name, value, body) => - val valueValue = interpret(value, env, args) - interpret(body, env.bind(name, valueValue), args) + case Let(bindings, body) => + val newEnv = bindings.foldLeft(env) { case (env, (name, value)) => + env.bind(name -> interpret(value, env, args)) + } + interpret(body, newEnv, args) case Ref(name, _) => env.lookup(name) case ApplyBinaryPrimOp(op, l, r) => val lValue = interpret(l, env, args) @@ -746,9 +746,9 @@ object Interpret { val oldIndices = old.typ.asInstanceOf[TStruct].fields.map(f => f.name -> f.index).toMap Row.fromSeq(fds.map(name => newValues.getOrElse(name, struct.asInstanceOf[Row].get(oldIndices(name))))) case None => - var t = old.typ + var t = old.typ.asInstanceOf[TStruct] fields.foreach { case (name, body) => - val (newT, ins) = t.insert(body.typ, name) + val (newT, ins) = t.insert(body.typ, FastSeq(name)) t = newT.asInstanceOf[TStruct] struct = ins(struct, interpret(body, env, args)) } @@ -992,12 +992,14 @@ object Interpret { val rv = value.rvd.combine[WrappedByteArray, RegionValue]( ctx, mkZero, itF, read, write, combOpF, isCommutative, useTreeAggregate) - val (Some(PTypeReferenceSingleCodeType(rTyp: PTuple)), f) = CompileWithAggregators[AsmFunction2RegionLongLong]( - ctx, - extracted.states, - FastSeq(("global", SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(value.globals.t)))), - FastSeq(classInfo[Region], LongInfo), LongInfo, - Let(res, extracted.results, MakeTuple.ordered(FastSeq(extracted.postAggIR)))) + val (Some(PTypeReferenceSingleCodeType(rTyp: PTuple)), f) = + CompileWithAggregators[AsmFunction2RegionLongLong]( + ctx, + extracted.states, + FastSeq(("global", SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(value.globals.t)))), + FastSeq(classInfo[Region], LongInfo), LongInfo, + Let(FastSeq(res -> extracted.results), MakeTuple.ordered(FastSeq(extracted.postAggIR))) + ) assert(rTyp.types(0).virtualType == query.typ) ctx.r.pool.scopedRegion { r => diff --git a/hail/src/main/scala/is/hail/expr/ir/LowerMatrixIR.scala b/hail/src/main/scala/is/hail/expr/ir/LowerMatrixIR.scala index d06a1dfa0d4..4d3a2ac151a 100644 --- a/hail/src/main/scala/is/hail/expr/ir/LowerMatrixIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/LowerMatrixIR.scala @@ -212,60 +212,61 @@ object LowerMatrixIR { case AggFilter(filt, body, true) => val ab = new BoxedArrayBuilder[(String, IR)] val liftedBody = lift(body, ab) - val uid = genUID() val aggs = ab.result() val structResult = MakeStruct(aggs) - val aggFilterIR = AggFilter(filt, structResult, true) - builder += ((uid, aggFilterIR)) - aggs.foldLeft[IR](liftedBody) { case (acc, (name, _)) => Let(name, GetField(Ref(uid, structResult.typ), name), acc) } + val uid = Ref(genUID(), structResult.typ) + builder += (uid.name -> AggFilter(filt, 
structResult, true)) + Let(aggs.map { case (name, _) => name -> GetField(uid, name) }, liftedBody) case AggExplode(a, name, body, true) => val ab = new BoxedArrayBuilder[(String, IR)] val liftedBody = lift(body, ab) - val uid = genUID() val aggs = ab.result() val structResult = MakeStruct(aggs) - val aggExplodeIR = AggExplode(a, name, structResult, true) - builder += ((uid, aggExplodeIR)) - aggs.foldLeft[IR](liftedBody) { case (acc, (name, _)) => Let(name, GetField(Ref(uid, structResult.typ), name), acc) } + val uid = Ref(genUID(), structResult.typ) + builder += (uid.name -> AggExplode(a, name, structResult, true)) + Let(aggs.map { case (name, _) => name -> GetField(uid, name) }, liftedBody) case AggGroupBy(a, body, true) => val ab = new BoxedArrayBuilder[(String, IR)] val liftedBody = lift(body, ab) - val uid = genUID() val aggs = ab.result() - val structResult = MakeStruct(aggs) - val aggIR = AggGroupBy(a, structResult, true) - builder += ((uid, aggIR)) - val eltUID = genUID() + + val aggIR = AggGroupBy(a, MakeStruct(aggs), true) + val uid = Ref(genUID(), aggIR.typ) + builder += (uid.name -> aggIR) val valueUID = genUID() val elementType = aggIR.typ.asInstanceOf[TDict].elementType - val valueType = elementType.asInstanceOf[TBaseStruct].types(1) - ToDict(StreamMap(ToStream(Ref(uid, aggIR.typ)), eltUID, Let(valueUID, GetField(Ref(eltUID, elementType), "value"), - MakeTuple.ordered(FastSeq(GetField(Ref(eltUID, elementType), "key"), - aggs.foldLeft[IR](liftedBody) { case (acc, (name, _)) => Let(name, GetField(Ref(valueUID, valueType), name), acc) }))))) + val valueType = elementType.types(1) + ToDict(mapIR(ToStream(uid)) { eltUID => + Let( + (valueUID -> GetField(eltUID, "value")) +: + aggs.map { case (name, _) => name -> GetField(Ref(valueUID, valueType), name) }, + MakeTuple.ordered(FastSeq(GetField(eltUID, "key"), liftedBody)) + ) + }) case AggArrayPerElement(a, elementName, indexName, body, knownLength, true) => val ab = new BoxedArrayBuilder[(String, IR)] val liftedBody = lift(body, ab) - val uid = genUID() + val aggs = ab.result() - val structResult = MakeStruct(aggs) - val aggIR = AggArrayPerElement(a, elementName, indexName, structResult, knownLength, true) - builder += ((uid, aggIR)) - val eltUID = genUID() - val t = aggIR.typ.asInstanceOf[TArray] - ToArray(StreamMap(ToStream(Ref(uid, t)), eltUID, aggs.foldLeft[IR](liftedBody) { case (acc, (name, _)) => Let(name, GetField(Ref(eltUID, structResult.typ), name), acc) })) + val aggIR = AggArrayPerElement(a, elementName, indexName, MakeStruct(aggs), knownLength, true) + val uid = Ref(genUID(), aggIR.typ) + builder += (uid.name -> aggIR) + + ToArray(mapIR(ToStream(uid)) { eltUID => + Let(aggs.map { case (name, _) => name -> GetField(eltUID, name) }, liftedBody) + }) case AggLet(name, value, body, true) => val ab = new BoxedArrayBuilder[(String, IR)] val liftedBody = lift(body, ab) - val uid = genUID() val aggs = ab.result() val structResult = MakeStruct(aggs) - val aggIR = AggLet(name, value, structResult, true) - builder += ((uid, aggIR)) - aggs.foldLeft[IR](liftedBody) { case (acc, (name, _)) => Let(name, GetField(Ref(uid, structResult.typ), name), acc) } + val uid = genUID() + builder += (uid -> AggLet(name, value, structResult, true)) + Let(aggs.map { case (name, _) => name -> GetField(Ref(uid, structResult.typ), name) }, liftedBody) case _ => MapIR(lift(_, builder))(ir) @@ -325,76 +326,80 @@ object LowerMatrixIR { case AggFilter(filt, body, isScan) => val ab = new BoxedArrayBuilder[(String, IR)] - val (liftedBody, builder) = if 
(isScan) - (lift(body, ab, aggBindings), scanBindings) - else - (lift(body, scanBindings, ab), aggBindings) - val uid = genUID() + val (liftedBody, builder) = + if (isScan) (lift(body, ab, aggBindings), scanBindings) + else (lift(body, scanBindings, ab), aggBindings) + val aggs = ab.result() val structResult = MakeStruct(aggs) - val aggFilterIR = AggFilter(filt, structResult, isScan) - builder += ((uid, aggFilterIR)) - aggs.foldLeft[IR](liftedBody) { case (acc, (name, _)) => Let(name, GetField(Ref(uid, structResult.typ), name), acc) } + + val uid = Ref(genUID(), structResult.typ) + builder += (uid.name -> AggFilter(filt, structResult, isScan)) + Let(aggs.map { case (name, _) => name -> GetField(uid, name) }, liftedBody) case AggExplode(a, name, body, isScan) => val ab = new BoxedArrayBuilder[(String, IR)] - val (liftedBody, builder) = if (isScan) - (lift(body, ab, aggBindings), scanBindings) - else - (lift(body, scanBindings, ab), aggBindings) - val uid = genUID() + val (liftedBody, builder) = + if (isScan) (lift(body, ab, aggBindings), scanBindings) + else (lift(body, scanBindings, ab), aggBindings) + val aggs = ab.result() val structResult = MakeStruct(aggs) - val aggExplodeIR = AggExplode(a, name, structResult, isScan) - builder += ((uid, aggExplodeIR)) - aggs.foldLeft[IR](liftedBody) { case (acc, (name, _)) => Let(name, GetField(Ref(uid, structResult.typ), name), acc) } + val uid = Ref(genUID(), structResult.typ) + builder += (uid.name -> AggExplode(a, name, structResult, isScan)) + Let(aggs.map { case (name, _) => name -> GetField(uid, name) }, liftedBody) case AggGroupBy(a, body, isScan) => val ab = new BoxedArrayBuilder[(String, IR)] - val (liftedBody, builder) = if (isScan) - (lift(body, ab, aggBindings), scanBindings) - else - (lift(body, scanBindings, ab), aggBindings) - val uid = genUID() + val (liftedBody, builder) = + if (isScan) (lift(body, ab, aggBindings), scanBindings) + else (lift(body, scanBindings, ab), aggBindings) + val aggs = ab.result() - val structResult = MakeStruct(aggs) - val aggIR = AggGroupBy(a, structResult, isScan) - builder += ((uid, aggIR)) - val eltUID = genUID() + val aggIR = AggGroupBy(a, MakeStruct(aggs), isScan) + val uid = Ref(genUID(), aggIR.typ) + builder += (uid.name -> aggIR) val valueUID = genUID() val elementType = aggIR.typ.asInstanceOf[TDict].elementType - val valueType = elementType.asInstanceOf[TBaseStruct].types(1) - ToDict(StreamMap(ToStream(Ref(uid, aggIR.typ)), eltUID, Let(valueUID, GetField(Ref(eltUID, elementType), "value"), - MakeTuple.ordered(FastSeq(GetField(Ref(eltUID, elementType), "key"), - aggs.foldLeft[IR](liftedBody) { case (acc, (name, _)) => Let(name, GetField(Ref(valueUID, valueType), name), acc) } ))))) + val valueType = elementType.types(1) + ToDict(mapIR(ToStream(uid)) { eltUID => + MakeTuple.ordered( + FastSeq(GetField(eltUID, "key"), + Let( + (valueUID -> GetField(eltUID, "value")) +: + aggs.map { case (name, _) => name -> GetField(Ref(valueUID, valueType), name) }, + liftedBody + ) + ) + ) + }) case AggArrayPerElement(a, elementName, indexName, body, knownLength, isScan) => val ab = new BoxedArrayBuilder[(String, IR)] - val (liftedBody, builder) = if (isScan) - (lift(body, ab, aggBindings), scanBindings) - else - (lift(body, scanBindings, ab), aggBindings) - val uid = genUID() + val (liftedBody, builder) = + if (isScan) (lift(body, ab, aggBindings), scanBindings) + else (lift(body, scanBindings, ab), aggBindings) + val aggs = ab.result() - val structResult = MakeStruct(aggs) - val aggIR = AggArrayPerElement(a, 
elementName, indexName, structResult, knownLength, isScan) - builder += ((uid, aggIR)) - val eltUID = genUID() - val t = aggIR.typ.asInstanceOf[TArray] - ToArray(StreamMap(ToStream(Ref(uid, t)), eltUID, aggs.foldLeft[IR](liftedBody) { case (acc, (name, _)) => Let(name, GetField(Ref(eltUID, structResult.typ), name), acc) })) + val aggIR = AggArrayPerElement(a, elementName, indexName, MakeStruct(aggs), knownLength, isScan) + val uid = Ref(genUID(), aggIR.typ) + builder += (uid.name -> aggIR) + ToArray(mapIR(ToStream(uid)) { eltUID => + Let(aggs.map { case (name, _) => name -> GetField(eltUID, name) }, liftedBody) + }) case AggLet(name, value, body, isScan) => val ab = new BoxedArrayBuilder[(String, IR)] - val (liftedBody, builder) = if (isScan) - (lift(body, ab, aggBindings), scanBindings) - else - (lift(body, scanBindings, ab), aggBindings) - val uid = genUID() + val (liftedBody, builder) = + if (isScan) (lift(body, ab, aggBindings), scanBindings) + else (lift(body, scanBindings, ab), aggBindings) + val aggs = ab.result() val structResult = MakeStruct(aggs) - val aggIR = AggLet(name, value, structResult, isScan) - builder += ((uid, aggIR)) - aggs.foldLeft[IR](liftedBody) { case (acc, (name, _)) => Let(name, GetField(Ref(uid, structResult.typ), name), acc) } + + val uid = Ref(genUID(), structResult.typ) + builder += (uid.name -> AggLet(name, value, structResult, isScan)) + Let(aggs.map { case (name, _) => name -> GetField(uid, name) }, liftedBody) case x: StreamAgg => x case x: StreamAggScan => x diff --git a/hail/src/main/scala/is/hail/expr/ir/MatrixIR.scala b/hail/src/main/scala/is/hail/expr/ir/MatrixIR.scala index 7e08faa9901..4b45e5bd4df 100644 --- a/hail/src/main/scala/is/hail/expr/ir/MatrixIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/MatrixIR.scala @@ -182,27 +182,6 @@ abstract class MatrixHybridReader extends TableReaderWithExtraUID with MatrixRea } tr } - - def makeGlobalValue(ctx: ExecuteContext, requestedType: TStruct, values: => IndexedSeq[Row]): BroadcastRow = { - assert(fullType.globalType.size == 1) - val colType = requestedType.fieldOption(LowerMatrixIR.colsFieldName) - .map(fd => fd.typ.asInstanceOf[TArray].elementType.asInstanceOf[TStruct]) - - colType match { - case Some(ct) => - assert(requestedType.size == 1) - val containedFields = ct.fieldNames.toSet - val colValueIndices = fullMatrixType.colType.fields - .filter(f => containedFields.contains(f.name)) - .map(_.index) - .toArray - val arr = values.map(r => Row.fromSeq(colValueIndices.map(r.get))).toFastSeq - BroadcastRow(ctx, Row(arr), requestedType) - case None => - assert(requestedType == TStruct.empty) - BroadcastRow(ctx, Row(), requestedType) - } - } } object MatrixNativeReader { @@ -755,7 +734,7 @@ case class MatrixAnnotateColsTable( root: String ) extends MatrixIR { override def typecheck(): Unit = { - assert(child.typ.colType.fieldOption(root).isEmpty) + assert(child.typ.colType.selfField(root).isEmpty) } lazy val childrenSeq: IndexedSeq[BaseIR] = FastSeq(child, table) @@ -765,7 +744,7 @@ case class MatrixAnnotateColsTable( override def partitionCounts: Option[IndexedSeq[Long]] = child.partitionCounts lazy val typ: MatrixType = child.typ.copy( - colType = child.typ.colType.structInsert(table.typ.valueType, List(root))) + colType = child.typ.colType.structInsert(table.typ.valueType, FastSeq(root))) def copy(newChildren: IndexedSeq[BaseIR]): MatrixAnnotateColsTable = { MatrixAnnotateColsTable( @@ -828,22 +807,13 @@ case class MatrixExplodeRows(child: MatrixIR, path: IndexedSeq[String]) extends override def 
columnCount: Option[Int] = child.columnCount lazy val typ: MatrixType = { - // FIXME: compute row type directly - val newRow: InsertFields = { - val refs = path.init.scanLeft(Ref("va", child.typ.rowType))((struct, name) => - Ref(genUID(), tcoerce[TStruct](struct.typ).field(name).typ)) - - path.zip(refs).zipWithIndex.foldRight[IR](Ref(genUID(), TInt32)) { - case (((field, ref), i), arg) => - InsertFields(ref, FastSeq(field -> - (if (i == refs.length - 1) - ArrayRef(ToArray(ToStream(GetField(ref, field))), arg) - else - Let(refs(i + 1).name, GetField(ref, field), arg)))) - }.asInstanceOf[InsertFields] + val rowType = child.typ.rowType + val f = rowType.fieldOption(path).getOrElse { + throw new AssertionError( + s"No such row field at path '${path.mkString("/")}' in matrix row type '$rowType'." + ) } - - child.typ.copy(rowType = newRow.typ) + child.typ.copy(rowType = rowType.structInsert(TIterable.elementType(f.typ), path)) } } @@ -1011,10 +981,14 @@ case class MatrixExplodeCols(child: MatrixIR, path: IndexedSeq[String]) extends lazy val rowCountUpperBound: Option[Long] = child.rowCountUpperBound lazy val typ: MatrixType = { - val (keysType, _) = child.typ.colType.queryTyped(path.toList) - val keyType = keysType.asInstanceOf[TContainer].elementType - child.typ.copy( - colType = child.typ.colType.structInsert(keyType, path.toList)) + val colType = child.typ.colType + val f = colType.fieldOption(path).getOrElse { + throw new AssertionError( + s"No such column field at path '${path.mkString("/")}' in matrix row type '$colType'." + ) + } + + child.typ.copy(colType = colType.structInsert(TIterable.elementType(f.typ), path)) } } diff --git a/hail/src/main/scala/is/hail/expr/ir/MatrixValue.scala b/hail/src/main/scala/is/hail/expr/ir/MatrixValue.scala index fb9c551c85c..994206776d0 100644 --- a/hail/src/main/scala/is/hail/expr/ir/MatrixValue.scala +++ b/hail/src/main/scala/is/hail/expr/ir/MatrixValue.scala @@ -283,7 +283,7 @@ object MatrixValue { } val fileData = RVD.writeRowsSplitFiles(ctx, mvs.map(_.rvd), paths, bufferSpec, stageLocally) - for ((mv, path, fd) <- (mvs, paths, fileData).zipped) { + (mvs, paths, fileData).zipped.foreach { case (mv, path, fd) => mv.finalizeWrite(ctx, path, bufferSpec, fd, consoleInfo = false) } } diff --git a/hail/src/main/scala/is/hail/expr/ir/MatrixWriter.scala b/hail/src/main/scala/is/hail/expr/ir/MatrixWriter.scala index a6360bd888c..aa4d2fdd945 100644 --- a/hail/src/main/scala/is/hail/expr/ir/MatrixWriter.scala +++ b/hail/src/main/scala/is/hail/expr/ir/MatrixWriter.scala @@ -1632,20 +1632,19 @@ case class MatrixNativeMultiWriter( Begin(FastSeq( Begin(components.map(_.setup)), - TableStage.wrapInBindings( + Let(components.flatMap(_.stage.letBindings), bindIR(cdaIR(concatenatedContexts, allBroadcasts, "matrix_multi_writer") { case (ctx, globals) => bindIR(GetField(ctx, "options")) { options => Switch(GetField(ctx, "matrixId"), default = Die("MatrixId exceeds matrix count", components.head.writePartitionType), cases = components.zipWithIndex.map { case (component, i) => - val writePartition = - bindIR(GetTupleElement(options, i)) { ctxRef => - component.writePartition(component.stage.partition(ctxRef), ctxRef) - } - - component.stage.broadcastVals.foldRight(writePartition) { case ((name, _), ir) => - Let(name, GetField(globals, name), ir) + val binds = component.stage.broadcastVals.map { case (name, _) => + name -> GetField(globals, name) } + + Let(binds, bindIR(GetTupleElement(options, i)) { ctxRef => + component.writePartition(component.stage.partition(ctxRef), 
ctxRef) + }) } ) } @@ -1656,8 +1655,7 @@ case class MatrixNativeMultiWriter( Begin(components.zipWithIndex.map { case (c, i) => c.finalizeWrite(ArraySlice(cdaResult, partitionCountScan(i), Some(partitionCountScan(i + 1))), c.stage.globals) }) - }, - components.flatMap(_.stage.letBindings) + } ) )) } diff --git a/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala b/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala index 740bf040597..67db3bb4158 100644 --- a/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala +++ b/hail/src/main/scala/is/hail/expr/ir/NormalizeNames.scala @@ -29,12 +29,23 @@ class NormalizeNames(normFunction: Int => String, allowFreeVariables: Boolean = call(normalizeIR(next, env, context :+ ir.getClass().getName()).asInstanceOf[StackFrame[IR]]) ir match { - case Let(name, value, body) => - val newName = gen() + case Let(bindings, body) => + val newBindings: Array[(String, IR)] = + Array.ofDim(bindings.length) + for { - newValue <- normalize(value) - newBody <- normalize(body, env.bindEval(name, newName)) - } yield Let(newName, newValue, newBody) + (env, _) <- bindings.foldLeft(done((env, 0))) { + case (get, (name, value)) => + for { + (env, idx) <- get + newValue <- normalize(value, env) + newName = gen() + _ = newBindings(idx) = newName -> newValue + } yield (env.bindEval(name, newName), idx + 1) + } + newBody <- normalize(body, env) + } yield Let(newBindings, newBody) + case Ref(name, typ) => val newName = env.eval.lookupOption(name) match { case Some(n) => n diff --git a/hail/src/main/scala/is/hail/expr/ir/Optimize.scala b/hail/src/main/scala/is/hail/expr/ir/Optimize.scala index 45061ba1131..49412f0d91b 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Optimize.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Optimize.scala @@ -16,7 +16,7 @@ object Optimize { } ctx.timer.time("Optimize") { - val normalizeNames = new NormalizeNames(_.toString, allowFreeVariables = true) + val normalizeNames = new NormalizeNames(_ => genUID(), allowFreeVariables = true) while (iter < maxIter && ir != last) { last = ir runOpt(FoldConstants(ctx, _), iter, "FoldConstants") diff --git a/hail/src/main/scala/is/hail/expr/ir/Parser.scala b/hail/src/main/scala/is/hail/expr/ir/Parser.scala index 668ca0d7f20..4edd4f9c5f8 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Parser.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Parser.scala @@ -3,7 +3,7 @@ package is.hail.expr.ir import is.hail.HailContext import is.hail.backend.ExecuteContext import is.hail.expr.ir.agg._ -import is.hail.expr.ir.functions.{IRFunctionRegistry, RelationalFunctions} +import is.hail.expr.ir.functions.RelationalFunctions import is.hail.expr.{JSONAnnotationImpex, Nat, ParserUtils} import is.hail.io.{BufferSpec, TypedCodecSpec} import is.hail.rvd.{RVDPartitioner, RVDType} @@ -825,11 +825,17 @@ object IRParser { cases <- ir_value_children(env)(it) } yield Switch(x, default, cases) case "Let" => - val name = identifier(it) - for { - value <- ir_value_expr(env)(it) + val names = repUntilNonStackSafe(it, identifier, PunctuationToken("(")) + val values = new Array[IR](names.length) + for { + _ <- names.indices.foldLeft(done(())) { case (update, i) => + for { + _ <- update + value <- ir_value_expr(env)(it) + } yield values.update(i, value) + } body <- ir_value_expr(env)(it) - } yield Let(name, value, body) + } yield Let(names.zip(values).toFastSeq, body) case "AggLet" => val name = identifier(it) val isScan = boolean_literal(it) diff --git a/hail/src/main/scala/is/hail/expr/ir/Pretty.scala 
b/hail/src/main/scala/is/hail/expr/ir/Pretty.scala index c2bb7922ea9..1b36a307e93 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Pretty.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Pretty.scala @@ -178,7 +178,7 @@ class Pretty(width: Int, ribbonWidth: Int, elideLiterals: Boolean, maxLen: Int, else "") case EncodedLiteral(codec, _) => single(codec.encodedVirtualType.parsableString()) - case Let(name, _, _) if !elideBindings => single(prettyIdentifier(name)) + case Let(bindings, _) if !elideBindings => bindings.map(b => text(prettyIdentifier(b._1))) case AggLet(name, _, _, isScan) => if (elideBindings) single(Pretty.prettyBooleanLiteral(isScan)) else @@ -667,9 +667,13 @@ class Pretty(width: Int, ribbonWidth: Int, elideLiterals: Boolean, maxLen: Int, } def pretty(ir: BaseIR, bindings: Env[String]): (Doc, Doc) = ir match { - case Let(name, value, body) => - val (valueDoc, valueIdent) = prettyWithIdent(value, bindings, "%") - val (bodyPre, bodyHead) = pretty(body, bindings.bind(name, valueIdent)) + case Let(binds, body) => + val (valueDoc, newBindings) = + binds.foldLeft((empty, bindings)) { case ((valueDoc, bindings), (name, value)) => + val (doc, ident) = prettyWithIdent(value, bindings, "%") + (concat(valueDoc, doc), bindings.bind(name, ident)) + } + val (bodyPre, bodyHead) = pretty(body, newBindings) (concat(valueDoc, bodyPre), bodyHead) case RelationalLet(name, value, body) => val (valueDoc, valueIdent) = prettyWithIdent(value, bindings, "%") diff --git a/hail/src/main/scala/is/hail/expr/ir/PruneDeadFields.scala b/hail/src/main/scala/is/hail/expr/ir/PruneDeadFields.scala index eea3d832126..67263fb8e78 100644 --- a/hail/src/main/scala/is/hail/expr/ir/PruneDeadFields.scala +++ b/hail/src/main/scala/is/hail/expr/ir/PruneDeadFields.scala @@ -366,9 +366,9 @@ object PruneDeadFields { if (lkSet.contains(f)) Some(f -> left.typ.rowType.field(f).typ) else - requestedType.rowType.fieldOption(f).map(reqF => f -> reqF.typ)): _*), + requestedType.rowType.selfField(f).map(reqF => f -> reqF.typ)): _*), globalType = TStruct(left.typ.globalType.fieldNames.flatMap(f => - requestedType.globalType.fieldOption(f).map(reqF => f -> reqF.typ)): _*)) + requestedType.globalType.selfField(f).map(reqF => f -> reqF.typ)): _*)) memoizeTableIR(ctx, left, leftDep, memo) val rk = right.typ.key.take(joinKey + math.max(0, requestedType.key.length - left.typ.key.length)) @@ -379,12 +379,12 @@ object PruneDeadFields { if (rightKeyFields.contains(f)) Some(f -> right.typ.rowType.field(f).typ) else - requestedType.rowType.fieldOption(f).map(reqF => f -> reqF.typ)): _*), + requestedType.rowType.selfField(f).map(reqF => f -> reqF.typ)): _*), globalType = TStruct(right.typ.globalType.fieldNames.flatMap(f => - requestedType.globalType.fieldOption(f).map(reqF => f -> reqF.typ)): _*)) + requestedType.globalType.selfField(f).map(reqF => f -> reqF.typ)): _*)) memoizeTableIR(ctx, right, rightDep, memo) case TableLeftJoinRightDistinct(left, right, root) => - val fieldDep = requestedType.rowType.fieldOption(root).map(_.typ.asInstanceOf[TStruct]) + val fieldDep = requestedType.rowType.selfField(root).map(_.typ.asInstanceOf[TStruct]) fieldDep match { case Some(struct) => val rightDep = TableType( @@ -408,7 +408,7 @@ object PruneDeadFields { memoizeTableIR(ctx, left, requestedType, memo) } case TableIntervalJoin(left, right, root, product) => - val fieldDep = requestedType.rowType.fieldOption(root).map { field => + val fieldDep = requestedType.rowType.selfField(root).map { field => if (product) 
field.typ.asInstanceOf[TArray].elementType.asInstanceOf[TStruct] else @@ -437,17 +437,17 @@ object PruneDeadFields { memoizeTableIR(ctx, left, requestedType, memo) } case TableMultiWayZipJoin(children, fieldName, globalName) => - val gType = requestedType.globalType.fieldOption(globalName) + val gType = requestedType.globalType.selfField(globalName) .map(_.typ.asInstanceOf[TArray].elementType) .getOrElse(TStruct.empty).asInstanceOf[TStruct] - val rType = requestedType.rowType.fieldOption(fieldName) + val rType = requestedType.rowType.selfField(fieldName) .map(_.typ.asInstanceOf[TArray].elementType) .getOrElse(TStruct.empty).asInstanceOf[TStruct] val child1 = children.head val dep = TableType( key = child1.typ.key, rowType = TStruct(child1.typ.rowType.fieldNames.flatMap(f => - child1.typ.keyType.fieldOption(f).orElse(rType.fieldOption(f)).map(reqF => f -> reqF.typ) + child1.typ.keyType.selfField(f).orElse(rType.selfField(f)).map(reqF => f -> reqF.typ) ): _*), globalType = gType) children.foreach(memoizeTableIR(ctx, _, dep, memo)) @@ -465,7 +465,7 @@ object PruneDeadFields { case e: AnnotationPathException => minimal(preExplosionFieldType) } val dep = requestedType.copy(rowType = unify(child.typ.rowType, - requestedType.rowType.insert(prunedPreExlosionFieldType, path.toList)._1.asInstanceOf[TStruct])) + requestedType.rowType.insert(prunedPreExlosionFieldType, path)._1.asInstanceOf[TStruct])) memoizeTableIR(ctx, child, dep, memo) case TableFilter(child, pred) => val irDep = memoizeAndGetDep(ctx, pred, pred.typ, child.typ, memo) @@ -566,11 +566,11 @@ object PruneDeadFields { colKey = requestedType.key.drop(child.typ.rowKey.length), globalType = requestedType.globalType, colType = TStruct( - child.typ.colType.fields.flatMap(f => requestedType.rowType.fieldOption(f.name).map(f2 => f.name -> f2.typ)): _*), + child.typ.colType.fields.flatMap(f => requestedType.rowType.selfField(f.name).map(f2 => f.name -> f2.typ)): _*), rowType = TStruct( - child.typ.rowType.fields.flatMap(f => requestedType.rowType.fieldOption(f.name).map(f2 => f.name -> f2.typ)): _*), + child.typ.rowType.fields.flatMap(f => requestedType.rowType.selfField(f.name).map(f2 => f.name -> f2.typ)): _*), entryType = TStruct( - child.typ.entryType.fields.flatMap(f => requestedType.rowType.fieldOption(f.name).map(f2 => f.name -> f2.typ)): _*) + child.typ.entryType.fields.flatMap(f => requestedType.rowType.selfField(f.name).map(f2 => f.name -> f2.typ)): _*) ) memoizeMatrixIR(ctx, child, mtDep, memo) case TableUnion(children) => @@ -702,7 +702,7 @@ object PruneDeadFields { if (colKeySet.contains(f.name)) Some(f.name -> f.typ) else { - requestedColType.fieldOption(f.name) + requestedColType.selfField(f.name) .map(requestedField => f.name -> requestedField.typ.asInstanceOf[TArray].elementType) } }: _*), @@ -732,7 +732,7 @@ object PruneDeadFields { entryType = irDepEntry.entryType) memoizeMatrixIR(ctx, child, childDep, memo) case MatrixAnnotateRowsTable(child, table, root, product) => - val fieldDep = requestedType.rowType.fieldOption(root).map { field => + val fieldDep = requestedType.rowType.selfField(root).map { field => if (product) field.typ.asInstanceOf[TArray].elementType.asInstanceOf[TStruct] else @@ -760,7 +760,7 @@ object PruneDeadFields { memoizeMatrixIR(ctx, child, requestedType, memo) } case MatrixAnnotateColsTable(child, table, uid) => - val fieldDep = requestedType.colType.fieldOption(uid).map(_.typ.asInstanceOf[TStruct]) + val fieldDep = requestedType.colType.selfField(uid).map(_.typ.asInstanceOf[TStruct]) fieldDep match { 
case Some(struct) => val tk = table.typ.key @@ -794,7 +794,7 @@ object PruneDeadFields { case e: AnnotationPathException => minimal(preExplosionFieldType) } val dep = requestedType.copy(rowType = unify(child.typ.rowType, - requestedType.rowType.insert(prunedPreExlosionFieldType, path.toList)._1.asInstanceOf[TStruct])) + requestedType.rowType.insert(prunedPreExlosionFieldType, path)._1.asInstanceOf[TStruct])) memoizeMatrixIR(ctx, child, dep, memo) case MatrixExplodeCols(child, path) => def getExplodedField(typ: MatrixType): Type = typ.colType.queryTyped(path.toList)._1 @@ -810,7 +810,7 @@ object PruneDeadFields { case e: AnnotationPathException => minimal(preExplosionFieldType) } val dep = requestedType.copy(colType = unify(child.typ.colType, - requestedType.colType.insert(prunedPreExplosionFieldType, path.toList)._1.asInstanceOf[TStruct])) + requestedType.colType.insert(prunedPreExplosionFieldType, path)._1.asInstanceOf[TStruct])) memoizeMatrixIR(ctx, child, dep, memo) case MatrixRepartition(child, _, _) => memoizeMatrixIR(ctx, child, requestedType, memo) @@ -1027,13 +1027,15 @@ object PruneDeadFields { ) case Coalesce(values) => unifyEnvsSeq(values.map(memoizeValueIR(ctx, _, requestedType, memo))) case Consume(value) => memoizeValueIR(ctx, value, value.typ, memo) - case Let(name, value, body) => + case Let(bindings, body) => val bodyEnv = memoizeValueIR(ctx, body, requestedType, memo) - val valueType = unifySeq(value.typ, uses(name, bodyEnv.eval)) - unifyEnvs( - bodyEnv.deleteEval(name), - memoizeValueIR(ctx, value, valueType, memo) - ) + bindings.foldRight(bodyEnv) { case ((name, value), bodyEnv) => + val valueType = unifySeq(value.typ, uses(name, bodyEnv.eval)) + unifyEnvs( + bodyEnv.deleteEval(name), + memoizeValueIR(ctx, value, valueType, memo) + ) + } case AggLet(name, value, body, isScan) => val bodyEnv = memoizeValueIR(ctx, body, requestedType, memo) if (isScan) { @@ -1415,7 +1417,7 @@ object PruneDeadFields { val sType = requestedType.asInstanceOf[TStruct] unifyEnvsSeq(fields.flatMap { case (fname, fir) => // ignore unreachable fields, these are eliminated on the upwards pass - sType.fieldOption(fname).map(f => memoizeValueIR(ctx, fir, f.typ, memo)) + sType.selfField(fname).map(f => memoizeValueIR(ctx, fir, f.typ, memo)) }) case InsertFields(old, fields, _) => val sType = requestedType.asInstanceOf[TStruct] @@ -1429,20 +1431,20 @@ object PruneDeadFields { if (rightDep.hasField(f.name)) Some(f.name -> minimal(f.typ)) else - sType.fieldOption(f.name).map(f.name -> _.typ) + sType.selfField(f.name).map(f.name -> _.typ) }: _*) unifyEnvsSeq( FastSeq(memoizeValueIR(ctx, old, leftDep, memo)) ++ // ignore unreachable fields, these are eliminated on the upwards pass fields.flatMap { case (fname, fir) => - rightDep.fieldOption(fname).map(f => memoizeValueIR(ctx, fir, f.typ, memo)) + rightDep.selfField(fname).map(f => memoizeValueIR(ctx, fir, f.typ, memo)) } ) case SelectFields(old, fields) => val sType = requestedType.asInstanceOf[TStruct] val oldReqType = TStruct(old.typ.asInstanceOf[TStruct] .fieldNames - .flatMap(fn => sType.fieldOption(fn).map(fd => (fd.name, fd.typ))): _*) + .flatMap(fn => sType.selfField(fn).map(fd => (fd.name, fd.typ))): _*) memoizeValueIR(ctx, old, oldReqType, memo) case GetField(o, name) => memoizeValueIR(ctx, o, TStruct(name -> requestedType), memo) @@ -1479,8 +1481,8 @@ object PruneDeadFields { memoizeTableIR(ctx, child, TableType( key = child.typ.key, rowType = unify(child.typ.rowType, - 
rStruct.fieldOption("rows").map(_.typ.asInstanceOf[TArray].elementType.asInstanceOf[TStruct]).getOrElse(TStruct.empty)), - globalType = rStruct.fieldOption("global").map(_.typ.asInstanceOf[TStruct]).getOrElse(TStruct.empty)), + rStruct.selfField("rows").map(_.typ.asInstanceOf[TArray].elementType.asInstanceOf[TStruct]).getOrElse(TStruct.empty)), + globalType = rStruct.selfField("global").map(_.typ.asInstanceOf[TStruct]).getOrElse(TStruct.empty)), memo) BindingEnv.empty case TableToValueApply(child, _) => @@ -1897,13 +1899,15 @@ object PruneDeadFields { case Consume(value) => val value2 = rebuildIR(ctx, value, env, memo) Consume(value2) - case Let(name, value, body) => - val value2 = rebuildIR(ctx, value, env, memo) - Let( - name, - value2, - rebuildIR(ctx, body, env.bindEval(name, value2.typ), memo) - ) + case Let(bindings, body) => + val newBindings = new Array[(String, IR)](bindings.length) + val (_, newEnv) = bindings.foldLeft((0, env)) { case ((idx, env), (name, value)) => + val newValue = rebuildIR(ctx, value, env, memo) + newBindings(idx) = (name -> newValue) + (idx + 1, env.bindEval(name -> newValue.typ)) + } + + Let(newBindings, rebuildIR(ctx, body, newEnv, memo)) case AggLet(name, value, body, isScan) => val value2 = rebuildIR(ctx, value, if (isScan) env.promoteScan else env.promoteAgg, memo) AggLet( @@ -2118,7 +2122,7 @@ object PruneDeadFields { MakeStruct(FastSeq()) else { val rRowType = TIterable.elementType(rStruct.fieldType("rows")).asInstanceOf[TStruct] - val rGlobType = rStruct.fieldOption("global").map(_.typ.asInstanceOf[TStruct]).getOrElse(TStruct()) + val rGlobType = rStruct.selfField("global").map(_.typ.asInstanceOf[TStruct]).getOrElse(TStruct()) TableCollect(upcastTable(ctx, rebuild(ctx, child, memo), TableType(rowType = rRowType, FastSeq(), rGlobType), upcastRow = true, upcastGlobals = false)) } @@ -2203,15 +2207,12 @@ object PruneDeadFields { else { val result = ir.typ match { case _: TStruct => - val rs = rType.asInstanceOf[TStruct] - val uid = genUID() - val ref = Ref(uid, ir.typ) - val ms = MakeStruct( - rs.fields.map { f => + bindIR(ir) { ref => + val ms = MakeStruct(rType.asInstanceOf[TStruct].fields.map { f => f.name -> upcast(ctx, GetField(ref, f.name), f.typ) - } - ) - Let(uid, ir, If(IsNA(ref), NA(ms.typ), ms)) + }) + If(IsNA(ref), NA(ms.typ), ms) + } case ts: TStream => val ra = rType.asInstanceOf[TStream] val uid = genUID() @@ -2223,13 +2224,12 @@ object PruneDeadFields { val ref = Ref(uid, ts.elementType) ToArray(StreamMap(ToStream(ir), uid, upcast(ctx, ref, ra.elementType))) case _: TTuple => - val rt = rType.asInstanceOf[TTuple] - val uid = genUID() - val ref = Ref(uid, ir.typ) - val mt = MakeTuple(rt._types.map { tupleField => - tupleField.index -> upcast(ctx, GetTupleElement(ref, tupleField.index), tupleField.typ) - }) - Let(uid, ir, If(IsNA(ref), NA(mt.typ), mt)) + bindIR(ir) { ref => + val mt = MakeTuple(rType.asInstanceOf[TTuple]._types.map { tupleField => + tupleField.index -> upcast(ctx, GetTupleElement(ref, tupleField.index), tupleField.typ) + }) + If(IsNA(ref), NA(mt.typ), mt) + } case _: TDict => val rd = rType.asInstanceOf[TDict] ToDict(upcast(ctx, ToStream(ir), TArray(rd.elementType))) diff --git a/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala b/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala index 59f9ebbf519..16ce778504a 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Requiredness.scala @@ -72,7 +72,7 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: 
ExecuteContext) { } if (node.typ != TVoid) { cache.bind(node, BaseTypeWithRequiredness(node.typ)) - if (usesAndDefs.free == null || !re.t.isInstanceOf[BaseRef] || !usesAndDefs.free.contains(re.asInstanceOf[RefEquality[BaseRef]])) + if (usesAndDefs.free.isEmpty || !re.t.isInstanceOf[BaseRef] || !usesAndDefs.free.contains(re.asInstanceOf[RefEquality[BaseRef]])) q += re } } @@ -82,10 +82,10 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: ExecuteContext) { usesAndDefs.uses.m.keys.foreach { n => if (supportedType(n.t)) addBindingRelations(n.t) } - if (usesAndDefs.free != null) - usesAndDefs.free.foreach { re => - lookup(re.t).fromPType(env.lookup(re.t.name)) - } + + usesAndDefs.free.foreach { re => + lookup(re.t).fromPType(env.lookup(re.t.name)) + } } def run(): Unit = { @@ -154,7 +154,7 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: ExecuteContext) { } node match { case AggLet(name, value, body, isScan) => addBinding(name, value) - case Let(name, value, body) => addBinding(name, value) + case Let(bindings, _) => bindings.foreach(Function.tupled(addBinding)) case RelationalLet(name, value, body) => addBinding(name, value) case RelationalLetTable(name, value, body) => addBinding(name, value) case TailLoop(loopName, params, _, body) => @@ -538,7 +538,7 @@ class Requiredness(val usesAndDefs: UsesAndDefs, ctx: ExecuteContext) { requiredness.unionFrom(cases.map(lookup)) case AggLet(name, value, body, isScan) => requiredness.unionFrom(lookup(body)) - case Let(name, value, body) => + case Let(_, body) => requiredness.unionFrom(lookup(body)) case RelationalLet(name, value, body) => requiredness.unionFrom(lookup(body)) diff --git a/hail/src/main/scala/is/hail/expr/ir/Simplify.scala b/hail/src/main/scala/is/hail/expr/ir/Simplify.scala index 1f9d0d3cffd..68faa4e0829 100644 --- a/hail/src/main/scala/is/hail/expr/ir/Simplify.scala +++ b/hail/src/main/scala/is/hail/expr/ir/Simplify.scala @@ -121,99 +121,102 @@ object Simplify { private def numericRules: IR => Option[IR] = { - def integralBinaryIdentities(pure: Int => IR) = (ir: IR) => ir match { - case ApplyBinaryPrimOp(op, x, y) if ir.typ.isInstanceOf[TIntegral] => - op match { - case Add() => - if (x == y) Some(ApplyBinaryPrimOp(Multiply(), pure(2), x)) - else None - - case Subtract() => - if (x == y) Some(pure(0)) - else None - - case Multiply() => - if (x == pure(0) || y == pure(0)) Some(pure(0)) - else None - - case RoundToNegInfDivide() => - if (x == y) Some(pure(1)) - else if (x == pure(0)) Some(pure(0)) - else if (y == pure(0)) Some(Die("division by zero", ir.typ)) - else None - - case _: LeftShift | _:RightShift | _: LogicalRightShift => - if (x == pure(0)) Some(pure(0)) - else if (y == I32(0)) Some(x) - else None - - case BitAnd() => - if (x == pure(0) || y == pure(0)) Some(pure(0)) - else if (x == pure(-1)) Some(y) - else if (y == pure(-1)) Some(x) - else None - - case BitOr() => - if (x == pure(-1) || y == pure(-1)) Some(pure(-1)) - else if (x == pure(0)) Some(y) - else if (y == pure(0)) Some(x) - else None - - case BitXOr() => - if (x == y) Some(pure(0)) - else if (x == pure(0)) Some(y) - else if (y == pure(0)) Some(x) - else None - - case _ => - None - } - case _ => - None - } + def integralBinaryIdentities(pure: Int => IR)(ir: IR): Option[IR] = + ir match { + case ApplyBinaryPrimOp(op, x, y) if ir.typ.isInstanceOf[TIntegral] => + op match { + case Add() => + if (x == y) Some(ApplyBinaryPrimOp(Multiply(), pure(2), x)) + else None + + case Subtract() => + if (x == y) Some(pure(0)) + else None + + case Multiply() => + if (x == 
pure(0) || y == pure(0)) Some(pure(0)) + else None + + case RoundToNegInfDivide() => + if (x == y) Some(pure(1)) + else if (x == pure(0)) Some(pure(0)) + else if (y == pure(0)) Some(Die("division by zero", ir.typ)) + else None + + case _: LeftShift | _: RightShift | _: LogicalRightShift => + if (x == pure(0)) Some(pure(0)) + else if (y == I32(0)) Some(x) + else None + + case BitAnd() => + if (x == pure(0) || y == pure(0)) Some(pure(0)) + else if (x == pure(-1)) Some(y) + else if (y == pure(-1)) Some(x) + else None + + case BitOr() => + if (x == pure(-1) || y == pure(-1)) Some(pure(-1)) + else if (x == pure(0)) Some(y) + else if (y == pure(0)) Some(x) + else None + + case BitXOr() => + if (x == y) Some(pure(0)) + else if (x == pure(0)) Some(y) + else if (y == pure(0)) Some(x) + else None + + case _ => + None + } + case _ => + None + } - def hoistUnaryOp = (ir: IR) => ir match { - case ApplyUnaryPrimOp(f@(Negate | BitNot | Bang), x) => x match { - case ApplyUnaryPrimOp(g, y) if g == f => Some(y) + def hoistUnaryOp(ir: IR): Option[IR] = + ir match { + case ApplyUnaryPrimOp(f@(Negate | BitNot | Bang), x) => x match { + case ApplyUnaryPrimOp(g, y) if g == f => Some(y) + case _ => None + } case _ => None } - case _ => None - } - def commonBinaryIdentities(pure: Int => IR) = (ir: IR) => ir match { - case ApplyBinaryPrimOp(f, x, y) => - f match { - case Add() => - if (x == pure(0)) Some(y) - else if (y == pure(0)) Some(x) - else None - - case Subtract() => - if (x == pure(0)) Some(ApplyUnaryPrimOp(Negate, y)) - else if (y == pure(0)) Some(x) - else None - - case Multiply() => - if (x == pure(1)) Some(y) - else if (x == pure(-1)) Some(ApplyUnaryPrimOp(Negate, y)) - else if (y == pure(1)) Some(x) - else if (y == pure(-1)) Some(ApplyUnaryPrimOp(Negate, x)) - else None - - case RoundToNegInfDivide() => - if (y == pure(1)) Some(x) - else if (y == pure(-1)) Some(ApplyUnaryPrimOp(Negate, x)) - else None - - case _ => - None - } - case _ => - None - } + def commonBinaryIdentities(pure: Int => IR)(ir: IR): Option[IR] = + ir match { + case ApplyBinaryPrimOp(f, x, y) => + f match { + case Add() => + if (x == pure(0)) Some(y) + else if (y == pure(0)) Some(x) + else None + + case Subtract() => + if (x == pure(0)) Some(ApplyUnaryPrimOp(Negate, y)) + else if (y == pure(0)) Some(x) + else None + + case Multiply() => + if (x == pure(1)) Some(y) + else if (x == pure(-1)) Some(ApplyUnaryPrimOp(Negate, y)) + else if (y == pure(1)) Some(x) + else if (y == pure(-1)) Some(ApplyUnaryPrimOp(Negate, x)) + else None + + case RoundToNegInfDivide() => + if (y == pure(1)) Some(x) + else if (y == pure(-1)) Some(ApplyUnaryPrimOp(Negate, x)) + else None + + case _ => + None + } + case _ => + None + } Array( - hoistUnaryOp, + hoistUnaryOp(_), (ir: IR) => integralBinaryIdentities(Literal.coerce(ir.typ, _))(ir), (ir: IR) => commonBinaryIdentities(Literal.coerce(ir.typ, _))(ir), ).reduce((f, g) => ir => f(ir).orElse(g(ir))) @@ -237,7 +240,8 @@ object Simplify { case x@StreamMap(NA(_), _, _) => NA(x.typ) case StreamZip(as, names, body, _, _) if as.length == 1 => StreamMap(as.head, names.head, body) - case StreamMap(StreamZip(as, names, zipBody, b, errorID), name, mapBody) => StreamZip(as, names, Let(name, zipBody, mapBody), b, errorID) + case StreamMap(StreamZip(as, names, zipBody, b, errorID), name, mapBody) => + StreamZip(as, names, Let(FastSeq(name -> zipBody), mapBody), b, errorID) case StreamMap(StreamFlatMap(child, flatMapName, flatMapBody), mapName, mapBody) => StreamFlatMap(child, flatMapName, StreamMap(flatMapBody, mapName, 
mapBody)) case x@StreamFlatMap(NA(_), _, _) => NA(x.typ) @@ -292,7 +296,7 @@ object Simplify { case ArrayLen(MakeArray(args, _)) => I32(args.length) case StreamLen(MakeStream(args, _, _)) => I32(args.length) - case StreamLen(Let(name, value, body)) => Let(name, value, StreamLen(body)) + case StreamLen(Let(bindings, body)) => Let(bindings, StreamLen(body)) case StreamLen(StreamMap(s, _, _)) => StreamLen(s) case StreamLen(StreamFlatMap(a, name, body)) => streamSumIR(StreamMap(a, name, StreamLen(body))) case StreamLen(StreamGrouped(a, groupSize)) => bindIR(groupSize)(groupSizeRef => (StreamLen(a) + groupSizeRef - 1) floorDiv groupSizeRef) @@ -323,15 +327,16 @@ object Simplify { case StreamFor(_, _, Begin(Seq())) => Begin(FastSeq()) // FIXME: Unqualify when StreamFold supports folding over stream of streams - case StreamFold(StreamMap(a, n1, b), zero, accumName, valueName, body) if a.typ.asInstanceOf[TStream].elementType.isRealizable => StreamFold(a, zero, accumName, n1, Let(valueName, b, body)) + case StreamFold(StreamMap(a, n1, b), zero, accumName, valueName, body) if a.typ.asInstanceOf[TStream].elementType.isRealizable => + StreamFold(a, zero, accumName, n1, Let(FastSeq(valueName -> b), body)) case StreamFlatMap(StreamMap(a, n1, b1), n2, b2) => - StreamFlatMap(a, n1, Let(n2, b1, b2)) + StreamFlatMap(a, n1, Let(FastSeq(n2 -> b1), b2)) case StreamMap(a, elt, r: Ref) if r.name == elt => a case StreamMap(StreamMap(a, n1, b1), n2, b2) => - StreamMap(a, n1, Let(n2, b1, b2)) + StreamMap(a, n1, Let(FastSeq(n2 -> b1), b2)) case StreamFilter(ArraySort(a, left, right, lessThan), name, cond) => ArraySort(StreamFilter(a, name, cond), left, right, lessThan) @@ -345,8 +350,8 @@ object Simplify { case ToStream(ToArray(s), false) if s.typ.isInstanceOf[TStream] => s - case ToStream(Let(name, value, ToArray(x)), false) if x.typ.isInstanceOf[TStream] => - Let(name, value, x) + case ToStream(Let(bindings, ToArray(x)), false) if x.typ.isInstanceOf[TStream] => + Let(bindings, x) case MakeNDArray(ToArray(someStream), shape, rowMajor, errorId) => MakeNDArray(someStream, shape, rowMajor, errorId) case MakeNDArray(ToStream(someArray, _), shape, rowMajor, errorId) => MakeNDArray(someArray, shape, rowMajor, errorId) @@ -355,9 +360,9 @@ object Simplify { } case NDArrayShape(NDArrayMap(nd, _, _)) => NDArrayShape(nd) - case NDArrayMap(NDArrayMap(child, innerName, innerBody), outerName, outerBody) => { - NDArrayMap(child, innerName, Let(outerName, innerBody, outerBody)) - } + case NDArrayMap(NDArrayMap(child, innerName, innerBody), outerName, outerBody) => + NDArrayMap(child, innerName, Let(FastSeq(outerName -> innerBody), outerBody)) + case GetField(MakeStruct(fields), name) => val (_, x) = fields.find { case (n, _) => n == name }.get @@ -402,14 +407,45 @@ object Simplify { case InsertFields(struct, Seq(), None) => struct case InsertFields(SelectFields(old, _), Seq(), Some(insertFieldOrder)) => SelectFields(old, insertFieldOrder) - case top@Let(x, Let(y, yVal, yBody), xBody) if (x != y) => Let(y, yVal, Let(x, yBody, xBody)) + case Let(Seq(), body) => + body + + case Let(xs, Let(ys, body)) => + Let(xs ++ ys, body) + + // assumes `NormalizeNames` has been run before this. 
+ case Let(Let.Nested(before, after), body) => + def numBindings(b: (String, IR)): Int = + b._2 match { + case let: Let => 1 + let.bindings.length + case _ => 1 + } + + val newBindings = + new BoxedArrayBuilder[(String, IR)]( + after.foldLeft(before.length) { (sum, binding) => + sum + numBindings(binding) + } + ) + + newBindings ++= before + + after.foreach { + case (name: String, ir: Let) => + newBindings ++= ir.bindings + newBindings += name -> ir.body + case (name, value) => + newBindings += name -> value + } + + Let(newBindings.underlying(), body) - case l@Let(name, x@InsertFields(old, newFields, fieldOrder), body) if x.typ.size < 500 && { + case Let(Let.Insert(before, (name, x@InsertFields(old, newFields, _)) +: after), body) if x.typ.size < 500 && { val r = Ref(name, x.typ) val nfSet = newFields.map(_._1).toSet def allRefsCanBePassedThrough(ir1: IR): Boolean = ir1 match { - case GetField(`r`, fd) => true + case GetField(`r`, _) => true case InsertFields(`r`, inserted, _) => inserted.forall { case (_, toInsert) => allRefsCanBePassedThrough(toInsert) } case SelectFields(`r`, fds) => fds.forall(f => !nfSet.contains(f)) case `r` => false // if the binding is referenced in any other context, don't rewrite @@ -423,7 +459,7 @@ object Simplify { } } - allRefsCanBePassedThrough(body) + allRefsCanBePassedThrough(Let(after.toFastSeq, body)) } => val r = Ref(name, x.typ) val fieldNames = newFields.map(_._1).toArray @@ -431,7 +467,9 @@ object Simplify { val newFieldRefs = newFieldMap.map { case (k, ir) => (k, Ref(genUID(), ir.typ)) } // cannot be mapValues, or genUID() gets run for every usage! - def copiedNewFieldRefs(): IndexedSeq[(String, IR)] = fieldNames.map(name => (name, newFieldRefs(name).deepCopy())).toFastSeq + + def copiedNewFieldRefs(): IndexedSeq[(String, IR)] = + fieldNames.map(name => (name, newFieldRefs(name).deepCopy())).toFastSeq def rewrite(ir1: IR): IR = ir1 match { case GetField(Ref(`name`, _), fd) => newFieldRefs.get(fd) match { @@ -444,6 +482,7 @@ object Simplify { copiedNewFieldRefs().filter { case (name, _) => !newFieldSet.contains(name) } ++ fields.map { case (name, ir) => (name, rewrite(ir)) }, Some(ins.typ.fieldNames.toFastSeq)) + case SelectFields(Ref(`name`, _), fds) => SelectFields(InsertFields(Ref(name, old.typ), copiedNewFieldRefs(), Some(x.typ.fieldNames.toFastSeq)), fds) case ta: TableAggregate => ta @@ -454,10 +493,10 @@ object Simplify { } } - val rw = fieldNames.foldLeft[IR](Let(name, old, rewrite(body))) { case (comb, fieldName) => - Let(newFieldRefs(fieldName).name, newFieldMap(fieldName), comb) - } - ForwardLets(ctx)(rw) + Let( + before.toFastSeq ++ fieldNames.map(f => newFieldRefs(f).name -> newFieldMap(f)) ++ FastSeq(name -> old), + rewrite(Let(after.toFastSeq, body)) + ) case SelectFields(old, fields) if tcoerce[TStruct](old.typ).fieldNames sameElements fields => old @@ -541,23 +580,21 @@ object Simplify { case TableGetGlobals(TableHead(child, _)) => TableGetGlobals(child) case TableGetGlobals(TableRepartition(child, _, _)) => TableGetGlobals(child) case TableGetGlobals(TableJoin(child1, child2, _, _)) => - val g1 = TableGetGlobals(child1) - val g2 = TableGetGlobals(child2) - val g1s = genUID() - val g2s = genUID() - Let(g1s, g1, - Let(g2s, g2, - MakeStruct( - g1.typ.asInstanceOf[TStruct].fields.map(f => f.name -> (GetField(Ref(g1s, g1.typ), f.name): IR)) ++ - g2.typ.asInstanceOf[TStruct].fields.map(f => f.name -> (GetField(Ref(g2s, g2.typ), f.name): IR))))) + bindIRs(TableGetGlobals(child1), TableGetGlobals(child2)) { case Seq(g1, g2) => + MakeStruct( 
+ g1.typ.asInstanceOf[TStruct].fields.map(f => f.name -> GetField(g1, f.name)) ++ + g2.typ.asInstanceOf[TStruct].fields.map(f => f.name -> GetField(g2, f.name)) + ) + } + case TableGetGlobals(x@TableMultiWayZipJoin(children, _, globalName)) => MakeStruct(FastSeq(globalName -> MakeArray(children.map(TableGetGlobals), TArray(children.head.typ.globalType)))) case TableGetGlobals(TableLeftJoinRightDistinct(child, _, _)) => TableGetGlobals(child) case TableGetGlobals(TableMapRows(child, _)) => TableGetGlobals(child) case TableGetGlobals(TableMapGlobals(child, newGlobals)) => - val uid = genUID() - val ref = Ref(uid, child.typ.globalType) - Let(uid, TableGetGlobals(child), Subst(newGlobals, BindingEnv(Env.empty[IR].bind("global", ref)))) + bindIR(TableGetGlobals(child)) { ref => + Subst(newGlobals, BindingEnv(Env.empty[IR].bind("global", ref))) + } case TableGetGlobals(TableExplode(child, _)) => TableGetGlobals(child) case TableGetGlobals(TableUnion(children)) => TableGetGlobals(children.head) case TableGetGlobals(TableDistinct(child)) => TableGetGlobals(child) @@ -567,13 +604,12 @@ object Simplify { case TableGetGlobals(TableRename(child, _, globalMap)) => if (globalMap.isEmpty) TableGetGlobals(child) - else { - val uid = genUID() - val ref = Ref(uid, child.typ.globalType) - Let(uid, TableGetGlobals(child), MakeStruct(child.typ.globalType.fieldNames.map { f => - globalMap.getOrElse(f, f) -> GetField(ref, f) - })) - } + else + bindIR(TableGetGlobals(child)) { ref => + MakeStruct(child.typ.globalType.fieldNames.map { f => + globalMap.getOrElse(f, f) -> GetField(ref, f) + }) + } case TableCollect(TableParallelize(x, _)) => x case x@TableCollect(TableOrderBy(child, sortFields)) if sortFields.forall(_.sortOrder == Ascending) @@ -599,8 +635,7 @@ object Simplify { ApplyComparisonOp(LT(sortType), GetField(Ref(left, kvElement.typ), "key"), GetField(Ref(right, kvElement.typ), "key"))) - Let(uid, - TableCollect(TableKeyBy(child, FastSeq())), + Let(FastSeq(uid -> TableCollect(TableKeyBy(child, FastSeq()))), MakeStruct(FastSeq( ("rows", ToArray(StreamMap(ToStream(sorted), uid3, @@ -715,18 +750,22 @@ object Simplify { case TableFilter(TableParallelize(rowsAndGlobal, nPartitions), pred) => val newRowsAndGlobal = rowsAndGlobal match { case MakeStruct(Seq(("rows", rows), ("global", globalVal))) => - Let("global", globalVal, + Let(FastSeq("global" -> globalVal), MakeStruct(FastSeq( ("rows", ToArray(StreamFilter(ToStream(rows), "row", pred))), ("global", Ref("global", globalVal.typ))))) case _ => val uid = genUID() - Let(uid, rowsAndGlobal, - Let("global", GetField(Ref(uid, rowsAndGlobal.typ), "global"), - MakeStruct(FastSeq( - ("rows", ToArray(StreamFilter(ToStream(GetField(Ref(uid, rowsAndGlobal.typ), "rows")), "row", pred))), - ("global", Ref("global", rowsAndGlobal.typ.asInstanceOf[TStruct].fieldType("global"))) - )))) + Let( + FastSeq( + uid -> rowsAndGlobal, + "global" -> GetField(Ref(uid, rowsAndGlobal.typ), "global") + ), + MakeStruct(FastSeq( + "rows" -> ToArray(StreamFilter(ToStream(GetField(Ref(uid, rowsAndGlobal.typ), "rows")), "row", pred)), + "global" -> Ref("global", rowsAndGlobal.typ.asInstanceOf[TStruct].fieldType("global")) + )) + ) } TableParallelize(newRowsAndGlobal, nPartitions) @@ -755,7 +794,7 @@ object Simplify { TableRename(child, Map(renamedPairs: _*), Map.empty) case TableMapRows(TableMapRows(child, newRow1), newRow2) if !ContainsScan(newRow2) => - TableMapRows(child, Let("row", newRow1, newRow2)) + TableMapRows(child, Let(FastSeq("row" -> newRow1), newRow2)) case TableMapGlobals(child, 
Ref("global", _)) => child @@ -815,8 +854,9 @@ object Simplify { case TableRepartition(TableRange(nRows, _), nParts, _) => TableRange(nRows, nParts) case TableMapGlobals(TableMapGlobals(child, ng1), ng2) => - val uid = genUID() - TableMapGlobals(child, Let(uid, ng1, Subst(ng2, BindingEnv(Env("global" -> Ref(uid, ng1.typ)))))) + TableMapGlobals(child, bindIR(ng1) { uid => + Subst(ng2, BindingEnv(Env("global" -> uid))) + }) case TableHead(MatrixColsTable(child), n) if child.typ.colKey.isEmpty => if (n > Int.MaxValue) MatrixColsTable(child) else MatrixColsTable(MatrixColsHead(child, n.toInt)) @@ -976,10 +1016,10 @@ object Simplify { assert(child.typ == x.typ) child - case x@MatrixMapEntries(MatrixMapEntries(child, newEntries1), newEntries2) => - val uid = genUID() - val ne2 = Subst(newEntries2, BindingEnv(Env("g" -> Ref(uid, newEntries1.typ)))) - MatrixMapEntries(child, Let(uid, newEntries1, ne2)) + case MatrixMapEntries(MatrixMapEntries(child, newEntries1), newEntries2) => + MatrixMapEntries(child, bindIR(newEntries1) { uid => + Subst(newEntries2, BindingEnv(Env("g" -> uid))) + }) case MatrixMapGlobals(child, Ref("global", _)) => child @@ -1009,8 +1049,7 @@ object Simplify { case MatrixFilterEntries(MatrixFilterEntries(child, pred1), pred2) => MatrixFilterEntries(child, ApplySpecial("land", FastSeq(), FastSeq(pred1, pred2), TBoolean, ErrorIDs.NO_ERROR)) case MatrixMapGlobals(MatrixMapGlobals(child, ng1), ng2) => - val uid = genUID() - MatrixMapGlobals(child, Let(uid, ng1, Subst(ng2, BindingEnv(Env("global" -> Ref(uid, ng1.typ)))))) + MatrixMapGlobals(child, bindIR(ng1) { uid => Subst(ng2, BindingEnv(Env("global" -> uid))) }) // Note: the following MMR and MMC fusing rules are much weaker than they could be. If they contain aggregations // but those aggregations that mention "row" / "sa" but do not depend on the updated value, we should locally @@ -1020,19 +1059,25 @@ object Simplify { case a: ApplyAggOp => a.initOpArgs.exists(Mentions(_, "va")) // Lowering produces invalid IR case _ => false }) => - val uid = genUID() - MatrixMapRows(child, Let(uid, newRow1, - Subst(newRow2, BindingEnv[IR](Env(("va", Ref(uid, newRow1.typ))), + MatrixMapRows(child, bindIR(newRow1) { uid => + Subst(newRow2, BindingEnv[IR]( + Env("va" -> uid), agg = Some(Env.empty[IR]), - scan = Some(Env.empty[IR]))))) + scan = Some(Env.empty[IR]) + )) + }) case MatrixMapCols(MatrixMapCols(child, newCol1, nk1), newCol2, nk2) if !Mentions.inAggOrScan(newCol2, "sa") => - val uid = genUID() - MatrixMapCols(child, Let(uid, newCol1, - Subst(newCol2, BindingEnv[IR](Env(("sa", Ref(uid, newCol1.typ))), - agg = Some(Env.empty[IR]), - scan = Some(Env.empty[IR])))), - if (nk2.isDefined) nk2 else nk1) + MatrixMapCols(child, + bindIR(newCol1) { uid => + Subst(newCol2, BindingEnv[IR]( + Env("sa" -> uid), + agg = Some(Env.empty[IR]), + scan = Some(Env.empty[IR])) + ) + }, + nk2.orElse(nk1) + ) // bubble up MatrixColsHead node case MatrixColsHead(MatrixMapCols(child, newCol, newKey), n) => MatrixMapCols(MatrixColsHead(child, n), newCol, newKey) diff --git a/hail/src/main/scala/is/hail/expr/ir/TableIR.scala b/hail/src/main/scala/is/hail/expr/ir/TableIR.scala index fa1e40fbc08..b284f29344b 100644 --- a/hail/src/main/scala/is/hail/expr/ir/TableIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/TableIR.scala @@ -16,12 +16,12 @@ import is.hail.io.index.StagedIndexReader import is.hail.linalg.{BlockMatrix, BlockMatrixMetadata, BlockMatrixReadRowBlockedRDD} import is.hail.rvd._ import is.hail.sparkextras.ContextRDD +import is.hail.types._ import 
is.hail.types.physical._ import is.hail.types.physical.stypes._ import is.hail.types.physical.stypes.concrete._ import is.hail.types.physical.stypes.interfaces._ import is.hail.types.physical.stypes.primitives.{SInt64, SInt64Value} -import is.hail.types._ import is.hail.types.virtual._ import is.hail.utils._ import is.hail.utils.prettyPrint.ArrayOfByteArrayInputStream @@ -198,7 +198,7 @@ object LoweredTableReader { "token" -> invokeSeeded("rand_unif", 1, TFloat64, RNGStateLiteral(), F64(0.0), F64(1.0)), "prevkey" -> ApplyScanOp(FastSeq(), FastSeq(Ref("key", keyType)), prevkey)))), "x", - Let("n", ApplyAggOp(FastSeq(), FastSeq(), count), + Let(FastSeq("n" -> ApplyAggOp(FastSeq(), FastSeq(), count)), AggLet("key", GetField(Ref("x", xType), "key"), MakeStruct(FastSeq( "n" -> Ref("n", TInt64), @@ -273,7 +273,7 @@ object LoweredTableReader { val partDataElt = tcoerce[TArray](sortedPartDataIR.typ).elementType val summary = - Let("sortedPartData", sortedPartDataIR, + Let(FastSeq("sortedPartData" -> sortedPartDataIR), MakeStruct(FastSeq( "ksorted" -> invoke("land", TBoolean, @@ -2325,7 +2325,7 @@ case class TableLeftJoinRightDistinct(left: TableIR, right: TableIR, root: Strin lazy val childrenSeq: IndexedSeq[BaseIR] = Array(left, right) lazy val typ: TableType = left.typ.copy( - rowType = left.typ.rowType.structInsert(right.typ.valueType, List(root))) + rowType = left.typ.rowType.structInsert(right.typ.valueType, FastSeq(root))) override def partitionCounts: Option[IndexedSeq[Long]] = left.partitionCounts @@ -2539,7 +2539,7 @@ case class TableMapRows(child: TableIR, newRow: IR) extends TableIR { FastSeq(("global", SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(tv.globals.t))), ("row", SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(tv.rvd.rowPType)))), FastSeq(classInfo[Region], LongInfo, LongInfo), LongInfo, - Let(scanRef, extracted.results, + Let(FastSeq(scanRef -> extracted.results), Coalesce(FastSeq( extracted.postAggIR, Die("Internal error: TableMapRows: row expression missing", extracted.postAggIR.typ))))) @@ -2815,7 +2815,7 @@ case class TableExplode(child: TableIR, path: IndexedSeq[String]) extends TableI (if (i == refs.length - 1) ArrayRef(CastToArray(GetField(ref, field)), arg) else - Let(refs(i + 1).name, GetField(ref, field), arg)))) + Let(FastSeq(refs(i + 1).name -> GetField(ref, field)), arg)))) }.asInstanceOf[InsertFields] } @@ -3037,7 +3037,7 @@ case class TableKeyByAndAggregate( extracted.states, FastSeq(("global", SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(prev.globals.t)))), FastSeq(classInfo[Region], LongInfo), LongInfo, - Let(res, extracted.results, extracted.postAggIR)) + Let(FastSeq(res -> extracted.results), extracted.postAggIR)) assert(rTyp.virtualType == typ.valueType, s"$rTyp, ${ typ.valueType }") val serialize = extracted.serialize(ctx, spec) @@ -3178,7 +3178,7 @@ case class TableAggregateByKey(child: TableIR, expr: IR) extends TableIR { FastSeq(classInfo[Region], LongInfo, LongInfo), UnitInfo, extracted.seqPerElt) - val valueIR = Let(res, extracted.results, extracted.postAggIR) + val valueIR = Let(FastSeq(res -> extracted.results), extracted.postAggIR) val keyType = prevRVD.typ.kType val key = Ref(genUID(), keyType.virtualType) @@ -3188,7 +3188,7 @@ case class TableAggregateByKey(child: TableIR, expr: IR) extends TableIR { FastSeq(("global", SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(prev.globals.t))), (key.name, SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(keyType)))), FastSeq(classInfo[Region], 
LongInfo, LongInfo), LongInfo, - Let(value.name, valueIR, + Let(FastSeq(value.name -> valueIR), InsertFields(key, typ.valueType.fieldNames.map(n => n -> GetField(value, n))))) assert(rowType.virtualType == typ.rowType, s"$rowType, ${ typ.rowType }") diff --git a/hail/src/main/scala/is/hail/expr/ir/TypeCheck.scala b/hail/src/main/scala/is/hail/expr/ir/TypeCheck.scala index 582baeb089a..6afb42d8fb2 100644 --- a/hail/src/main/scala/is/hail/expr/ir/TypeCheck.scala +++ b/hail/src/main/scala/is/hail/expr/ir/TypeCheck.scala @@ -2,7 +2,6 @@ package is.hail.expr.ir import is.hail.backend.ExecuteContext import is.hail.expr.Nat -import is.hail.expr.ir.streams.StreamUtils import is.hail.types.tcoerce import is.hail.types.virtual._ import is.hail.utils.StackSafe._ @@ -42,7 +41,7 @@ object TypeCheck { } private def checkVoidTypedChild(ctx: ExecuteContext, ir: BaseIR, i: Int, env: BindingEnv[Type]): Unit = ir match { - case _: Let if i == 1 => + case l: Let if i == l.bindings.length => case _: StreamFor if i == 1 => case _: RunAggScan if (i == 1 || i == 2) => case _: StreamBufferedAggregate if (i == 1 || i == 3) => @@ -95,7 +94,7 @@ object TypeCheck { case Switch(x, default, cases) => assert(x.typ == TInt32) assert(cases.forall(_.typ == default.typ)) - case x@Let(_, _, body) => + case x@Let(_, body) => assert(x.typ == body.typ) case x@AggLet(_, _, body, _) => assert(x.typ == body.typ) diff --git a/hail/src/main/scala/is/hail/expr/ir/agg/Extract.scala b/hail/src/main/scala/is/hail/expr/ir/agg/Extract.scala index 84720692095..24f15e5880e 100644 --- a/hail/src/main/scala/is/hail/expr/ir/agg/Extract.scala +++ b/hail/src/main/scala/is/hail/expr/ir/agg/Extract.scala @@ -155,8 +155,8 @@ class Aggs(original: IR, rewriteMap: Memo[IR], bindingNodesReferenced: Memo[Unit // only support let nodes here -- other binders like stream operators are undefined behavior RewriteTopDown.rewriteTopDown(original, { case ir if RefEquality(ir) == rewriteRoot => - val Let(name, value, body) = ir - Let(name, value, f(rewriteMap.lookup(body))) + val Let(bindings, body) = ir + Let(bindings, f(rewriteMap.lookup(body))) }).asInstanceOf[IR] } } @@ -337,7 +337,7 @@ object Extract { def addLets(ir: IR, lets: Array[AggLet]): IR = { assert(lets.areDistinct()) - lets.foldRight[IR](ir) { case (al, comb) => Let(al.name, al.value, comb) } + Let(lets.map(al => al.name -> al.value), ir) } def getResultType(aggSig: AggSignature): Type = aggSig match { @@ -465,7 +465,7 @@ object Extract { val signature = PhysicalAggSig(op, foldStateSig) ab += InitOp(i, initOpArgs, signature) -> signature // So seqOp has to be able to reference accumName. 
- val seqWithLet = Let(accumName, ResultOp(i, signature), SeqOp(i, seqOpArgs, signature)) + val seqWithLet = Let(FastSeq(accumName -> ResultOp(i, signature)), SeqOp(i, seqOpArgs, signature)) seqBuilder += seqWithLet i }) @@ -531,29 +531,26 @@ object Extract { ab += InitOp(i, knownLength.map(FastSeq(_)).getOrElse(FastSeq[IR]()) :+ Begin(initOps), checkSig) -> checkSig seqBuilder += Let( - aRef.name, a, + FastSeq(aRef.name -> a), Begin(FastSeq( SeqOp(i, FastSeq(ArrayLen(aRef)), checkSig), StreamFor( StreamRange(I32(0), ArrayLen(aRef), I32(1)), indexName, Let( - elementName, - ArrayRef(aRef, Ref(indexName, TInt32)), + FastSeq(elementName -> ArrayRef(aRef, Ref(indexName, TInt32))), addLets(SeqOp(i, FastSeq(Ref(indexName, TInt32), Begin(newSeq.result().toFastSeq)), eltSig), dependent)))))) val rUID = Ref(genUID(), rt) Let( - rUID.name, - GetTupleElement(result, i), + FastSeq(rUID.name -> GetTupleElement(result, i)), ToArray(StreamMap( StreamRange(0, ArrayLen(rUID), 1), indexName, Let( - newRef.name, - ArrayRef(rUID, Ref(indexName, TInt32)), + FastSeq(newRef.name -> ArrayRef(rUID, Ref(indexName, TInt32))), transformed)))) case x: StreamAgg => diff --git a/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala b/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala index 73812b9860a..9e66752dd19 100644 --- a/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala +++ b/hail/src/main/scala/is/hail/expr/ir/analyses/SemanticHash.scala @@ -30,7 +30,7 @@ case object SemanticHash extends Logging { // Running the algorithm on the name-normalised IR // removes sensitivity to compiler-generated names val nameNormalizedIR = ctx.timer.time("NormalizeNames") { - new NormalizeNames(_.toString, allowFreeVariables = true)(ctx, root) + new NormalizeNames(iruid(_), allowFreeVariables = true)(ctx, root) } val semhash = ctx.timer.time("Hash") { @@ -211,8 +211,12 @@ case object SemanticHash extends Logging { } case MatrixWrite(_, writer) => + buffer ++= Bytes.fromClass(writer.getClass) buffer ++= writer.path.getBytes + case MatrixMultiWrite(_, writer) => + buffer ++= writer.paths.flatMap(_.getBytes) + case NDArrayReindex(_, indices) => indices.foreach(buffer ++= Bytes.fromInt(_)) diff --git a/hail/src/main/scala/is/hail/expr/ir/functions/ArrayFunctions.scala b/hail/src/main/scala/is/hail/expr/ir/functions/ArrayFunctions.scala index b6ea3e46c50..0ebacf8c8b4 100644 --- a/hail/src/main/scala/is/hail/expr/ir/functions/ArrayFunctions.scala +++ b/hail/src/main/scala/is/hail/expr/ir/functions/ArrayFunctions.scala @@ -164,11 +164,12 @@ object ArrayFunctions extends RegistryFunctions { def ref(i: IR) = ArrayRef(a, i, errorID) def div(a: IR, b: IR): IR = ApplyBinaryPrimOp(BinaryOp.defaultDivideOp(t), a, b) - Let(a.name, ArraySort(StreamFilter(ToStream(array), v.name, !IsNA(v))), + Let( + FastSeq(a.name -> ArraySort(StreamFilter(ToStream(array), v.name, !IsNA(v)))), If(IsNA(a), NA(t), - Let(size.name, - ArrayLen(a), + Let( + FastSeq(size.name -> ArrayLen(a)), If(size.ceq(0), NA(t), If(invoke("mod", TInt32, size, 2).cne(0), @@ -187,22 +188,23 @@ object ArrayFunctions extends RegistryFunctions { def updateAccum(min: IR, midx: IR): IR = MakeStruct(FastSeq("m" -> min, "midx" -> midx)) - val body = - Let(value, ArrayRef(a, Ref(idx, TInt32), errorID), - Let(m, GetField(Ref(accum, tAccum), "m"), - If(IsNA(Ref(value, t)), - Ref(accum, tAccum), - If(IsNA(Ref(m, t)), - updateAccum(Ref(value, t), Ref(idx, TInt32)), - If(ApplyComparisonOp(op(t), Ref(value, t), Ref(m, t)), - updateAccum(Ref(value, t), Ref(idx, 
TInt32)), - Ref(accum, tAccum)))))) GetField(StreamFold( StreamRange(I32(0), ArrayLen(a), I32(1)), NA(tAccum), accum, idx, - body + Let( + FastSeq( + value -> ArrayRef(a, Ref(idx, TInt32), errorID), + m -> GetField(Ref(accum, tAccum), "m"), + ), + If(IsNA(Ref(value, t)), + Ref(accum, tAccum), + If(IsNA(Ref(m, t)), + updateAccum(Ref(value, t), Ref(idx, TInt32)), + If(ApplyComparisonOp(op(t), Ref(value, t), Ref(m, t)), + updateAccum(Ref(value, t), Ref(idx, TInt32)), + Ref(accum, tAccum))))) ), "midx") } @@ -222,9 +224,17 @@ object ArrayFunctions extends RegistryFunctions { def updateAccum(m: IR, midx: IR, count: IR): IR = MakeStruct(FastSeq("m" -> m, "midx" -> midx, "count" -> count)) - val body = - Let(value, ArrayRef(a, Ref(idx, TInt32), errorID), - Let(m, GetField(Ref(accum, tAccum), "m"), + val fold = + StreamFold( + StreamRange(I32(0), ArrayLen(a), I32(1)), + NA(tAccum), + accum, + idx, + Let( + FastSeq( + value -> ArrayRef(a, Ref(idx, TInt32), errorID), + m -> GetField(Ref(accum, tAccum), "m") + ), If(IsNA(Ref(value, t)), Ref(accum, tAccum), If(IsNA(Ref(m, t)), @@ -236,17 +246,13 @@ object ArrayFunctions extends RegistryFunctions { Ref(value, t), Ref(idx, TInt32), ApplyBinaryPrimOp(Add(), GetField(Ref(accum, tAccum), "count"), I32(1))), - Ref(accum, tAccum))))))) + Ref(accum, tAccum)))))) + ) - Let(result, StreamFold( - StreamRange(I32(0), ArrayLen(a), I32(1)), - NA(tAccum), - accum, - idx, - body - ), If(ApplyComparisonOp(EQ(TInt32), GetField(Ref(result, tAccum), "count"), I32(1)), - GetField(Ref(result, tAccum), "midx"), - NA(TInt32))) + Let(FastSeq(result -> fold), + If(ApplyComparisonOp(EQ(TInt32), GetField(Ref(result, tAccum), "count"), I32(1)), + GetField(Ref(result, tAccum), "midx"), + NA(TInt32))) } registerIR1("uniqueMinIndex", TArray(tv("T")), TInt32)((_, a, errorID) => uniqueIndex(a, LT(_), errorID)) diff --git a/hail/src/main/scala/is/hail/expr/ir/functions/DictFunctions.scala b/hail/src/main/scala/is/hail/expr/ir/functions/DictFunctions.scala index a06c10cd294..4dc8e257a76 100644 --- a/hail/src/main/scala/is/hail/expr/ir/functions/DictFunctions.scala +++ b/hail/src/main/scala/is/hail/expr/ir/functions/DictFunctions.scala @@ -11,8 +11,7 @@ object DictFunctions extends RegistryFunctions { If(IsNA(dict), NA(TBoolean), - Let(i.name, - LowerBoundOnOrderedCollection(dict, key, onKey = true), + Let(FastSeq(i.name -> LowerBoundOnOrderedCollection(dict, key, onKey = true)), If(i.ceq(ArrayLen(CastToArray(dict))), False(), ApplyComparisonOp( @@ -26,8 +25,7 @@ object DictFunctions extends RegistryFunctions { If(IsNA(dict), NA(default.typ), - Let(i.name, - LowerBoundOnOrderedCollection(dict, key, onKey=true), + Let(FastSeq(i.name -> LowerBoundOnOrderedCollection(dict, key, onKey=true)), If(i.ceq(ArrayLen(CastToArray(dict))), default, If(ApplyComparisonOp(EQWithNA(key.typ), GetField(ArrayRef(CastToArray(dict), i), "key"), key), diff --git a/hail/src/main/scala/is/hail/expr/ir/functions/SetFunctions.scala b/hail/src/main/scala/is/hail/expr/ir/functions/SetFunctions.scala index 5ad071382aa..90d8188d5c3 100644 --- a/hail/src/main/scala/is/hail/expr/ir/functions/SetFunctions.scala +++ b/hail/src/main/scala/is/hail/expr/ir/functions/SetFunctions.scala @@ -10,8 +10,7 @@ object SetFunctions extends RegistryFunctions { If(IsNA(set), NA(TBoolean), - Let(i.name, - LowerBoundOnOrderedCollection(set, elem, onKey = false), + Let(FastSeq(i.name -> LowerBoundOnOrderedCollection(set, elem, onKey = false)), If(i.ceq(ArrayLen(CastToArray(set))), False(), ApplyComparisonOp(EQWithNA(elem.typ), 
ArrayRef(CastToArray(set), i), elem)))) @@ -93,11 +92,10 @@ object SetFunctions extends RegistryFunctions { val len: IR = ArrayLen(a) def div(a: IR, b: IR): IR = ApplyBinaryPrimOp(BinaryOp.defaultDivideOp(t), a, b) - Let(a.name, CastToArray(s), + Let(FastSeq(a.name -> CastToArray(s)), If(IsNA(a), NA(t), - Let(size.name, - If(len.ceq(0), len, If(IsNA(ref(len - 1)), len - 1, len)), + Let(FastSeq(size.name -> If(len.ceq(0), len, If(IsNA(ref(len - 1)), len - 1, len))), If(size.ceq(0), NA(t), If(invoke("mod", TInt32, size, 2).cne(0), diff --git a/hail/src/main/scala/is/hail/expr/ir/functions/StringFunctions.scala b/hail/src/main/scala/is/hail/expr/ir/functions/StringFunctions.scala index b4da0f6a423..087d769b0d0 100644 --- a/hail/src/main/scala/is/hail/expr/ir/functions/StringFunctions.scala +++ b/hail/src/main/scala/is/hail/expr/ir/functions/StringFunctions.scala @@ -249,25 +249,32 @@ object StringFunctions extends RegistryFunctions { val len = Ref(genUID(), TInt32) val s = Ref(genUID(), TInt32) val e = Ref(genUID(), TInt32) - Let(len.name, invoke("length", TInt32, str), - Let(s.name, softBounds(start, len), - Let(e.name, softBounds(end, len), - invoke("substring", TString, str, s, If(e < s, s, e))))) + Let( + FastSeq( + len.name -> invoke("length", TInt32, str), + s.name -> softBounds(start, len), + e.name -> softBounds(end, len), + ), + invoke("substring", TString, str, s, If(e < s, s, e)) + ) } registerIR2("index", TString, TInt32, TString) { (_, s, i, errorID) => val len = Ref(genUID(), TInt32) val idx = Ref(genUID(), TInt32) - Let(len.name, invoke("length", TInt32, s), - Let(idx.name, - If((i < -len) || (i >= len), + Let( + FastSeq( + len.name -> invoke("length", TInt32, s), + idx.name -> If((i < -len) || (i >= len), Die(invoke("concat", TString, Str("string index out of bounds: "), invoke("concat", TString, invoke("str", TString, i), invoke("concat", TString, Str(" / "), invoke("str", TString, len)))), TInt32, errorID), - If(i < 0, i + len, i)), - invoke("substring", TString, s, idx, idx + 1))) + If(i < 0, i + len, i)) + ), + invoke("substring", TString, s, idx, idx + 1) + ) } registerIR2("sliceRight", TString, TInt32, TString) { (_, s, start, _) => invoke("slice", TString, s, start, invoke("length", TInt32, s)) } diff --git a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerAndExecuteShuffles.scala b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerAndExecuteShuffles.scala index 5ea8fb96ef8..22313ba27b1 100644 --- a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerAndExecuteShuffles.scala +++ b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerAndExecuteShuffles.scala @@ -48,12 +48,16 @@ object LowerAndExecuteShuffles { val insGlobName = genUID() def insGlob = Ref(insGlobName, ts.typ.globalType) - val partiallyAggregated = TableMapPartitions(ts, insGlob.name, streamName, Let("global", - GetField(insGlob, "oldGlobals"), - StreamBufferedAggregate(Ref(streamName, streamTyp), bindIR(GetField(insGlob, "__initState")) { states => - Begin(aggSigs.indices.map { aIdx => InitFromSerializedValue(aIdx, GetTupleElement(states, aIdx), aggSigs(aIdx).state) }) - }, newKey, seq, "row", aggSigs, bufferSize)), - 0, 0).noSharing(ctx) + val partiallyAggregated = + TableMapPartitions(ts, insGlob.name, streamName, + Let(FastSeq("global" -> GetField(insGlob, "oldGlobals")), + StreamBufferedAggregate(Ref(streamName, streamTyp), bindIR(GetField(insGlob, "__initState")) { states => + Begin(aggSigs.indices.map { aIdx => + InitFromSerializedValue(aIdx, GetTupleElement(states, aIdx), aggSigs(aIdx).state) + }) + 
}, newKey, seq, "row", aggSigs, bufferSize) + ), + 0, 0).noSharing(ctx) val analyses = LoweringAnalyses(partiallyAggregated, ctx) @@ -78,7 +82,7 @@ object LowerAndExecuteShuffles { val partStream = Ref(genUID(), TStream(shuffleRead.typ.rowType)) val tmp = TableMapPartitions(shuffleRead, insGlob.name, partStream.name, - Let("global", GetField(insGlob, "oldGlobals"), + Let(FastSeq("global" -> GetField(insGlob, "oldGlobals")), mapIR(StreamGroupByKey(partStream, newKeyType.fieldNames.toIndexedSeq, missingEqual = true)) { groupRef => RunAgg(Begin(FastSeq( bindIR(GetField(insGlob, "__initState")) { states => @@ -93,20 +97,23 @@ object LowerAndExecuteShuffles { }))) })), Let( - resultUID, - ResultOp.makeTuple(aggs.aggs), - Let(postAggUID, postAggIR, - Let(resultFromTakeUID, - result, { - val keyIRs: IndexedSeq[(String, IR)] = newKeyType.fieldNames.map(keyName => keyName -> GetField(ArrayRef(Ref(resultFromTakeUID, result.typ), 0), keyName)) - MakeStruct(keyIRs ++ expr.typ.asInstanceOf[TStruct].fieldNames.map { f => (f, GetField(Ref(postAggUID, postAggIR.typ), f)) - }) - } - ) - ) - ), + FastSeq( + resultUID -> ResultOp.makeTuple(aggs.aggs), + postAggUID -> postAggIR, + resultFromTakeUID -> result + ), { + val keyIRs: IndexedSeq[(String, IR)] = + newKeyType.fieldNames.map(keyName => keyName -> GetField(ArrayRef(Ref(resultFromTakeUID, result.typ), 0), keyName)) + + MakeStruct(keyIRs ++ expr.typ.asInstanceOf[TStruct].fieldNames.map { f => + (f, GetField(Ref(postAggUID, postAggIR.typ), f)) + }) + }), aggStateSigsPlusTake) - }), newKeyType.size, newKeyType.size - 1) + } + ), + newKeyType.size, newKeyType.size - 1 + ) Some(TableMapGlobals(tmp, GetField(Ref("global", insGlob.typ), "oldGlobals"))) case _ => None }) diff --git a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerBlockMatrixIR.scala b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerBlockMatrixIR.scala index f6eccec3926..c6141dd21cb 100644 --- a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerBlockMatrixIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerBlockMatrixIR.scala @@ -39,9 +39,7 @@ abstract class BlockMatrixStage(val broadcastVals: IndexedSeq[Ref], val ctxType: val bcFields = broadcastVals.filter { ref => bodyFreeVars.eval.lookupOption(ref.name).isDefined } val bcVals = MakeStruct(bcFields.map { ref => ref.name -> ref }) val bcRef = Ref(genUID(), bcVals.typ) - val wrappedBody = bcFields.foldLeft(body) { case (accum, Ref(f, _)) => - Let(f, GetField(bcRef, f), accum) - } + val wrappedBody = Let(bcFields.map(ref => ref.name -> GetField(bcRef, ref.name)), body) CollectDistributedArray(ctxs, bcVals, ctxRef.name, bcRef.name, wrappedBody, dynamicID, staticID) } @@ -73,7 +71,7 @@ abstract class BlockMatrixStage(val broadcastVals: IndexedSeq[Ref], val ctxType: }) } - Let(blockResults.name, cda, NDArrayConcat(rows, 0)) + Let(FastSeq(blockResults.name -> cda), NDArrayConcat(rows, 0)) } def addContext(newTyp: Type)(newCtx: ((Int, Int)) => IR): BlockMatrixStage = { @@ -545,7 +543,7 @@ class BlockMatrixStage2 private ( if (ctx.name == ctxRefName) _blockIR else - Let(ctxRefName, ctx, _blockIR) + Let(FastSeq(ctxRefName -> ctx), _blockIR) } def ctxType: Type = contexts.elementType @@ -557,11 +555,12 @@ class BlockMatrixStage2 private ( override def blockContext(idx: (Int, Int)): IR = contexts(idx._1, idx._2) override def blockBody(ctxRef: Ref): IR = - Let(ctxRefName, ctxRef, _blockIR) + Let(FastSeq(ctxRefName -> ctxRef), _blockIR) } } - def getBlock(i: IR, j: IR): IR = Let(ctxRefName, contexts(i, j), _blockIR) + def getBlock(i: IR, 
j: IR): IR = + Let(FastSeq(ctxRefName -> contexts(i, j)), _blockIR) def getElement(i: IR, j: IR): IR = { assert(i.typ == TInt64) @@ -639,10 +638,8 @@ class BlockMatrixStage2 private ( } def mapBody(f: IR => IR): BlockMatrixStage2 = { - val blockRef = Ref(genUID(), _blockIR.typ) - val newBlockIR = Let(blockRef.name, _blockIR, f(blockRef)) + val newBlockIR = bindIR(_blockIR)(f) val newType = typ.copy(elementType = newBlockIR.typ.asInstanceOf[TNDArray].elementType) - new BlockMatrixStage2(broadcastVals, newType, contexts, ctxRefName, newBlockIR) } @@ -812,7 +809,7 @@ class BlockMatrixStage2 private ( val s = makestruct( "blockRow" -> GetTupleElement(newCtxRef, 0), "blockCol" -> GetTupleElement(newCtxRef, 1), - "block" -> Let(ctxRefName, GetTupleElement(newCtxRef, 2), _blockIR)) + "block" -> Let(FastSeq(ctxRefName -> GetTupleElement(newCtxRef, 2)), _blockIR)) MakeStream(FastSeq(s), TStream(s.typ)) } @@ -834,16 +831,19 @@ class BlockMatrixStage2 private ( ): IR = { val posRef = Ref(genUID(), TInt32) val newCtxRef = Ref(genUID(), TTuple(TInt32, ctxType)) - val body = Let(posRef.name, GetTupleElement(newCtxRef, 0), - Let(ctxRefName, GetTupleElement(newCtxRef, 1), - f(ctxRef, posRef, _blockIR))) + val body = Let( + FastSeq( + posRef.name -> GetTupleElement(newCtxRef, 0), + ctxRefName -> GetTupleElement(newCtxRef, 1), + ), + f(ctxRef, posRef, _blockIR) + ) + val bodyFreeVars = FreeVariables(body, supportsAgg = false, supportsScan = false) val bcFields = broadcastVals.filter { case Ref(f, _) => bodyFreeVars.eval.lookupOption(f).isDefined } val bcVals = MakeStruct(bcFields.map { ref => ref.name -> ref }) val bcRef = Ref(genUID(), bcVals.typ) - val wrappedBody = bcFields.foldLeft(body) { case (accum, Ref(f, _)) => - Let(f, GetField(bcRef, f), accum) - } + val wrappedBody = Let(bcFields.map(ref => ref.name -> GetField(bcRef, ref.name)), body) val cdaContexts = ToStream(contexts.map(ib) { (rowIdx, colIdx, pos, oldContext) => maketuple(pos, oldContext) diff --git a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIR.scala b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIR.scala index f0d168dc829..8d1b801d2f6 100644 --- a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIR.scala +++ b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIR.scala @@ -49,9 +49,6 @@ object TableStage { new TableStage(letBindings, broadcastVals, globals, partitioner, dependency, contexts, ctxRef.name, partition(ctxRef)) } - def wrapInBindings(body: IR, letBindings: IndexedSeq[(String, IR)]): IR = letBindings.foldRight[IR](body) { - case ((name, value), body) => Let(name, value, body) - } def concatenate(ctx: ExecuteContext, children: IndexedSeq[TableStage]): TableStage = { val keyType = children.head.kType @@ -167,7 +164,7 @@ class TableStage( def partition(ctx: IR): IR = { require(ctx.typ == ctxType) - Let(ctxRefName, ctx, partitionIR) + Let(FastSeq(ctxRefName -> ctx), partitionIR) } def numPartitions: Int = partitioner.numPartitions @@ -260,11 +257,11 @@ class TableStage( val cda = CollectDistributedArray( contexts, broadcastRefs, ctxRefName, glob.name, - broadcastVals.foldLeft(mapF(partitionIR, Ref(ctxRefName, ctxType))) { case (accum, (name, _)) => - Let(name, GetField(glob, name), accum) - }, dynamicID, staticID, Some(dependency)) + Let(broadcastVals.map { case (name, _) => name -> GetField(glob, name) }, + mapF(partitionIR, Ref(ctxRefName, ctxType)) + ), dynamicID, staticID, Some(dependency)) - TableStage.wrapInBindings(bindIR(cda) { cdaRef => body(cdaRef, globals) }, letBindings) + 
Let(letBindings, bindIR(cda) { cdaRef => body(cdaRef, globals) }) } def collectWithGlobals(staticID: String, dynamicID: IR = NA(TString)): IR = @@ -276,9 +273,11 @@ class TableStage( def countPerPartition(): IR = mapCollect("count_per_partition")(part => Cast(StreamLen(part), TInt64)) - def getGlobals(): IR = TableStage.wrapInBindings(globals, letBindings) + def getGlobals(): IR = + Let(letBindings, globals) - def getNumPartitions(): IR = TableStage.wrapInBindings(StreamLen(contexts), letBindings) + def getNumPartitions(): IR = + Let(letBindings, StreamLen(contexts)) def changePartitionerNoRepartition(newPartitioner: RVDPartitioner): TableStage = { require(partitioner.numPartitions == newPartitioner.numPartitions) @@ -349,9 +348,7 @@ class TableStage( val prevContextUID = genUID() val mappingUID = genUID() val idxUID = genUID() - val newContexts = Let( - prevContextUID, - ToArray(contexts), + val newContexts = Let(FastSeq(prevContextUID -> ToArray(contexts)), StreamMap( ToStream( Literal( @@ -660,7 +657,7 @@ object LowerTableIR { val lc = lower(child) - val initState = Let("global", lc.globals, + val initState = Let(FastSeq("global" -> lc.globals), RunAgg( aggs.init, MakeTuple.ordered(aggs.aggs.zipWithIndex.map { case (sig, i) => AggStateValue(i, sig.state) }), @@ -687,7 +684,7 @@ object LowerTableIR { val writer = ETypeValueWriter(codecSpec) val reader = ETypeValueReader(codecSpec) lcWithInitBinding.mapCollectWithGlobals("table_aggregate")({ part: IR => - Let("global", lc.globals, + Let(FastSeq("global" -> lc.globals), RunAgg( Begin(FastSeq( initFromSerializedStates, @@ -756,9 +753,7 @@ object LowerTableIR { )) { finalParts => RunAgg( combineGroup(finalParts, true), - Let("global", globals, - Let(resultUID, results, - aggs.postAggIR)), + Let(FastSeq("global" -> globals, resultUID -> results), aggs.postAggIR), aggs.states ) } @@ -766,7 +761,7 @@ object LowerTableIR { } else { lcWithInitBinding.mapCollectWithGlobals("table_aggregate_singlestage")({ part: IR => - Let("global", lc.globals, + Let(FastSeq("global" -> lc.globals), RunAgg( Begin(FastSeq( initFromSerializedStates, @@ -779,8 +774,7 @@ object LowerTableIR { aggs.states )) }) { case (collected, globals) => - Let("global", - globals, + Let(FastSeq("global" -> globals), RunAgg( Begin(FastSeq( initFromSerializedStates, @@ -788,12 +782,10 @@ object LowerTableIR { Begin(aggs.aggs.zipWithIndex.map { case (sig, i) => CombOpValue(i, GetTupleElement(state, i), sig) }) } )), - Let( - resultUID, - results, - aggs.postAggIR), + Let(FastSeq(resultUID -> results), aggs.postAggIR), aggs.states - )) + ) + ) } } @@ -891,7 +883,7 @@ object LowerTableIR { } }, body = in => lowerIR { - val rows = Let(cname, GetTupleElement(in, 2), Let(gname, loweredGlobals, body)) + val rows = Let(FastSeq(cname -> GetTupleElement(in, 2), gname -> loweredGlobals), body) if (partitioner.kType.fields.isEmpty) rows else bindIR(GetTupleElement(in, 1)) { interval => mapIR(rows) { row => @@ -931,7 +923,7 @@ object LowerTableIR { }) case TableMapGlobals(child, newGlobals) => - lower(child).mapGlobals(old => Let("global", old, newGlobals)) + lower(child).mapGlobals(old => Let(FastSeq("global" -> old), newGlobals)) case TableAggregateByKey(child, expr) => val loweredChild = lower(child) @@ -939,7 +931,7 @@ object LowerTableIR { loweredChild.repartitionNoShuffle(ctx, loweredChild.partitioner.coarsen(child.typ.key.length).strictify()) .mapPartition(Some(child.typ.key)) { partition => - Let("global", loweredChild.globals, + Let(FastSeq("global" -> loweredChild.globals), 
mapIR(StreamGroupByKey(partition, child.typ.key, missingEqual = true)) { groupRef => StreamAgg( groupRef, @@ -971,7 +963,7 @@ object LowerTableIR { case TableFilter(child, cond) => val loweredChild = lower(child) loweredChild.mapPartition(None) { rows => - Let("global", loweredChild.globals, + Let(FastSeq("global" -> loweredChild.globals), StreamFilter(rows, "row", cond)) } @@ -1146,8 +1138,7 @@ object LowerTableIR { } } - val letBindNewCtx = TableStage.wrapInBindings(newCtxs, loweredChild.letBindings) - val bindRelationLetsNewCtx = ToArray(letBindNewCtx) + val bindRelationLetsNewCtx = Let(loweredChild.letBindings, ToArray(newCtxs)) val newCtxSeq = CompileAndEvaluate(ctx, bindRelationLetsNewCtx).asInstanceOf[IndexedSeq[Any]] val numNewParts = newCtxSeq.length val newIntervals = loweredChild.partitioner.rangeBounds.slice(0, numNewParts) @@ -1252,7 +1243,7 @@ object LowerTableIR { } } - val letBindNewCtx = ToArray(TableStage.wrapInBindings(newCtxs, loweredChild.letBindings)) + val letBindNewCtx = Let(loweredChild.letBindings, ToArray(newCtxs)) val newCtxSeq = CompileAndEvaluate(ctx, letBindNewCtx).asInstanceOf[IndexedSeq[Any]] val numNewParts = newCtxSeq.length val oldParts = loweredChild.partitioner.rangeBounds @@ -1273,8 +1264,12 @@ object LowerTableIR { val lc = lower(child) if (!ContainsScan(newRow)) { lc.mapPartition(Some(child.typ.key)) { rows => - Let("global", lc.globals, - mapIR(rows)(row => Let("row", row, newRow))) + Let( + FastSeq("global" -> lc.globals), + mapIR(rows) { row => + Let(FastSeq("row" -> row), newRow) + } + ) } } else { val resultUID = genUID() @@ -1282,7 +1277,7 @@ object LowerTableIR { val results: IR = ResultOp.makeTuple(aggs.aggs) val initState = RunAgg( - Let("global", lc.globals, aggs.init), + Let(FastSeq("global" -> lc.globals), aggs.init), MakeTuple.ordered(aggs.aggs.zipWithIndex.map { case (sig, i) => AggStateValue(i, sig.state) }), aggs.states ) @@ -1303,7 +1298,7 @@ object LowerTableIR { val writer = ETypeValueWriter(codecSpec) val reader = ETypeValueReader(codecSpec) val partitionPrefixSumFiles = lcWithInitBinding.mapCollectWithGlobals("table_scan_write_prefix_sums")({ part: IR => - Let("global", lcWithInitBinding.globals, + Let(FastSeq("global" -> lcWithInitBinding.globals), RunAgg( Begin(FastSeq( initFromSerializedStates, @@ -1449,7 +1444,7 @@ object LowerTableIR { } else { val partitionAggs = lcWithInitBinding.mapCollectWithGlobals("table_scan_prefix_sums_singlestage")({ part: IR => - Let("global", lc.globals, + Let(FastSeq("global" -> lc.globals), RunAgg( Begin(FastSeq( initFromSerializedStates, @@ -1462,8 +1457,7 @@ object LowerTableIR { aggs.states )) }) { case (collected, globals) => - Let("global", - globals, + Let(FastSeq("global" -> globals), ToArray(StreamTake({ val acc = Ref(genUID(), initStateRef.typ) val value = Ref(genUID(), collected.typ.asInstanceOf[TArray].elementType) @@ -1505,8 +1499,7 @@ object LowerTableIR { partition = { (partitionRef: Ref) => bindIRs(GetField(partitionRef, "oldContext"), GetField(partitionRef, "scanState")) { case Seq(oldContext, rawPrefixSum) => bindIR(transformPrefixSum(rawPrefixSum)) { scanState => - - Let("global", lc.globals, + Let(FastSeq("global" -> lc.globals), RunAggScan( lc.partition(oldContext), "row", @@ -1514,10 +1507,7 @@ object LowerTableIR { InitFromSerializedValue(i, GetTupleElement(scanState, i), agg.state) }), aggs.seqPerElt, - Let( - resultUID, - results, - aggs.postAggIR), + Let(FastSeq(resultUID -> results), aggs.postAggIR), aggs.states ) ) @@ -1672,12 +1662,12 @@ object LowerTableIR { refs(i 
+ 1) = Ref(genUID(), roots(i).typ) i += 1 } - refs.tail.zip(roots).foldRight( + Let(refs.tail.zip(roots).map { case (ref, root) => ref.name -> root }, mapIR(ToStream(refs.last, true)) { elt => path.zip(refs.init).foldRight[IR](elt) { case ((p, ref), inserted) => InsertFields(ref, FastSeq(p -> inserted)) } - }) { case ((ref, root), accum) => Let(ref.name, root, accum) } + }) } } @@ -1720,7 +1710,7 @@ object LowerTableIR { val loweredChild = lower(child).strictify(ctx, allowedOverlap) loweredChild.mapPartition(Some(child.typ.key)) { part => - Let(globalName, loweredChild.globals, Let(partitionStreamName, part, body)) + Let(FastSeq(globalName -> loweredChild.globals, partitionStreamName -> part), body) } case TableLiteral(typ, rvd, enc, encodedGlobals) => diff --git a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIRHelpers.scala b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIRHelpers.scala index c2b79972fc4..a2751ee4312 100644 --- a/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIRHelpers.scala +++ b/hail/src/main/scala/is/hail/expr/ir/lowering/LowerTableIRHelpers.scala @@ -22,9 +22,9 @@ object LowerTableIRHelpers { loweredRight, joinKey, joinType, (lGlobals, rGlobals) => { val rGlobalType = rGlobals.typ.asInstanceOf[TStruct] - val rGlobalRef = Ref(genUID(), rGlobalType) - Let(rGlobalRef.name, rGlobals, - InsertFields(lGlobals, rGlobalType.fieldNames.map(f => f -> GetField(rGlobalRef, f)))) + bindIR(rGlobals) { rGlobalRef => + InsertFields(lGlobals, rGlobalType.fieldNames.map(f => f -> GetField(rGlobalRef, f))) + } }, (lEltRef, rEltRef) => { MakeStruct( diff --git a/hail/src/main/scala/is/hail/expr/ir/lowering/LoweringPass.scala b/hail/src/main/scala/is/hail/expr/ir/lowering/LoweringPass.scala index 351ca79574a..ad84438972c 100644 --- a/hail/src/main/scala/is/hail/expr/ir/lowering/LoweringPass.scala +++ b/hail/src/main/scala/is/hail/expr/ir/lowering/LoweringPass.scala @@ -116,17 +116,18 @@ case object LowerArrayAggsToRunAggsPass extends LoweringPass { val newNode = aggs.rewriteFromInitBindingRoot { root => Let( - res, - RunAgg( - Begin(FastSeq( - aggs.init, - StreamFor( - a, - name, - aggs.seqPerElt))), - aggs.results, - aggs.states), - root) + FastSeq( + res -> RunAgg( + Begin(FastSeq( + aggs.init, + StreamFor(a, name, aggs.seqPerElt) + )), + aggs.results, + aggs.states + ) + ), + root + ) } if (newNode.typ != x.typ) @@ -141,7 +142,7 @@ case object LowerArrayAggsToRunAggsPass extends LoweringPass { name, aggs.init, aggs.seqPerElt, - Let(res, aggs.results, root), + Let(FastSeq(res -> aggs.results), root), aggs.states ) } diff --git a/hail/src/main/scala/is/hail/expr/ir/lowering/RVDToTableStage.scala b/hail/src/main/scala/is/hail/expr/ir/lowering/RVDToTableStage.scala index 53067c69100..2686b497313 100644 --- a/hail/src/main/scala/is/hail/expr/ir/lowering/RVDToTableStage.scala +++ b/hail/src/main/scala/is/hail/expr/ir/lowering/RVDToTableStage.scala @@ -101,13 +101,12 @@ object TableStageToRVD { .asSpark("TableStageToRVD") .sc - val baseStruct = MakeStruct(FastSeq( - ("globals", ts.globals), - ("broadcastVals", MakeStruct(ts.broadcastVals)), - ("contexts", ToArray(ts.contexts)))) - val globalsAndBroadcastVals = ts.letBindings.foldRight[IR](baseStruct) { case ((name, value), acc) => - Let(name, value, acc) - } + val globalsAndBroadcastVals = + Let(ts.letBindings, MakeStruct(FastSeq( + "globals" -> ts.globals, + "broadcastVals" -> MakeStruct(ts.broadcastVals), + "contexts" -> ToArray(ts.contexts)) + )) val (Some(PTypeReferenceSingleCodeType(gbPType: PStruct)), f) = 
Compile[AsmFunction1RegionLong](ctx, FastSeq(), FastSeq(classInfo[Region]), LongInfo, globalsAndBroadcastVals) val gbAddr = f(ctx.theHailClassLoader, ctx.fs, ctx.taskContext, ctx.r)(ctx.r) @@ -142,9 +141,11 @@ object TableStageToRVD { val (newRowPType: PStruct, makeIterator) = CompileIterator.forTableStageToRVD( ctx, decodedContextPType, decodedBcValsPType, - ts.broadcastVals.map(_._1).foldRight[IR](ts.partition(In(0, SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(decodedContextPType))))) { case (bcVal, acc) => - Let(bcVal, GetField(In(1, SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(decodedBcValsPType))), bcVal), acc) - }) + Let( + ts.broadcastVals.map(_._1).map(bcVal => bcVal -> GetField(In(1, SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(decodedBcValsPType))), bcVal)), + ts.partition(In(0, SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(decodedContextPType)))) + ) + ) val fsBc = ctx.fsBc diff --git a/hail/src/main/scala/is/hail/expr/ir/package.scala b/hail/src/main/scala/is/hail/expr/ir/package.scala index 4e25e2cc232..398a15a94fd 100644 --- a/hail/src/main/scala/is/hail/expr/ir/package.scala +++ b/hail/src/main/scala/is/hail/expr/ir/package.scala @@ -17,25 +17,24 @@ package object ir { var uidCounter: Long = 0 def genUID(): String = { - val uid = s"__iruid_$uidCounter" + val uid = iruid(uidCounter) uidCounter += 1 uid } + def iruid(i: Long): String = + s"__iruid_$i" + def uuid4(): String = UUID.randomUUID().toString def genSym(base: String): Sym = Sym.gen(base) // Build consistent expression for a filter-condition with keep polarity, // using Let to manage missing-ness. - def filterPredicateWithKeep(irPred: ir.IR, keep: Boolean): ir.IR = { - val pred = genUID() - ir.Let(pred, - if (keep) irPred else ir.ApplyUnaryPrimOp(ir.Bang, irPred), - ir.If(ir.IsNA(ir.Ref(pred, TBoolean)), - ir.False(), - ir.Ref(pred, TBoolean))) - } + def filterPredicateWithKeep(irPred: ir.IR, keep: Boolean): ir.IR = + bindIR(if (keep) irPred else ir.ApplyUnaryPrimOp(ir.Bang, irPred)) { pred => + ir.If(ir.IsNA(pred), ir.False(), pred) + } def invoke(name: String, rt: Type, typeArgs: Seq[Type], errorID: Int, args: IR*): IR = IRFunctionRegistry.lookupUnseeded(name, rt, typeArgs, args.map(_.typ)) match { @@ -78,15 +77,12 @@ package object ir { } def bindIRs(values: IR*)(body: Seq[Ref] => IR): IR = { - val valuesArray = values.toArray - val refs = values.map(v => Ref(genUID(), v.typ)) - values.indices.foldLeft(body(refs)) { case (acc, i) => Let(refs(i).name, valuesArray(i), acc) } + val bindings = values.toFastSeq.map(genUID() -> _) + Let(bindings, body(bindings.map(b => Ref(b._1, b._2.typ)))) } - def bindIR(v: IR)(body: Ref => IR): IR = { - val ref = Ref(genUID(), v.typ) - Let(ref.name, v, body(ref)) - } + def bindIR(v: IR)(body: Ref => IR): IR = + bindIRs(v) { case Seq(ref) => body(ref) } def iota(start: IR, step: IR): IR = StreamIota(start, step) diff --git a/hail/src/main/scala/is/hail/expr/ir/streams/EmitStream.scala b/hail/src/main/scala/is/hail/expr/ir/streams/EmitStream.scala index 1e9704faff6..28034169641 100644 --- a/hail/src/main/scala/is/hail/expr/ir/streams/EmitStream.scala +++ b/hail/src/main/scala/is/hail/expr/ir/streams/EmitStream.scala @@ -304,10 +304,16 @@ object EmitStream { SStreamValue(producer) } - case Let(name, value, body) => - cb.withScopedMaybeStreamValue(EmitCode.fromI(cb.emb)(cb => emit(value, cb)), s"let_$name") { ev => - produce(body, cb, env = env.bind(name, ev)) + case Let(bindings, body) => + def go(env: EmitEnv): IndexedSeq[(String, 
IR)] => IEmitCode = { + case (name, value) +: rest => + cb.withScopedMaybeStreamValue(EmitCode.fromI(cb.emb)(cb => emit(value, cb, env = env)), s"let_$name") { ev => + go(env.bind(name, ev))(rest) + } + case Seq() => + produce(body, cb, env = env) } + go(env)(bindings) case In(n, _) => // this, Code[Region], ... diff --git a/hail/src/main/scala/is/hail/io/bgen/BgenSettings.scala b/hail/src/main/scala/is/hail/io/bgen/BgenSettings.scala index f14fad043e6..327a3dc1965 100644 --- a/hail/src/main/scala/is/hail/io/bgen/BgenSettings.scala +++ b/hail/src/main/scala/is/hail/io/bgen/BgenSettings.scala @@ -91,7 +91,7 @@ case class BgenSettings( require(PruneDeadFields.isSupertype(requestedType, MatrixBGENReader.fullMatrixType(rg).canonicalTableType)) val entryType: Option[TStruct] = requestedType.rowType - .fieldOption(MatrixType.entriesIdentifier) + .selfField(MatrixType.entriesIdentifier) .map(f => f.typ.asInstanceOf[TArray].elementType.asInstanceOf[TStruct]) val rowPType: PCanonicalStruct = PCanonicalStruct(required = true, diff --git a/hail/src/main/scala/is/hail/io/bgen/LoadBgen.scala b/hail/src/main/scala/is/hail/io/bgen/LoadBgen.scala index 3194ebfacac..825fe702a4e 100644 --- a/hail/src/main/scala/is/hail/io/bgen/LoadBgen.scala +++ b/hail/src/main/scala/is/hail/io/bgen/LoadBgen.scala @@ -479,7 +479,7 @@ class MatrixBGENReader( VirtualTypeWithReq(PType.canonical(requestedType.globalType, required = true)) override def lowerGlobals(ctx: ExecuteContext, requestedGlobalType: TStruct): IR = { - requestedGlobalType.fieldOption(LowerMatrixIR.colsFieldName) match { + requestedGlobalType.selfField(LowerMatrixIR.colsFieldName) match { case Some(f) => val ta = f.typ.asInstanceOf[TArray] MakeStruct(FastSeq((LowerMatrixIR.colsFieldName, { @@ -677,7 +677,9 @@ case class BgenPartitionReaderWithVariantFilter(fileMetadata: Array[BgenFileMeta val bound = SStackStruct.constructFromArgs(cb, vs.elementRegion, TTuple(nextVariant.st.virtualType, TInt32), EmitValue.present(if (nextVariant.st.size == 1) nextVariant.insert(cb, elementRegion, - nextVariant.st.virtualType.insert(TArray(TString), "alleles")._1.asInstanceOf[TStruct], ("alleles", EmitValue.missing(SJavaArrayString(true)))) + nextVariant.st.virtualType.asInstanceOf[TStruct].structInsert(TArray(TString), FastSeq("alleles")), + ("alleles", EmitValue.missing(SJavaArrayString(true))) + ) else nextVariant), EmitValue.present(primitive(const(nextVariant.st.size))) diff --git a/hail/src/main/scala/is/hail/io/bgen/StagedBGENReader.scala b/hail/src/main/scala/is/hail/io/bgen/StagedBGENReader.scala index b1fc7e1e9d8..27f73478732 100644 --- a/hail/src/main/scala/is/hail/io/bgen/StagedBGENReader.scala +++ b/hail/src/main/scala/is/hail/io/bgen/StagedBGENReader.scala @@ -193,7 +193,7 @@ object StagedBGENReader { structFieldCodes += EmitCode.present(cb.emb, primitive(fileIdx)) cb.assign(dataSize, cbfis.invoke[Int]("readInt")) - requestedType.fieldOption(LowerMatrixIR.entriesFieldName) match { + requestedType.selfField(LowerMatrixIR.entriesFieldName) match { case None => cb += Code.toUnit(cbfis.invoke[Long, Long]("skipBytes", dataSize.toL)) case Some(t) => diff --git a/hail/src/main/scala/is/hail/io/vcf/LoadVCF.scala b/hail/src/main/scala/is/hail/io/vcf/LoadVCF.scala index ee8112b534c..4324f15249c 100644 --- a/hail/src/main/scala/is/hail/io/vcf/LoadVCF.scala +++ b/hail/src/main/scala/is/hail/io/vcf/LoadVCF.scala @@ -1212,11 +1212,11 @@ class ParseLineContext( val fileNum: Int, val entriesName: String ) { - val entryType: TStruct = rowType.fieldOption(entriesName) match { + 
val entryType: TStruct = rowType.selfField(entriesName) match { case Some(entriesArray) => entriesArray.typ.asInstanceOf[TArray].elementType.asInstanceOf[TStruct] case None => TStruct.empty } - val infoSignature = rowType.fieldOption("info").map(_.typ.asInstanceOf[TStruct]).orNull + val infoSignature = rowType.selfField("info").map(_.typ.asInstanceOf[TStruct]).orNull val hasQual = rowType.hasField("qual") val hasFilters = rowType.hasField("filters") val hasEntryFields = entryType.size > 0 diff --git a/hail/src/main/scala/is/hail/methods/PCA.scala b/hail/src/main/scala/is/hail/methods/PCA.scala index 97961b09ac1..5db87c9aff2 100644 --- a/hail/src/main/scala/is/hail/methods/PCA.scala +++ b/hail/src/main/scala/is/hail/methods/PCA.scala @@ -90,9 +90,9 @@ case class PCA(entryField: String, k: Int, computeLoadings: Boolean) extends Mat ContextRDD.empty() val rvd = RVD.coerce(ctx, RVDType(rowType, mv.typ.rowKey), crdd) - val (t1, f1) = mv.typ.globalType.insert(TArray(TFloat64), "eigenvalues") - val (globalScoreType, f3) = mv.typ.colKeyStruct.insert(TArray(TFloat64), "scores") - val (newGlobalType, f2) = t1.insert(TArray(globalScoreType), "scores") + val (t1, f1) = mv.typ.globalType.insert(TArray(TFloat64), FastSeq("eigenvalues")) + val (globalScoreType, f3) = mv.typ.colKeyStruct.insert(TArray(TFloat64), FastSeq("scores")) + val (newGlobalType, f2) = t1.insert(TArray(globalScoreType), FastSeq("scores")) val data = if (!svd.V.isTransposed) diff --git a/hail/src/main/scala/is/hail/types/physical/PType.scala b/hail/src/main/scala/is/hail/types/physical/PType.scala index c5fa249c2fe..2ecabd1d1e5 100644 --- a/hail/src/main/scala/is/hail/types/physical/PType.scala +++ b/hail/src/main/scala/is/hail/types/physical/PType.scala @@ -418,7 +418,7 @@ abstract class PType extends Serializable with Requiredness { case x@PCanonicalStruct(fields, r) => val ts = t.asInstanceOf[TStruct] assert(ts.fieldNames.forall(x.fieldNames.contains)) - PCanonicalStruct(r, fields.flatMap { pf => ts.fieldOption(pf.name).map { vf => (pf.name, pf.typ.subsetTo(vf.typ)) } }: _*) + PCanonicalStruct(r, fields.flatMap { pf => ts.selfField(pf.name).map { vf => (pf.name, pf.typ.subsetTo(vf.typ)) } }: _*) case PCanonicalTuple(fields, r) => val tt = t.asInstanceOf[TTuple] PCanonicalTuple(fields.flatMap { pf => tt.fieldIndex.get(pf.index).map(vi => PTupleField(pf.index, pf.typ.subsetTo(tt.types(vi)))) }, r) diff --git a/hail/src/main/scala/is/hail/types/virtual/TStruct.scala b/hail/src/main/scala/is/hail/types/virtual/TStruct.scala index a104c2c399d..d88a97eaa0f 100644 --- a/hail/src/main/scala/is/hail/types/virtual/TStruct.scala +++ b/hail/src/main/scala/is/hail/types/virtual/TStruct.scala @@ -1,15 +1,15 @@ package is.hail.types.virtual -import is.hail.annotations.{Annotation, AnnotationPathException, _} +import is.hail.annotations._ import is.hail.backend.HailStateManager import is.hail.expr.ir.{Env, IRParser, IntArrayBuilder} -import is.hail.types.physical.{PField, PStruct} import is.hail.utils._ import org.apache.spark.sql.Row import org.json4s.CustomSerializer import org.json4s.JsonAST.JString import scala.collection.JavaConverters._ +import scala.reflect.ClassTag class TStructSerializer extends CustomSerializer[TStruct](format => ( { case JString(s) => IRParser.parseStructType(s) }, @@ -82,15 +82,14 @@ final case class TStruct(fields: IndexedSeq[Field]) extends TBaseStruct { def fieldType(name: String): Type = types(fieldIdx(name)) - override def fieldOption(path: List[String]): Option[Field] = - if (path.isEmpty) - None - else { - val 
f = selfField(path.head) - if (path.length == 1) - f - else - f.flatMap(_.typ.fieldOption(path.tail)) + def fieldOption(path: IndexedSeq[String]): Option[Field] = + if (path.isEmpty) None + else (1 until path.length).foldLeft(selfField(path.head)) { + case (Some(f), i) => f.typ match { + case s: TStruct => s.selfField(path(i)) + case _ => return None + } + case _ => return None } override def queryTyped(p: List[String]): (Type, Querier) = { @@ -111,43 +110,69 @@ final case class TStruct(fields: IndexedSeq[Field]) extends TBaseStruct { } } - override def insert(signature: Type, p: List[String]): (Type, Inserter) = { - if (p.isEmpty) - (signature, (a, toIns) => toIns) - else { - val key = p.head - val f = selfField(key) - val keyIndex = f.map(_.index) - val (newKeyType, keyF) = f - .map(_.typ) - .getOrElse(TStruct.empty) - .insert(signature, p.tail) - - val newSignature = keyIndex match { - case Some(i) => updateKey(key, i, newKeyType) - case None => appendKey(key, newKeyType) - } + def insert(signature: Type, path: IndexedSeq[String]): (TStruct, Inserter) = { + if (path.isEmpty) + throw new IllegalArgumentException(s"Empty path to new field of type '$signature'.") - val localSize = fields.size + val missing: Annotation = + null.asInstanceOf[Annotation] - val inserter: Inserter = (a, toIns) => { - val r = if (a == null || localSize == 0) // localsize == 0 catches cases where we overwrite a path - Row.fromSeq(Array.fill[Any](localSize)(null)) - else - a.asInstanceOf[Row] - keyIndex match { - case Some(i) => r.update(i, keyF(r.get(i), toIns)) - case None => r.append(keyF(null, toIns)) - } + + def updateField(typ: TStruct, idx: Int)(f: Inserter)(a: Annotation, v: Any): Annotation = + a match { + case r: Row => + r.update(idx, f(r.get(idx), v)) + case _ => + val arr = new Array[Any](typ.size) + arr.update(idx, f(missing, v)) + Row.fromSeq(arr) + } + + def addField(typ: TStruct)(f: Inserter)(a: Annotation, v: Any): Annotation = { + val arr = new Array[Any](typ.size + 1) + a match { + case r: Row => + for (i <- 0 until typ.size) { + arr.update(i, r.get(i)) + } + case _ => } - (newSignature, inserter) + arr(typ.size) = f(missing, v) + Row.fromSeq(arr) } + + val (newType, inserter) = + path + .view + .scanLeft((this, identity[Type] _, identity[Inserter] _)) { + case ((parent, _, _), name) => + parent.selfField(name) match { + case Some(Field(name, t, idx)) => + ( + t match { case s: TStruct => s case _ => TStruct.empty }, + typ => parent.updateKey(name, idx, typ), + updateField(parent, idx) + ) + case None => + ( + TStruct.empty, + typ => parent.appendKey(name, typ), + addField(parent) + ) + } + } + .foldRight((signature, ((_, toIns) => toIns): Inserter)) { + case ((_, insertField, transform), (newType, inserter)) => + (insertField(newType), transform(inserter)) + } + + (newType.asInstanceOf[TStruct], inserter) } - def structInsert(signature: Type, p: List[String]): TStruct = { + def structInsert(signature: Type, p: IndexedSeq[String]): TStruct = { require(p.nonEmpty || signature.isInstanceOf[TStruct], s"tried to remap top-level struct to non-struct $signature") - val (t, f) = insert(signature, p) - t.asInstanceOf[TStruct] + val (t, _) = insert(signature, p) + t } def updateKey(key: String, i: Int, sig: Type): TStruct = { diff --git a/hail/src/main/scala/is/hail/types/virtual/Type.scala b/hail/src/main/scala/is/hail/types/virtual/Type.scala index 2cac80b8d61..285b1c0d92e 100644 --- a/hail/src/main/scala/is/hail/types/virtual/Type.scala +++ b/hail/src/main/scala/is/hail/types/virtual/Type.scala @@ 
-1,12 +1,11 @@ package is.hail.types.virtual import is.hail.annotations._ -import is.hail.asm4s._ import is.hail.backend.HailStateManager import is.hail.check.{Arbitrary, Gen} import is.hail.expr.ir._ -import is.hail.types._ import is.hail.expr.{JSONAnnotationImpex, SparkAnnotationImpex} +import is.hail.types._ import is.hail.utils import is.hail.utils._ import is.hail.variant.ReferenceGenome @@ -125,15 +124,6 @@ abstract class Type extends BaseType with Serializable { def subst(): Type = this - def insert(signature: Type, fields: String*): (Type, Inserter) = insert(signature, fields.toList) - - def insert(signature: Type, path: List[String]): (Type, Inserter) = { - if (path.nonEmpty) - TStruct.empty.insert(signature, path) - else - (signature, (a, toIns) => toIns) - } - def query(fields: String*): Querier = query(fields.toList) def query(path: List[String]): Querier = { @@ -160,11 +150,6 @@ abstract class Type extends BaseType with Serializable { sb.append(_toPretty) } - def fieldOption(fields: String*): Option[Field] = fieldOption(fields.toList) - - def fieldOption(path: List[String]): Option[Field] = - None - def schema: DataType = SparkAnnotationImpex.exportType(this) def str(a: Annotation): String = if (a == null) "NA" else a.toString diff --git a/hail/src/test/scala/is/hail/HailSuite.scala b/hail/src/test/scala/is/hail/HailSuite.scala index 334607d933a..fec61bbe882 100644 --- a/hail/src/test/scala/is/hail/HailSuite.scala +++ b/hail/src/test/scala/is/hail/HailSuite.scala @@ -215,7 +215,7 @@ class HailSuite extends TestNGSuite { ): Unit = { val arrayIR = if (expected == null) nd else { val refs = Array.fill(nd.typ.asInstanceOf[TNDArray].nDims) { Ref(genUID(), TInt32) } - Let("nd", nd, + Let(FastSeq("nd" -> nd), dims.zip(refs).foldRight[IR](NDArrayRef(Ref("nd", nd.typ), refs.map(Cast(_, TInt64)), -1)) { case ((n, ref), accum) => ToArray(StreamMap(rangeIR(n.toInt), ref.name, accum)) diff --git a/hail/src/test/scala/is/hail/expr/ir/Aggregators2Suite.scala b/hail/src/test/scala/is/hail/expr/ir/Aggregators2Suite.scala index fef3d8e794d..a8ce8e9e16c 100644 --- a/hail/src/test/scala/is/hail/expr/ir/Aggregators2Suite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/Aggregators2Suite.scala @@ -62,9 +62,8 @@ class Aggregators2Suite extends HailSuite { Array(aggSig.state), FastSeq((argRef.name, SingleCodeEmitParamType(true, PTypeReferenceSingleCodeType(argT)))), FastSeq(classInfo[Region], LongInfo), UnitInfo, - args.map(_._1).foldLeft[IR](foo) { case (op, name) => - Let(name, GetField(argRef, name), op) - })._2 + Let(args.map { case (n, _) => n -> GetField(argRef, n) }, foo) + )._2 } val serialize = SerializeAggs(0, 0, spec, Array(aggSig.state)) @@ -414,13 +413,15 @@ class Aggregators2Suite extends HailSuite { def seqOpOverArray(aggIdx: Int, a: IR, seqOps: IR => IR, alstate: ArrayLenAggSig): IR = { val idx = Ref(genUID(), TInt32) - val elt = Ref(genUID(), tcoerce[TArray](a.typ).elementType) Begin(FastSeq( SeqOp(aggIdx, FastSeq(ArrayLen(a)), alstate), StreamFor(StreamRange(0, ArrayLen(a), 1), idx.name, - Let(elt.name, ArrayRef(a, idx), - SeqOp(aggIdx, FastSeq(idx, seqOps(elt)), AggElementsAggSig(alstate.nested)))))) + bindIR(ArrayRef(a, idx)) { elt => + SeqOp(aggIdx, FastSeq(idx, seqOps(elt)), AggElementsAggSig(alstate.nested)) + } + ) + )) } @Test def testMin() { @@ -679,8 +680,7 @@ class Aggregators2Suite extends HailSuite { val ir = TableCollect(MatrixColsTable(MatrixMapCols( MatrixRead(t, false, false, MatrixRangeReader(10, 10, None)), InsertFields(Ref("sa", t.colType), FastSeq(("foo", - 
Let("bar", - GetField(Ref("sa", t.colType), "col_idx") + I32(1), + Let(FastSeq("bar" -> (GetField(Ref("sa", t.colType), "col_idx") + I32(1))), AggFilter( GetField(Ref("va", t.rowType), "row_idx") < I32(5), Ref("bar", TInt32).toL + Ref("bar", TInt32).toL + ApplyAggOp( @@ -769,16 +769,17 @@ class Aggregators2Suite extends HailSuite { implicit val execStrats = ExecStrategy.compileOnly val takeSig = PhysicalAggSig(Take(), TakeStateSig(VirtualTypeWithReq(PInt64(true)))) val x = Let( - "x", - RunAgg( + FastSeq("x" -> RunAgg( Begin(FastSeq( InitOp(0, FastSeq(I32(10)), takeSig), SeqOp(0, FastSeq(NA(TInt64)), takeSig), SeqOp(0, FastSeq(I64(-1l)), takeSig), SeqOp(0, FastSeq(I64(2l)), takeSig) - )), + ) + ), AggStateValue(0, takeSig.state), - FastSeq(takeSig.state)), + FastSeq(takeSig.state) + )), RunAgg( Begin(FastSeq( InitOp(0, FastSeq(I32(10)), takeSig), diff --git a/hail/src/test/scala/is/hail/expr/ir/EmitStreamSuite.scala b/hail/src/test/scala/is/hail/expr/ir/EmitStreamSuite.scala index e437cac5023..60f12c6476f 100644 --- a/hail/src/test/scala/is/hail/expr/ir/EmitStreamSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/EmitStreamSuite.scala @@ -248,14 +248,14 @@ class EmitStreamSuite extends HailSuite { } } - @Test def testEmitLet() { + @Test def testEmitLet(): Unit = { val ir = - Let("end", 10, + Let(FastSeq("start" -> 3, "end" -> 10), StreamFlatMap( - Let("start", 3, - StreamRange(Ref("start", TInt32), Ref("end", TInt32), 1)), + StreamRange(Ref("start", TInt32), Ref("end", TInt32), 1), "i", - MakeStream(IndexedSeq(Ref("i", TInt32), Ref("end", TInt32)), TStream(TInt32))) + MakeStream(IndexedSeq(Ref("i", TInt32), Ref("end", TInt32)), TStream(TInt32)) + ) ) assert(evalStream(ir) == (3 until 10).flatMap { i => IndexedSeq(i, 10) }, Pretty(ctx, ir)) assert(evalStreamLen(ir).isEmpty, Pretty(ctx, ir)) @@ -861,7 +861,7 @@ class EmitStreamSuite extends HailSuite { for ((ir, v) <- IndexedSeq( StreamRange(0, 10, 1) -> 0, target -> 1, - Let("x", True(), target) -> 1, + Let(FastSeq("x" -> True()), target) -> 1, StreamMap(target, "i", i) -> 1, StreamMap(StreamMap(target, "i", i), "i", i * i) -> 1, StreamFilter(target, "i", StreamFold(StreamRange(0, i, 1), 0, "a", "i", i)) -> 1, diff --git a/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala b/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala index 35534bb6380..6d01f6873fc 100644 --- a/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/ForwardLetsSuite.scala @@ -6,9 +6,17 @@ import is.hail.expr.Nat import is.hail.expr.ir.DeprecatedIRBuilder.{applyAggOp, let, _} import is.hail.types.virtual._ import is.hail.utils._ -import org.testng.annotations.{DataProvider, Test} +import org.scalatest.AppendedClues.convertToClueful +import org.scalatest.Matchers.{be, convertToAnyShouldWrapper} +import org.testng.annotations.{BeforeMethod, DataProvider, Test} class ForwardLetsSuite extends HailSuite { + + @BeforeMethod + def resetUidCounter(): Unit = { + is.hail.expr.ir.uidCounter = 0 + } + @DataProvider(name = "nonForwardingOps") def nonForwardingOps(): Array[Array[IR]] = { val a = ToArray(StreamRange(I32(0), I32(10), I32(1))) @@ -25,7 +33,7 @@ class ForwardLetsSuite extends HailSuite { MakeTuple.ordered(FastSeq(ApplyBinaryPrimOp(Add(), x, I32(1)), ApplyBinaryPrimOp(Add(), x, I32(2)))), ApplyBinaryPrimOp(Add(), ApplyBinaryPrimOp(Add(), x, x), I32(1)), StreamAgg(ToStream(a), "y", ApplyAggOp(Sum())(x + y)) - ).map(ir => Array[IR](Let("x", In(0, TInt32) + In(0, TInt32), ir))) + ).map(ir => 
Array[IR](Let(FastSeq("x" -> (In(0, TInt32) + In(0, TInt32))), ir))) } @DataProvider(name = "nonForwardingNonEvalOps") @@ -36,7 +44,7 @@ class ForwardLetsSuite extends HailSuite { NDArrayMap(In(1, TNDArray(TInt32, Nat(1))), "y", x + y), NDArrayMap2(In(1, TNDArray(TInt32, Nat(1))), In(2, TNDArray(TInt32, Nat(1))), "y", "z", x + y + Ref("z", TInt32), ErrorIDs.NO_ERROR), TailLoop("f", FastSeq("y" -> I32(0)), TInt32, If(y < x, Recur("f", FastSeq[IR](y - I32(1)), TInt32), x)) - ).map(ir => Array[IR](Let("x", In(0, TInt32) + In(0, TInt32), ir))) + ).map(ir => Array[IR](Let(FastSeq("x" -> (In(0, TInt32) + In(0, TInt32))), ir))) } def aggMin(value: IR): ApplyAggOp = ApplyAggOp(FastSeq(), FastSeq(value), AggSignature(Min(), FastSeq(), FastSeq(value.typ))) @@ -63,7 +71,7 @@ class ForwardLetsSuite extends HailSuite { ApplyUnaryPrimOp(Negate, x), ToArray(StreamMap(StreamRange(I32(0), x, I32(1)), "foo", Ref("foo", TInt32))), ToArray(StreamFilter(StreamRange(I32(0), x, I32(1)), "foo", Ref("foo", TInt32) <= I32(0))) - ).map(ir => Array[IR](Let("x", In(0, TInt32) + In(0, TInt32), ir))) + ).map(ir => Array[IR](Let(FastSeq("x" -> (In(0, TInt32) + In(0, TInt32))), ir))) } @DataProvider(name = "forwardingAggOps") @@ -116,15 +124,76 @@ class ForwardLetsSuite extends HailSuite { assert(!after.isInstanceOf[AggLet]) } - @Test def testLetNoMention(): Unit = { - val ir = Let("x", I32(1), I32(2)) - assert(ForwardLets[IR](ctx)(ir) == I32(2)) - } + @DataProvider(name = "TrivialIRCases") + def trivalIRCases: Array[Array[Any]] = { + val pi = Math.atan(1) * 4 - @Test def testLetRefRewrite(): Unit = { - val ir = Let("x", I32(1), Ref("x", TInt32)) - assert(ForwardLets[IR](ctx)(ir) == I32(1)) - } + Array( + Array( + Let(FastSeq("x" -> I32(0)), I32(2)), + I32(2), + """"x" is unused.""" + ), + Array( + Let(FastSeq("x" -> I32(0)), Ref("x", TInt32)), + I32(0), + """"x" is constant and is used once.""" + ), + Array( + Let(FastSeq("x" -> I32(2)), Ref("x", TInt32) * Ref("x", TInt32)), + I32(2) * I32(2), + """"x" is a primitive constant (ForwardLets does not evaluate).""" + ), + Array( + bindIRs(I32(2), F64(pi), Ref("r", TFloat64)) { case Seq(two, pi, r) => + ApplyBinaryPrimOp(Multiply(), + ApplyBinaryPrimOp(Multiply(), Cast(two, TFloat64), pi), + r + ) + }, + ApplyBinaryPrimOp(Multiply(), + ApplyBinaryPrimOp(Multiply(), Cast(I32(2), TFloat64), F64(pi)), + Ref("r", TFloat64) + ), + """Forward constant primitive values and simple use ref.""" + ), + Array( + Let( + FastSeq( + iruid(0) -> I32(2), + iruid(1) -> Cast(Ref(iruid(0), TInt32), TFloat64), + iruid(2) -> ApplyBinaryPrimOp(FloatingPointDivide(), Ref(iruid(1), TFloat64), F64(2)), + iruid(3) -> F64(pi), + iruid(4) -> ApplyBinaryPrimOp(Multiply(), Ref(iruid(3), TFloat64), Ref(iruid(1), TFloat64)), + iruid(5) -> ApplyBinaryPrimOp(Multiply(), Ref(iruid(2), TFloat64), Ref(iruid(2), TFloat64)), + iruid(6) -> ApplyBinaryPrimOp(Multiply(), Ref(iruid(3), TFloat64), Ref(iruid(5), TFloat64)) + ), + MakeStruct(FastSeq( + "radius" -> Ref(iruid(2), TFloat64), + "circumference" -> Ref(iruid(4), TFloat64), + "area" -> Ref(iruid(6), TFloat64), + )) + ), + Let(FastSeq( + iruid(1) -> Cast(I32(2), TFloat64), + iruid(2) -> ApplyBinaryPrimOp(FloatingPointDivide(), Ref(iruid(1), TFloat64), F64(2)), + ), + MakeStruct(FastSeq( + "radius" -> Ref(iruid(2), TFloat64), + "circumference" -> ApplyBinaryPrimOp(Multiply(), F64(pi), Ref(iruid(1), TFloat64)), + "area" -> ApplyBinaryPrimOp(Multiply(), F64(pi), + ApplyBinaryPrimOp(Multiply(), Ref(iruid(2), TFloat64), Ref(iruid(2), TFloat64)) + ) + )) + ), + 
"Cascading Let-bindings are forwarded" + ) + ) + } + + @Test(dataProvider = "TrivialIRCases") + def testTrivialCases(input: IR, expected: IR, reason: String): Unit = + ForwardLets(ctx)(input) should be(expected) withClue reason @Test def testAggregators(): Unit = { val aggEnv = Env[Type]("row" -> TStruct("idx" -> TInt32)) @@ -148,16 +217,15 @@ class ForwardLetsSuite extends HailSuite { @Test def testLetsDoNotForwardInsideArrayAggWithNoOps(): Unit = { val x = Let( - "x", + FastSeq( + "x" -> StreamAgg(ToStream(In(0, TArray(TInt32))), "foo", Ref("y", TInt32)) + ), StreamAgg( - ToStream(In(0, TArray(TInt32))), - "foo", - Ref( - "y", TInt32)), - StreamAgg(ToStream(In(1, TArray(TInt32))), + ToStream(In(1, TArray(TInt32))), "bar", - Ref("y", TInt32) + Ref("x", TInt32 - ))) + Ref("y", TInt32) + Ref("x", TInt32) + ) + ) TypeCheck(ctx, x, BindingEnv(Env("y" -> TInt32))) TypeCheck(ctx, ForwardLets(ctx)(x), BindingEnv(Env("y" -> TInt32))) diff --git a/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala b/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala index 519e4f0258c..1b4efd82c17 100644 --- a/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/IRSuite.scala @@ -593,28 +593,29 @@ class IRSuite extends HailSuite { assertEvalsTo(Switch(x, default, cases), result) @Test def testLet() { - assertEvalsTo(Let("v", I32(5), Ref("v", TInt32)), 5) - assertEvalsTo(Let("v", NA(TInt32), Ref("v", TInt32)), null) - assertEvalsTo(Let("v", I32(5), NA(TInt32)), null) - assertEvalsTo(ToArray(StreamMap(Let("v", I32(5), StreamRange(0, Ref("v", TInt32), 1)), "x", Ref("x", TInt32) + I32(2))), - FastSeq(2, 3, 4, 5, 6)) + assertEvalsTo(Let(FastSeq("v" -> I32(5)), Ref("v", TInt32)), 5) + assertEvalsTo(Let(FastSeq("v" -> NA(TInt32)), Ref("v", TInt32)), null) + assertEvalsTo(Let(FastSeq("v" -> I32(5)), NA(TInt32)), null) assertEvalsTo( - ToArray(StreamMap(Let("q", I32(2), - StreamMap(Let("v", Ref("q", TInt32) + I32(3), + ToArray(mapIR(Let(FastSeq("v" -> I32(5)), StreamRange(0, Ref("v", TInt32), 1))) { x => x + I32(2) }), + FastSeq(2, 3, 4, 5, 6) + ) + assertEvalsTo( + ToArray(StreamMap(Let(FastSeq("q" -> I32(2)), + StreamMap(Let(FastSeq("v" -> (Ref("q", TInt32) + I32(3))), StreamRange(0, Ref("v", TInt32), 1)), "x", Ref("x", TInt32) + Ref("q", TInt32))), "y", Ref("y", TInt32) + I32(3))), FastSeq(5, 6, 7, 8, 9)) // test let binding streams - assertEvalsTo(Let("s", MakeStream(IndexedSeq(I32(0), I32(5)), TStream(TInt32)), ToArray(Ref("s", TStream(TInt32)))), - FastSeq(0, 5)) - assertEvalsTo(Let("s", NA(TStream(TInt32)), ToArray(Ref("s", TStream(TInt32)))), - null) + assertEvalsTo(Let(FastSeq("s" -> MakeStream(IndexedSeq(I32(0), I32(5)), TStream(TInt32))), ToArray(Ref("s", TStream(TInt32)))), + FastSeq(0, 5)) + assertEvalsTo(Let(FastSeq("s" -> NA(TStream(TInt32))), ToArray(Ref("s", TStream(TInt32)))), + null) assertEvalsTo( - ToArray(Let("s", - MakeStream(IndexedSeq(I32(0), I32(5)), TStream(TInt32)), - StreamTake(Ref("s", TStream(TInt32)), I32(1)))), + ToArray(Let(FastSeq("s" -> MakeStream(IndexedSeq(I32(0), I32(5)), TStream(TInt32))), + StreamTake(Ref("s", TStream(TInt32)), I32(1)))), FastSeq(0)) } @@ -1560,7 +1561,7 @@ class IRSuite extends HailSuite { assertEvalsTo(ToArray(StreamMap(a, "a", ApplyBinaryPrimOp(Add(), Ref("a", TInt32), I32(1)))), FastSeq(4, null, 8)) - assertEvalsTo(ToArray(Let("a", I32(5), + assertEvalsTo(ToArray(Let(FastSeq("a" -> I32(5)), StreamMap(a, "a", Ref("a", TInt32)))), FastSeq(3, null, 7)) } @@ -1604,14 +1605,14 @@ class IRSuite extends HailSuite { 
assertEvalsTo(ToArray(StreamFlatMap(StreamRange(I32(0), I32(3), I32(1)), "i", ToStream(ArrayRef(ToArray(a), Ref("i", TInt32))))), FastSeq(7, null, 2)) - assertEvalsTo(ToArray(Let("a", I32(5), StreamFlatMap(a, "a", ToStream(Ref("a", ta))))), FastSeq(7, null, 2)) + assertEvalsTo(ToArray(Let(FastSeq("a" -> I32(5)), StreamFlatMap(a, "a", ToStream(Ref("a", ta))))), FastSeq(7, null, 2)) val b = MakeStream(FastSeq( MakeArray(FastSeq(I32(7), I32(0)), ta), NA(ta), MakeArray(FastSeq(I32(2)), ta)), tsa) - assertEvalsTo(ToArray(Let("a", I32(5), StreamFlatMap(b, "b", ToStream(Ref("b", ta))))), FastSeq(7, 0, 2)) + assertEvalsTo(ToArray(Let(FastSeq("a" -> I32(5)), StreamFlatMap(b, "b", ToStream(Ref("b", ta))))), FastSeq(7, 0, 2)) val st = MakeStream(FastSeq(I32(1), I32(5), I32(2), NA(TInt32)), TStream(TInt32)) val expected = FastSeq(-1, 0, -1, 0, 1, 2, 3, 4, -1, 0, 1) @@ -1998,19 +1999,21 @@ class IRSuite extends HailSuite { val joinF = { (l: IR, r: IR) => def getL(field: String): IR = GetField(Ref("_left", l.typ), field) def getR(field: String): IR = GetField(Ref("_right", r.typ), field) - Let("_right", r, - Let("_left", l, - MakeStruct( - (lKeys, rKeys).zipped.map { (lk, rk) => lk -> Coalesce(IndexedSeq(getL(lk), getR(rk))) } - ++ tcoerce[TStruct](l.typ).fields.filter(f => !lKeys.contains(f.name)).map { f => - f.name -> GetField(Ref("_left", l.typ), f.name) - } ++ tcoerce[TStruct](r.typ).fields.filter(f => !rKeys.contains(f.name)).map { f => - f.name -> GetField(Ref("_right", r.typ), f.name) - }))) + + Let(FastSeq("_right" -> r, "_left" -> l), + MakeStruct( + (lKeys, rKeys).zipped.map { (lk, rk) => lk -> Coalesce(IndexedSeq(getL(lk), getR(rk))) } + ++ tcoerce[TStruct](l.typ).fields.filter(f => !lKeys.contains(f.name)).map { f => + f.name -> GetField(Ref("_left", l.typ), f.name) + } ++ tcoerce[TStruct](r.typ).fields.filter(f => !rKeys.contains(f.name)).map { f => + f.name -> GetField(Ref("_right", r.typ), f.name) + } + ) + ) } ToArray(StreamJoin.apply(left, right, lKeys, rKeys, "_l", "_r", - joinF(Ref("_l", tcoerce[TStream](left.typ).elementType), Ref("_r", tcoerce[TStream](right.typ).elementType)), - joinType, requiresMemoryManagement = false, rightKeyIsDistinct = rightDistinct)) + joinF(Ref("_l", tcoerce[TStream](left.typ).elementType), Ref("_r", tcoerce[TStream](right.typ).elementType)), + joinType, requiresMemoryManagement = false, rightKeyIsDistinct = rightDistinct)) } @Test def testStreamZipJoin() { @@ -2403,9 +2406,7 @@ class IRSuite extends HailSuite { @Test def testArrayAggContexts() { implicit val execStrats = ExecStrategy.compileOnly - val ir = Let( - "x", - In(0, TInt32) * In(0, TInt32), // multiply to prevent forwarding + val ir = Let(FastSeq("x" -> (In(0, TInt32) * In(0, TInt32))), // multiply to prevent forwarding StreamAgg( StreamRange(I32(0), I32(10), I32(1)), "elt", @@ -2748,7 +2749,7 @@ class IRSuite extends HailSuite { If(b, i, j), Switch(i, j, 0 until 7 map I32), Coalesce(FastSeq(i, I32(1))), - Let("v", i, v), + Let(FastSeq("v" -> i), v), AggLet("v", i, collect(v), false) -> (_.createAgg), Ref("x", TInt32) -> (_.bindEval("x", TInt32)), ApplyBinaryPrimOp(Add(), i, j), @@ -3447,10 +3448,10 @@ class IRSuite extends HailSuite { val writer = ETypeValueWriter(spec) val reader = ETypeValueReader(spec) val prefix = ctx.createTmpPath("test-read-write-value-dist") - val readArray = Let("files", + val readArray = Let(FastSeq("files" -> CollectDistributedArray(StreamMap(StreamRange(0, 10, 1), "x", node), MakeStruct(FastSeq()), "ctx", "globals", - WriteValue(Ref("ctx", node.typ), Str(prefix) + 
UUID4(), writer), NA(TString), "test"), + WriteValue(Ref("ctx", node.typ), Str(prefix) + UUID4(), writer), NA(TString), "test")), StreamMap(ToStream(Ref("files", TArray(TString))), "filename", ReadValue(Ref("filename", TString), reader, pt.virtualType))) for (v <- Array(value, null)) { diff --git a/hail/src/test/scala/is/hail/expr/ir/LiftLiteralsSuite.scala b/hail/src/test/scala/is/hail/expr/ir/LiftLiteralsSuite.scala index ce66d325fd2..0fad01d0c62 100644 --- a/hail/src/test/scala/is/hail/expr/ir/LiftLiteralsSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/LiftLiteralsSuite.scala @@ -15,9 +15,7 @@ class LiftLiteralsSuite extends HailSuite { val ir = TableGetGlobals( TableMapGlobals( tab, - Let( - "global", - I64(1), + Let(FastSeq("global" -> I64(1)), MakeStruct( FastSeq( "x" -> ApplyBinaryPrimOp( diff --git a/hail/src/test/scala/is/hail/expr/ir/PruneSuite.scala b/hail/src/test/scala/is/hail/expr/ir/PruneSuite.scala index bde41c3efde..97186fdddda 100644 --- a/hail/src/test/scala/is/hail/expr/ir/PruneSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/PruneSuite.scala @@ -158,7 +158,7 @@ class PruneSuite extends HailSuite { split.tail.foreach { field => ir = GetField(ir, field) } - let = Let(genUID(), ir, let) + let = Let(FastSeq(genUID() -> ir), let) } let } @@ -181,7 +181,7 @@ class PruneSuite extends HailSuite { split.tail.foreach { field => ir = GetField(ir, field) } - let = Let(genUID(), ir, let) + let = Let(FastSeq(genUID() -> ir), let) } let } @@ -590,8 +590,8 @@ class PruneSuite extends HailSuite { } @Test def testLetMemo() { - checkMemo(Let("foo", ref, Ref("foo", ref.typ)), justA, Array(justA, null)) - checkMemo(Let("foo", ref, True()), TBoolean, Array(empty, null)) + checkMemo(Let(FastSeq("foo" -> ref), Ref("foo", ref.typ)), justA, Array(justA, null)) + checkMemo(Let(FastSeq("foo" -> ref), True()), TBoolean, Array(empty, null)) } @Test def testAggLetMemo() { @@ -654,20 +654,33 @@ class PruneSuite extends HailSuite { checkMemo(StreamZip( FastSeq(st, a2, a3), FastSeq("foo", "bar", "baz"), - Let("foo1", GetField(Ref("foo", ref.typ), "b"), Let("bar2", GetField(Ref("bar", ref.typ), "a"), False())), b), + Let( + FastSeq( + "foo1" -> GetField(Ref("foo", ref.typ), "b"), + "bar2" -> GetField(Ref("bar", ref.typ), "a") + ), + False() + ), b + ), TStream(TBoolean), Array(TStream(justB), TStream(justA), TStream(empty), null)) } checkMemo(StreamZip( FastSeq(st, a2, a3), FastSeq("foo", "bar", "baz"), - Let("foo1", GetField(Ref("foo", ref.typ), "b"), Let("bar2", GetField(Ref("bar", ref.typ), "a"), False())), + Let( + FastSeq( + "foo1" -> GetField(Ref("foo", ref.typ), "b"), + "bar2" -> GetField(Ref("bar", ref.typ), "a") + ), + False() + ), ArrayZipBehavior.AssumeSameLength), TStream(TBoolean), Array(TStream(justB), TStream(justA), null, null)) } @Test def testStreamFilterMemo() { - checkMemo(StreamFilter(st, "foo", Let("foo2", GetField(Ref("foo", ref.typ), "b"), False())), + checkMemo(StreamFilter(st, "foo", Let(FastSeq("foo2" -> GetField(Ref("foo", ref.typ), "b")), False())), TStream(empty), Array(TStream(justB), null)) checkMemo(StreamFilter(st, "foo", False()), TStream(empty), Array(TStream(empty), null)) @@ -1182,10 +1195,10 @@ class PruneSuite extends HailSuite { } @Test def testLetRebuild() { - checkRebuild(Let("x", NA(ts), Ref("x", ts)), subsetTS("b"), + checkRebuild(Let(FastSeq("x" -> NA(ts)), Ref("x", ts)), subsetTS("b"), (_: BaseIR, r: BaseIR) => { val ir = r.asInstanceOf[Let] - ir.value.typ == subsetTS("b") + ir.bindings.head._2.typ == subsetTS("b") }) } @@ -1263,14 +1276,26 @@ 
class PruneSuite extends HailSuite { checkRebuild(StreamZip( FastSeq(st, a2, a3), FastSeq("foo", "bar", "baz"), - Let("foo1", GetField(Ref("foo", ref.typ), "b"), Let("bar2", GetField(Ref("bar", ref.typ), "a"), False())), b), + Let( + FastSeq( + "foo1" -> GetField(Ref("foo", ref.typ), "b"), + "bar2" -> GetField(Ref("bar", ref.typ), "a") + ), + False() + ), b), TStream(TBoolean), (_: BaseIR, r: BaseIR) => r.asInstanceOf[StreamZip].as.length == 3) } checkRebuild(StreamZip( FastSeq(st, a2, a3), FastSeq("foo", "bar", "baz"), - Let("foo1", GetField(Ref("foo", ref.typ), "b"), Let("bar2", GetField(Ref("bar", ref.typ), "a"), False())), + Let( + FastSeq( + "foo1" -> GetField(Ref("foo", ref.typ), "b"), + "bar2" -> GetField(Ref("bar", ref.typ), "a") + ), + False() + ), ArrayZipBehavior.AssumeSameLength), TStream(TBoolean), (_: BaseIR, r: BaseIR) => r.asInstanceOf[StreamZip].as.length == 2) diff --git a/hail/src/test/scala/is/hail/expr/ir/RequirednessSuite.scala b/hail/src/test/scala/is/hail/expr/ir/RequirednessSuite.scala index c75c4aef7dd..ce7ade253f7 100644 --- a/hail/src/test/scala/is/hail/expr/ir/RequirednessSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/RequirednessSuite.scala @@ -175,6 +175,26 @@ class RequirednessSuite extends HailSuite { // test bindings nodes += Array(bindIR(nestedarray(required, optional, optional)) { v => ArrayRef(v, I32(0)) }, PCanonicalArray(PInt32(optional), optional)) + nodes += { + val arr = array(required, required) + val elemType = TIterable.elementType(arr.typ) + Array( + Let( + FastSeq( + iruid(0) -> int(optional), + iruid(1) -> arr, + iruid(2) -> ToStream(Ref(iruid(1), arr.typ)), + iruid(4) -> StreamMap(Ref(iruid(2), TStream(elemType)), iruid(3), + ApplyBinaryPrimOp(Multiply(), Ref(iruid(3), elemType), Ref(iruid(0), elemType)) + ), + iruid(5) -> ToArray(Ref(iruid(4), TStream(elemType))), + iruid(6) -> int(required) + ), + ArrayRef(Ref(iruid(5), arr.typ), Ref(iruid(6), TInt32)) + ), + pint(optional) + ) + } // filter nodes += Array(StreamFilter(stream(optional, optional), "x", Ref("x", TInt32).ceq(0)), EmitType(SStream(EmitType(SInt32, optional)), optional)) @@ -495,7 +515,7 @@ class RequirednessSuite extends HailSuite { @Test def sharedNodesWorkCorrectly(): Unit = { val n1 = Ref("foo", TInt32) - val n2 = Let("foo", I32(1), MakeStruct(FastSeq("a" -> n1, "b" -> n1))) + val n2 = Let(FastSeq("foo" -> I32(1)), MakeStruct(FastSeq("a" -> n1, "b" -> n1))) val node = InsertFields(n2, FastSeq("c" -> GetField(n2, "a"), "d" -> GetField(n2, "b"))) val res = Requiredness.apply(node, ctx) val actual = tcoerce[TypeWithRequiredness](res.r.lookup(node)).canonicalPType(node.typ) diff --git a/hail/src/test/scala/is/hail/expr/ir/SimplifySuite.scala b/hail/src/test/scala/is/hail/expr/ir/SimplifySuite.scala index b3a7c5c6e56..e0bbad38c06 100644 --- a/hail/src/test/scala/is/hail/expr/ir/SimplifySuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/SimplifySuite.scala @@ -6,11 +6,17 @@ import is.hail.utils.{FastSeq, Interval} import is.hail.variant.Locus import is.hail.{ExecStrategy, HailSuite} import org.apache.spark.sql.Row -import org.testng.annotations.{DataProvider, Test} +import org.scalatest.Matchers.{be, convertToAnyShouldWrapper} +import org.testng.annotations.{BeforeMethod, DataProvider, Test} class SimplifySuite extends HailSuite { implicit val execStrats = ExecStrategy.interpretOnly + @BeforeMethod + def resetUidCounter(): Unit = { + is.hail.expr.ir.uidCounter = 0 + } + @Test def testTableMultiWayZipJoinGlobalsRewrite() { hc val tmwzj = 
TableGetGlobals(TableMultiWayZipJoin( @@ -95,53 +101,132 @@ class SimplifySuite extends HailSuite { assertEvalsTo(TableCount(ir), 1L) } - @Test def testNestedInsertsSimplify() { - val r = Ref("row", TStruct(("x", TInt32))) - val r2 = Ref("row2", TStruct(("x", TInt32), ("y", TFloat64))) - - val ir1 = Let("row2", InsertFields(r, FastSeq(("y", F64(0.0)))), InsertFields(r2, FastSeq(("z", GetField(r2, "x").toD)))) - val ir2 = Let("row2", InsertFields(r, FastSeq(("y", F64(0.0)))), InsertFields(r2, FastSeq(("z", GetField(r2, "x").toD + GetField(r2, "y"))))) - val ir3 = Let("row2", InsertFields(r, FastSeq(("y", F64(0.0)))), InsertFields(Ref("something_else", TStruct.empty), FastSeq(("z", GetField(r2, "y").toI)))) - - assert(Simplify(ctx, ir1) == InsertFields(r, FastSeq(("y", F64(0)), ("z", GetField(r, "x").toD)), Some(FastSeq("x", "y", "z")))) - assert(Simplify(ctx, ir2) == InsertFields(r, FastSeq(("y", F64(0.0)), ("z", GetField(r, "x").toD)), Some(FastSeq("x", "y", "z")))) - - assert(Optimize[IR](ir3, "direct", ctx) == InsertFields(Ref("something_else", TStruct.empty), FastSeq(("z", I32(0))))) + @DataProvider(name = "NestedInserts") + def nestedInserts: Array[Array[Any]] = { + val r = Ref("row", TStruct("x" -> TInt32)) + val r2 = Ref("row2", TStruct("x" -> TInt32, "y" -> TFloat64)) + val r3 = Ref("row3", TStruct("x" -> TInt32, "y" -> TFloat64, "w" -> TInt32)) - val shouldNotRewrite = Let("row2", InsertFields(r, FastSeq(("y", Ref("other", TFloat64)))), InsertFields(r2, FastSeq(("z", invoke("str", TString, r2))))) - - assert(Simplify(ctx, shouldNotRewrite) == shouldNotRewrite) - } - - @Test def testNestedInsertsSimplifyAcrossLets() { - val l = Let("a", - Let("b", - I32(1) + Ref("OTHER_1", TInt32), - InsertFields( - Ref("TOP", TStruct("foo" -> TInt32)), + Array( + Array( + Let(FastSeq(r2.name -> InsertFields(r, FastSeq("y" -> F64(0)))), + InsertFields(r2, FastSeq("z" -> GetField(r2, "x").toD)) + ), + Let(FastSeq(iruid(0L) -> F64(0), r2.name -> r), + InsertFields(Ref(r2.name, r.typ), + FastSeq( + "y" -> Ref(iruid(0L), TFloat64), + "z" -> GetField(Ref(r2.name, r.typ), "x").toD + ), + Some(FastSeq("x", "y", "z")) + ) + ) + ), + Array( + Let(FastSeq(r2.name -> InsertFields(r, FastSeq("y" -> F64(0)))), + InsertFields(r2, FastSeq("z" -> (GetField(r2, "x").toD + GetField(r2, "y")))) + ), + Let(FastSeq(iruid(0) -> F64(0), r2.name -> r), + InsertFields(Ref(r2.name, r.typ), + FastSeq( + "y" -> Ref(iruid(0), TFloat64), + "z" -> (GetField(Ref(r2.name, r.typ), "x").toD + Ref(iruid(0), TFloat64)) + ), + Some(FastSeq("x", "y", "z")) + ) + ) + ), + Array( + Let(FastSeq(r2.name -> InsertFields(r, FastSeq("y" -> F64(0)))), + InsertFields(Ref("something_else", TStruct.empty), FastSeq("z" -> GetField(r2, "y").toI)) + ), + Let(FastSeq(iruid(0) -> F64(0), r2.name -> r), + InsertFields(Ref("something_else", TStruct.empty), FastSeq("z" -> Ref(iruid(0), TFloat64).toI)) + ) + ), + Array.fill(2) { // unrewriteable + Let(FastSeq(r2.name -> InsertFields(r, FastSeq("y" -> Ref("other", TFloat64)))), + InsertFields(r2, FastSeq(("z", invoke("str", TString, r2)))) + ) + }, + Array( + Let( FastSeq( - ("field0", Ref("b", TInt32)), - ("field1", I32(1) + Ref("b", TInt32))))), - InsertFields( - Ref("a", TStruct("foo" -> TInt32, "field0" -> TInt32, "field1" -> TInt32)), - FastSeq( - ("field2", I32(1) + GetField(Ref("a", TStruct("foo" -> TInt32, "field0" -> TInt32, "field1" -> TInt32)), "field1")) + "a" -> I32(32), + r2.name -> InsertFields(r, FastSeq("y" -> F64(0))), + r3.name -> InsertFields(r2, FastSeq("w" -> Ref("a", TInt32))) + ), + 
InsertFields(r3, FastSeq("z" -> (GetField(r3, "x").toD + GetField(r3, "y")))) + ), + Let( + FastSeq( + "a" -> I32(32), + iruid(0) -> F64(0), + r2.name -> r, + iruid(1) -> Ref(iruid(0), TFloat64), + iruid(2) -> Ref("a", TInt32), + r3.name -> Ref(r2.name, r.typ) + ), + InsertFields( + Ref(r3.name, r.typ), + FastSeq( + "y" -> Ref(iruid(1), TFloat64), + "w" -> Ref(iruid(2), TInt32), + "z" -> (GetField(Ref(r3.name, r.typ), "x").toD + Ref(iruid(1), TFloat64)) + ), + Some(FastSeq("x", "y", "w", "z")) + ) ) - ) - ) - val simplified = new NormalizeNames(_.toString, true)(ctx, Simplify(ctx, l)) - val expected = Let("1", - I32(1) + Ref("OTHER_1", TInt32), - Let("2", I32(1) + Ref("1", TInt32), - InsertFields(Ref("TOP", TStruct("foo" -> TInt32)), + ), + Array( + Let( FastSeq( - ("field0", Ref("1", TInt32)), - ("field1", Ref("2", TInt32)), - ("field2", I32(1) + Ref("2", TInt32)) + "a" -> Let(FastSeq("b" -> (I32(1) + Ref("OTHER_1", TInt32))), + InsertFields( + Ref("TOP", TStruct("foo" -> TInt32)), + FastSeq( + "field0" -> Ref("b", TInt32), + "field1" -> (I32(1) + Ref("b", TInt32)) + ) + ) + ) ), - Some(FastSeq("foo", "field0", "field1", "field2"))))) + InsertFields( + Ref("a", TStruct("foo" -> TInt32, "field0" -> TInt32, "field1" -> TInt32)), + FastSeq( + "field2" -> + (I32(1) + GetField( + Ref("a", TStruct("foo" -> TInt32, "field0" -> TInt32, "field1" -> TInt32)), + "field1" + )) + ) + ) + ), + Let( + FastSeq( + "b" -> (I32(1) + Ref("OTHER_1", TInt32)), + iruid(0) -> Ref("b", TInt32), + iruid(1) -> (I32(1) + Ref("b", TInt32)), + "a" -> Ref("TOP", TStruct("foo" -> TInt32)) + ), + InsertFields( + Ref("a", TStruct("foo" -> TInt32)), + FastSeq( + "field0" -> Ref(iruid(0), TInt32), + "field1" -> Ref(iruid(1), TInt32), + "field2" -> (I32(1) + Ref(iruid(1), TInt32)) + ), + Some(FastSeq("foo", "field0", "field1", "field2")) + ) + ) + ) + ) + } - assert(simplified == expected) + @Test(dataProvider = "NestedInserts") + def testNestedInsertsSimplify(input: IR, expected: IR): Unit = { + val actual = Simplify(ctx, input) + actual should be(expected) } @Test def testArrayAggNoAggRewrites(): Unit = { @@ -232,7 +317,7 @@ class SimplifySuite extends HailSuite { assert(Simplify(ctx, StreamLen(rangeIR)) == Simplify(ctx, StreamLen(mapOfRange))) assert(Simplify(ctx, StreamLen(mapBlockedByLet)) match { - case Let(name, value, body) => body == Simplify(ctx, StreamLen(mapOfRange)) + case Let(_, body) => body == Simplify(ctx, StreamLen(mapOfRange)) }) } diff --git a/hail/src/test/scala/is/hail/expr/ir/analyses/SemanticHashSuite.scala b/hail/src/test/scala/is/hail/expr/ir/analyses/SemanticHashSuite.scala index 33acb8359ee..31e05af6add 100644 --- a/hail/src/test/scala/is/hail/expr/ir/analyses/SemanticHashSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/analyses/SemanticHashSuite.scala @@ -36,31 +36,36 @@ class SemanticHashSuite extends HailSuite { Array(NA(TInt32), NA(TFloat64), false, "Refl") ) + def mkRelationalLet(bindings: IndexedSeq[(String, IR)], body: IR): IR = + bindings.foldRight(body) { case ((name, value), body) => + RelationalLet(name, value, body) + } + def isLetSemanticallyEquivalent: Array[Array[Any]] = - Array((Let, Ref), (RelationalLet, RelationalRef)).flatMap { case (let, ref) => + Array((Let(_, _), Ref), (mkRelationalLet _, RelationalRef)).flatMap { case (let, ref) => Array( Array( - let("x", Void(), ref("x", TVoid)), - let("y", Void(), ref("y", TVoid)), + let(FastSeq("x" -> Void()), ref("x", TVoid)), + let(FastSeq("y" -> Void()), ref("y", TVoid)), true, "names used in let-bindings do not change semantics" 
), Array( - let("x", Void(), let("y", Void(), ref("x", TVoid))), - let("y", Void(), let("x", Void(), ref("y", TVoid))), + let(FastSeq("x" -> Void(), "y" -> Void()), ref("x", TVoid)), + let(FastSeq("y" -> Void(), "x" -> Void()), ref("y", TVoid)), true, "names of let-bindings do not change semantics" ), Array( - let("a", I32(0), ref("a", TInt32)), - let("a", Void(), ref("a", TVoid)), + let(FastSeq("a" -> I32(0)), ref("a", TInt32)), + let(FastSeq("a" -> Void()), ref("a", TVoid)), false, "different IRs" ), Array( - let("x", Void(), let("y", Void(), ref("x", TVoid))), - let("y", Void(), let("x", Void(), ref("x", TVoid))), + let(FastSeq("x" -> Void(), "y" -> Void()), ref("x", TVoid)), + let(FastSeq("y" -> Void(), "x" -> Void()), ref("x", TVoid)), false, "Different binding being referenced" ), @@ -68,14 +73,14 @@ class SemanticHashSuite extends HailSuite { * The following examples demonstrate some of its limitations as a consequence. */ Array( - let("A", Void(), ref("A", TVoid)), - let("A", let(genUID(), I32(0), Void()), ref("A", TVoid)), + let(FastSeq("A" -> Void()), ref("A", TVoid)), + let(FastSeq("A" -> let(FastSeq(genUID() -> I32(0)), Void())), ref("A", TVoid)), false, "SemanticHash does not simplify" ), Array( - let("A", Void(), ref("A", TVoid)), - let("A", Void(), let("B", I32(0), ref("A", TVoid))), + let(FastSeq("A" -> Void()), ref("A", TVoid)), + let(FastSeq("A" -> Void(), "B" -> I32(0)), ref("A", TVoid)), false, "SemanticHash does not simplify" ) diff --git a/hail/src/test/scala/is/hail/types/virtual/TStructSuite.scala b/hail/src/test/scala/is/hail/types/virtual/TStructSuite.scala index dc507c33cef..776fb25a3f6 100644 --- a/hail/src/test/scala/is/hail/types/virtual/TStructSuite.scala +++ b/hail/src/test/scala/is/hail/types/virtual/TStructSuite.scala @@ -1,6 +1,9 @@ package is.hail.types.virtual import is.hail.HailSuite +import is.hail.annotations.{Annotation, Inserter} +import is.hail.utils.FastSeq +import org.apache.spark.sql.Row import org.testng.Assert.{assertFalse, assertTrue} import org.testng.annotations.{DataProvider, Test} @@ -41,4 +44,43 @@ class TStructSuite extends HailSuite { def testIsSubsetOf(a: TStruct, b: TStruct, isSubset: Boolean): Unit = assert(a.isSubsetOf(b) == isSubset, s"expected $a `isSubsetOf` $b == $isSubset") + + @DataProvider(name = "structInsert") + def structInsertData: Array[Array[Any]] = + Array( + Array(TStruct("a" -> TInt32), FastSeq("a"), TInt32, TStruct("a" -> TInt32)), + Array(TStruct("a" -> TInt32), FastSeq("b"), TInt32, TStruct("a" -> TInt32, "b" -> TInt32)), + Array(TStruct("a" -> TInt32), FastSeq("a"), TVoid, TStruct("a" -> TVoid)), + Array(TStruct("a" -> TInt32), FastSeq("a", "b"), TInt32, TStruct("a" -> TStruct("b" -> TInt32))), + Array(TStruct.empty, FastSeq("a"), TInt32, TStruct("a" -> TInt32)), + Array(TStruct.empty, FastSeq("a", "b"), TInt32, TStruct("a" -> TStruct("b" -> TInt32))), + ) + + @Test(dataProvider = "structInsert") + def testStructInsert(base: TStruct, path: IndexedSeq[String], signature: Type, expected: TStruct): Unit = + assert(base.structInsert(signature, path) == expected) + + @Test def testInsertEmptyPath(): Unit = + intercept[IllegalArgumentException] { + TStruct.empty.insert(TInt32, FastSeq()) + } + + + @DataProvider(name = "inserter") + def inserterData: Array[Array[Any]] = + Array( + Array(TStruct("a" -> TInt32).insert(TInt32, FastSeq("a"))._2, null, 0, Row(0)), + Array(TStruct("a" -> TInt32).insert(TInt32, FastSeq("a"))._2, Row(0), 1, Row(1)), + + Array(TStruct("a" -> TInt32).insert(TInt32, FastSeq("b"))._2, null, 0, 
Row(null, 0)),
+      Array(TStruct("a" -> TInt32).insert(TInt32, FastSeq("b"))._2, Row(0), 1, Row(0, 1)),
+
+      Array(TStruct.empty.insert(TInt32, FastSeq("a", "b"))._2, null, 0, Row(Row(0))),
+      Array(TStruct("a" -> TInt32).insert(TInt32, FastSeq("a", "b"))._2, Row(0), 1, Row(Row(1))),
+    )
+
+  @Test(dataProvider = "inserter")
+  def testInsert(inserter: Inserter, base: Annotation, value: Any, expected: Annotation): Unit =
+    assert(inserter(base, value) == expected)
+
 }

From 5f0b0da80507cf3f7f9b5798781161f642f16b66 Mon Sep 17 00:00:00 2001
From: Dan King
Date: Tue, 28 Nov 2023 16:17:12 -0500
Subject: [PATCH 16/48] [query] universal dylibs for OS X (and update prebuilt) (#14006)

CHANGELOG: Hail supports identity_by_descent on Apple M1 and M2 chips; however, your Java installation must be an arm64 installation. Using x86_64 Java with Hail on Apple M1 or M2 will cause SIGILL errors. If you have an Apple M1 or Apple M2 and `/usr/libexec/java_home -V` does not include `(arm64)`, you must switch to an arm64 version of the JVM.

Fixes (hail#14000).

Fixes #14000

Hail has never supported its native functionality on Mac OS X Apple M1 chips. In particular, we only built x86_64-compatible dylibs. M1 chips will try to simulate a very basic x86_64 ISA using Rosetta 2, but our x86_64 dylibs expect the ISA of at least sandybridge, which includes some SIMD instructions not supported by Rosetta 2.

This PR bifurcates our native build into x86_64 and arm64 targets which live in build/x86_64 and build/arm64, respectively.

In Linux, this moves where the object files live, but should otherwise have no effect. The test and benchmark targets use the "native" build, which always points at the x86_64 object files. The shared object targets, LIBBOOT & LIBHAIL, explicitly depend on x86_64 because that is the only Linux architecture we support.

In OS X, we only test and benchmark the "native" build, which is detected using `uname -m`. For the shared objects (the dylibs) we have four new files: libboot and libhail for x86_64 and for arm64. Each pair of files is placed in `darwin/x86_64/` and `darwin/arm64/`, respectively. Those dylibs are never meant to escape the src/main/c world. The LIBBOOT and LIBHAIL targets (which are invoked by hail/Makefile) combine the two architecture-specific dylibs into a "universal" dylib. You can verify this by running `file` on the dylibs.
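For readers who have not built universal Mach-O binaries before, here is a minimal sketch of the combining step described above. It only illustrates the idea: `lipo` is Apple's standard tool for merging single-architecture binaries, the `darwin/x86_64/` and `darwin/arm64/` paths follow the layout described above, and the compiler invocation and the `example.cpp` source name are placeholders, not the actual rules in hail/src/main/c/Makefile.

```
# Build one single-architecture dylib per target (illustrative flags and source name).
clang++ -dynamiclib -arch x86_64 -o darwin/x86_64/libhail.dylib example.cpp
clang++ -dynamiclib -arch arm64  -o darwin/arm64/libhail.dylib  example.cpp

# Merge the two slices into one universal ("fat") dylib.
lipo -create \
  darwin/x86_64/libhail.dylib \
  darwin/arm64/libhail.dylib \
  -output libhail.dylib

# `file` should now report a universal binary containing both architectures.
file libhail.dylib
```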
Here I run `file` on the new "prebuilt" files, which are in this PR:

```
(base) dking@wm28c-761 hail % file hail/prebuilt/lib/darwin/libboot.dylib
hail/prebuilt/lib/darwin/libboot.dylib: Mach-O universal binary with 2 architectures: [x86_64:Mach-O 64-bit dynamically linked shared library x86_64] [arm64:Mach-O 64-bit dynamically linked shared library arm64]
hail/prebuilt/lib/darwin/libboot.dylib (for architecture x86_64): Mach-O 64-bit dynamically linked shared library x86_64
hail/prebuilt/lib/darwin/libboot.dylib (for architecture arm64): Mach-O 64-bit dynamically linked shared library arm64
(base) dking@wm28c-761 hail % file hail/prebuilt/lib/darwin/libhail.dylib
hail/prebuilt/lib/darwin/libhail.dylib: Mach-O universal binary with 2 architectures: [x86_64:Mach-O 64-bit dynamically linked shared library x86_64] [arm64:Mach-O 64-bit dynamically linked shared library arm64]
hail/prebuilt/lib/darwin/libhail.dylib (for architecture x86_64): Mach-O 64-bit dynamically linked shared library x86_64
hail/prebuilt/lib/darwin/libhail.dylib (for architecture arm64): Mach-O 64-bit dynamically linked shared library arm64
```

@chrisvittal, I need you to test this new Makefile in Linux:

```
make -C hail pytest HAIL_COMPILE_NATIVES=1 PYTEST_ARGS='-k test_ibd_does_not_error_with_dummy_maf_float64'
```

I am fairly certain this Makefile will not work on old MacBooks because they cannot compile for Apple M1. This means that we always need someone with an Apple M1 MacBook to build the prebuilts.

*A MAJOR CAVEAT OF THIS CHANGE*: Users of Apple M1 MacBooks must upgrade to an arm64 JVM. They *must not* use Rosetta 2 to simulate an x86_64 JVM. In particular, this means that Apple M1 users must upgrade to JVM 11 because there are [no Adopt OpenJDK JVMs for arm64](https://adoptium.net/temurin/releases/?version=8&os=mac).
---
 hail/prebuilt/lib/darwin/libboot.dylib       | Bin 9132 -> 132512 bytes
 hail/prebuilt/lib/darwin/libhail.dylib       | Bin 206928 -> 424064 bytes
 .../hail/docs/getting_started_developing.rst |   8 +-
 hail/python/hail/docs/install/macosx.rst     |  11 +-
 hail/src/main/c/Makefile                     | 258 ++++++++++++------
 5 files changed, 190 insertions(+), 87 deletions(-)

diff --git a/hail/prebuilt/lib/darwin/libboot.dylib b/hail/prebuilt/lib/darwin/libboot.dylib
index 4b3f2b01d810df6e79a25910e8b9a3b655aab543..ce74c699dabb50379002ecf255e87cc138c54b7a 100755
GIT binary patch
literal 132512
zcmeI*dyG`o9S88=%&agV>@LsZd)Ts;1+uds;Fd<5UD!Ivvf#1^N!7zVVQ_Ztc<(H-
z1$3&AbS0!qijitH{Lu%yQERP+v`KLks-!U*ZQ?5i!KUrf#6+uNAc5`gckj8=yF-bX
zR88aeB-CyF$IrJ^NkoUTevr`wr4Z(a584{mI-fZUZ@t%4|7Z0Fba*yUanCcQL#xT
zDG|w
z!zE3eWiD9{Jrv>P_$b_CnL;5sF?)97Tm;JQ#1b)HnRWfUaOGmZfw;ChPdX+zujvaIh&bZ4ojacC0<(Ou(j;Par-
zUEk%l=*|{i`%o_PrZf0G*RoAd6RBw%?
z4x`O94LN#lpKHBRDxAEfoMV-9@ewX1yJy;^?oDzgb{wC2o$Q=^rGB1r!lhMmbk;dO z>+Os>1Rwwb2tWV=5P$##AOHafKmY;|fB*y_009U<00Izz00bZa0SG_<0uX=z1Rwwb z2tWV=5P$##AOHafKmY;|fB*y_009U<00Izz00bZa0SG_<0$;m8RR6#6ow~eDmuX#o z?F+;LAOHafKmY;|fB*y_009U<00Izz00bZa0SG_<0uX=z1Rwwb2tWV=5P$##AOHaf zKmY;|fB*y_009U<00Izz00bZa0SG_<0uX=z1Rwwb2tWV=-vEILb<`z4b7Dj(WfhVs z)6CN?cTs;N*_%yrOHW+IGdXUje(7gQ&tx`{HLMi3m_a+_QHFFdYwgg;r*aZa<0zds zGr2w$RE|F?7OU?4M0QKL)m*RFJzw^jO znfs=!5szi*I_^>2-tUv1k8!RJ5`~Hq@nbkA?K^fzPe|-k?d#hW@es%9fRCnJo=Qmv z8QRE~;$n%o&UuC>misd}+bCR0+);5~#@FiaDEOtnCzk#)zLv8jiDkUoUt;h2Wqgh2 zzN|-LcYTUu4yBu&Hu2RPTNUxPig-swyt^Wv!0|-RlQ>W2EbE@axw1V(7x#Xhw|Q}W zJ+IueNz1#CE6)DoioNs7d+xj~td!lvTXJ)(kfQJ!D`i^YRWapA1mmI%c*uJ zRnAwX&)qla`x1NS3+nm!x7*2?Pi;(P^ZcoU^MCJrJzrG!m)JYs`Rhwq1^)8E2Hn4M zecSYW)AfBf*5SyxxlyT}+}->g29?RB<=}YbeEao$>-7W@k652>cMEy1`#3ALw56*> zEm_&I+IhPX-QxmffNx9YEn)8vr8XFz*LuPE-0vI^&nx5IBzvyP4Y-tb$+sAsb$MfXir8J3`yxcM0Ba+{qo1eN!%s7X*^ftQyJ4Zd)u>_ zcsLm|dCP=jW;cI$fE(iS@dEw;LMBdB$DfX{|29fQvgRO{Gr4qAQ;FnNQJ%|HT#Bna z<0!{dxm@)EQ4>G#QmJ#6RIRi0mG|xS<8VZp$>Yb4XmNroxRB=r>u>l;dcpPI|Izi= zQ6jqI;5=>%adv*A{OW4EZX1vOEHH~(GMB7}9*XdCd=&1nOtFxhm_563 zt_G3*&78~qshPz`_Fvs=6;j=et*)U=do5>qZmHxrHt1NKBwfW>?iUXeh3th$D-r}C z009U<00Izz00bZa0SG_<0uX=z1Rwwb2tWV=5P$##AOHafKmY;|fB*y_009U<00Izz z00bZa0SG_<0uX=z1Rwwb2tWV=5cu*0h6BFWPObF~e6rF%aCU28;Ot@A^KrmG_(^~U zgA_R({AKIu6CC@g_&WJ+eB)DYpYAJm@;S-9r?_77(X-#?I-fy(D(K_>oX7BgtKP6? z@t$jlK5Fn4AO7?^G{EOR`=@_+!=B@T;2@u=-1}oc_Z?_GeLOIE@I+wJ;QN6ogEIf| zz|_IBTbJ;9S_e+8Z5iP8?I{J+8)vsZH*hvUJl;RZ=Z*K$w#ZMwYvW*K0bPSc=%hQE%j4euhI37xqh{- zf5`PYTyN{_T-muO^SoC|Jm*wK0UxwavKwjICkwFf;t2s009U<00Izz00bZa0SG_<0uX=z z1Rwwb2tWV=5P$##AOHafKmY;|fB*y_009U<00Izz00bZa0SG_<0uX=z1Rwwb2tWV= z5P$##AOHafKmY=XqLi`<$&_j4>6W{wKa%XtCb^|2uHu;-w^P6LGo@!T zo5&hgid)Q}9r7qcI+(R~Xyj8liKcOs&YPKBA?=Lj34H42*yfnZSZZS|lU2D`A+tG^ zFp??NVON((HJPPmd0K#mw)3U5jmYmjvQg%~DQm=IS-OsUl(rr4NzcbPR|kngMTz(^ zoRjt)JESKhcB=OE?TUDa<1~+&a(OBx9b{-DUm}Yo;yULUo>=bB;Cz6yB#ApJ?#uWg zJ>JjvWxN|pe;I!zXGs#vc<+7h`el5L=f12*Vt0MYnM1nSX%k=hb~&~xVip~7e@8{U zyCR;z@kGv(I8WwW%Xtdt%JvXl-1~Lj=Ee2(ymHSbE$>3EIQx$)_RcHsx%0NLQg#z> z$<47sio$EGlxc;NcV==M!gcPx_&C&J|H&Q`-HG$!)-FETXV zlgTE-F|&K&{IF*hgY}5c92-~S*K|2o`E}W8GPm)2^N%8bmApZDhiT61oZB>>=x{OQ zr*S&|yyA%4A)ATEW&LcBiBtOCM;@q|x9NK;>i+uZ{YT#a)yvQSc2?WLSs$ezedZTe zTQ5HR#J!I{z0P>?WV%%K=4-Fr>0i0-lHqO7Z2RHPcmB95+BfIdpLf6NXFGy5k3ap& zzwRh3-}lxd!*lue1Fx2(B;Y`){kFZ=n`!@t>3{CQ_-`FmsDt{EHLw7#+KcMG0udF6pEdmeB1 KU4PJdYySquYg-2Z literal 9132 zcmeHN-)me&6rOFOP12^>)ILNs?KU*pAF`WBg&6#itqI;}j4?^IpmEsUyGd4V_HOrX z(gqar(1J?{?1O?&(gzw8bfo6rG>8)F2qS#w_p)V>ZUr<0+cU_W=Mxv$I>Dn-h? zBfaH?wCl)1sR;bf@q%zntH783#c1UYCytuSoLKum}s)R$3Dpk@wmjxT|apnePi3YhQ! 
zZ1z(QYUNn|48!~ZZj&hd>Dwsxp@H)p9qfK^{;jj$zu%Yr<+qRDtUipk7*svup*Bp= zrffw^)2ETs=!-%F;ad1SjK%ddw^ra95s5{wNJglcGiT!cnih^92#t?y-aGHFkl$??-)35m48mYlzcHcJvCbX z+A9BPt?uu?IadC?RloRv)*g8qX8-lE@@DlpHL%Gl|A3%qs8uEqcdY#5%gI-guO{Wp`fR-8*cFTt zqT`t0daUv{Reqyb@64>%dVhjd>%FDQ+p4Uh99!$=bjhh&?+2(W^C-lf+qGJ4o?m8n zzN2-N)=gRu(b|ADGCBGD%0YHGj!2BgjR>w|cMKBC^DMWg}@iF6)lRGH zFkl!k3>XFs1BL;^fMLKeU>GnA7zX}t40Imvn&z4g4@q8u+u35VAYQl_tqkGX(N>r2 zGL3s@T#Bz$abBSWrwJFk;9)x%$Mv!TFs2~9aby8_QbC?+5cL7g7H}J0ng#SFBA-bu zrDV>N^QoL8^Qls9DVuTatQ=S7Q#5TyoEG~S=4r=GryTL5KjCnbsC46u;~Gy?lvKKT zPbkE@P+PV5#pez9E8y9th*)ne%y0yq;wXauAGE_h5yI?00j)0^_Bi`dQ`k@cm_8SI z0yg~v%zpYup%h~Jhq28@Ej-id01o`)8VBo>!06^MB|P^EWBwx|j9+iSR~qnI1Lk9J zZJp4=(0ibeA>Io&_Qk~B_R!z6y^Rg+btob0nFhnpF&HYl>Up&zT@&ZMY|%?xO6BJg z%&P?R%rCrRCb5vh6?-Nj{A@eNi}p;RAok{!Ud%amBIN4sk&5u+?J56#?$jQLzX3Mr ByN3V( diff --git a/hail/prebuilt/lib/darwin/libhail.dylib b/hail/prebuilt/lib/darwin/libhail.dylib index 53cee1b2e416d9b9e1f601ac0741319eec0e49fb..da01854bafb4f941428fc8a3d7bc9cdf8176ed44 100755 GIT binary patch literal 424064 zcmeFadwdkt89%&BvJkEt6fhOkfRP4K$SRjmZVAc449o%w2(6~z1%)btunQr5Ky^R5)kDqs}ev%0A+vQ=bV|{*<1kq`M&QTZ}@!3 z%$&<}p7TA=dCqg5bJ?xu)@`uaZ0+neTL(PjY_<&ijW5A_H-vS;6W@7A(TG2`ZusB! zuML4V1lka2L!b?THU!!bXhWb4fi?u%5NJc74S_ZU+7M_%pbddG1lka2L!b?THU!!b zXhWb4fi?u%5NJc74S_ZU+7M_%pbddG1lka2L!b?THU!!bXhWb4fi?u%5NJc74S_ZU z+7M_%pbddG1lka2L!b?THU!!bXhWb4fi?u%5NJc74S_ZUeq;o;pZW3}4*uJ5@ZZ)R z|DE_>gO`H5?!2U*;BR$1n{D>&>F!z6c~I=14BQf6{bzhT5JvwKjJVmey>pj)qY0bR zOH3AVo(A!x?|AVl{>Iz#te>-IFY)^BSP}_pN-w9E;H%MK&-^wsBpCFl&h$^vn>}yt zeT(KU=HIdPoslF6%_5-i&-@l&_}h%X`S;KF&R%%;-Am?r!DdtbN_q==W*zApFh~CN zioA)J*|YClc>j_pfKBNI40Dy4`zw zG@vPe#g7X9BqIrZTj@n}inrOb?^qJ;{mtvk40=(1t)80&NJiA<%|E8v<_ud7KN8diDo*d;L`3%x(Y>4~C%Hw6o02Hm%OrJ5BE?rz zIlSa8Jkb`4vrkFj3`ODuS0c`tv-xiNH^_NaK5s70-|1tHug`#$<4bDn6~{vXB#Ma- zF5L;<_f;HR@}a%rU}z62bO;ss$X>BGbP&w{kJs?@v8@$Bs@=i;S3FDPPS+yU{!c4{ zsM*2&TZpT=2v-NK94A%;d9649;96Pv&77He%QVSfc}~%0CQ4cdMVp=A+Fx;cDS%zr z^~`Ckv_|S*ab{tUz2#*OF6^@heD>IfKXny5P_^>1+xPd`3%>V)w{I%8>_1tt2a1e(4Vl!e#T=zK~bOcK#D)Y;}Lb%V?RkzyCPA$ z&8Sm~y;1USZYPa6>3r;-a5$`}GZPhcc7mjKxHa>anX_{?JFm*dJ_^Va)h}~K=5*BY zi&RB@&!bk#>ULTEMpnO-)M{x5lhhg2lIjjf&R1)svP1FsF5Qrf250g&Hk<1Hh<_;R zF-e zD4kR6Ta*!-d@oA=^3+>1Z_CUP5J>vy+HX;TO|Aed_}XUM2%y5uiMOWhZ{m|AtJ`FC zx2%3Gsks5E%3Y1>*zl~Or=nknx}$#n&*~*Tx7vW^DC-B3>ShGhM1rUkbuU451(rr- zfB88HFiD!H#)aOo(DC{y$W;-q|L#aFsp2&}Y*$R!4#~f%oigIk((Q_t6OgqfiL!R7 ztgT3JyC9@OP;UakqViP_E$mTQ{vrf4P`+yZu^v@X34IX~T6Q4`Jx&sMf{WP}61$y> zeV1b2Mq+r7zPC{PAs&yY-HLrXMeT@0?KGnhyH)lF+IdFQIUkz~++`J9t1A*@^-?rb zQfxD&L$#>~ctic~gxOGcIN*Q0(bmmN;9G~Xe1Cko0`D=h7Ih)qFQwi=`c^dYmSAneQ^g5`H=#dqo<1n3Jj z)Q~}lgrc5<3PgXSwtKQ$BC2a`zb6xT$mwHGc5W8ci9$rS{KF>24a((T@Oo7c%UOYnI zVTwP@;}LaGv7e@>-I1t|%&0?({R|(}{b#RmK6*PCoup>7#7U~c5+$5m1c-mLOoQ}0j;!y%N~YvdDUjxU?#8H zL=Q#FEmTG{Iv>kNUq>%5lKfTeq*aw734bUf5Rp^te9R@1l(ILDQoqlwVgIBTfB7tn zDZuXi%w`K6WkcxkDeSkaPSP;^6}JpFA^k>h7G?_GqV_GUclE5KjZc)+35gYriR~*I zyLXVZM-pJnOX{w0(Hhb;YF3g|m6eQFTUAynk%6I{1CyXqQft{?YiSfe#nv#gIU@Ex zw>N$a`dJ2jDVBc0PG|nB;K;OVrcKRMcT4I?S%uo0l00H&YOY5r*y4pi8P(YqE-FC= z`p#SxwnM3IpqPR}V+H(Zp5eS9LZQ&KC#zY%qZk{qAxRv$@~we}P%YfFiIDZ-z*#rU(J1b=)1{K>76G{@NP zb_fUvx*H|EAPr>j3WM8?26i*4lr}l0@ek(w$GeOEA8D_roIjvG^}IfgX-3|W?wIx(`gydyrcLGDds(tn z(BSPOVb;S1oq9o&4lUxApiR*cc)VK6r8XzHc|S>L6{8Kjmi|4Jy$DUi=HO36o`00` zrWxh^0n&>i&3`|dJ~_Eh=&Qz0W9svx(f_SMe{~D=$)7NNn))H;UR+Nt<9ccoHBVQe zdJOoD5A{H6Y6*^r@yVI$FQJyWdUNdz7X?UxspZn0AELc5v0{HXEHoN)Rx(;vQ4hJc zgp24nO{_ATC2NzDq0lnby{-U_DdYwTcKXh0?{WE$=^qd--uVwwxUV82xb^>9bpUekwi928Ruumuu zqxqpaOMm9xJzkzk@3APofhN7bv_wxg=skUYdc&ghMw;|)Zi(J#gWkCF)BAi^q`s3( 
zdhJ@G_azcieNVu;;QzRMtMyV@9UQ;)!k^4NdV2f~k@h7IAHJosg9(zonq0gw_Kum} zh?mRix+r!yc_ZUzY>?Z`tts=`Nk!35G?ZKVj&a1@uthu4V zb1#tR2=eSk)+EW^NWA4o>#wmU$RA6y6Na{X=0j~5w?u{r8L z(7Zy?GQimp=v_rUAz3wPX1I>&29rX!8u}vQU!H0q`LUwSO=KV3Eh^k!;T1j>A?s1= zOomNwZ6>(Rv2bmM6QVKlX)VuFV|ufxK!{CFBAaCtdaa_(LS4*4;dzBM>roFjFZff| zqoslkc{K2X_Z%Mfi1+*>RzFu%?gm+fZ^E4n|2JfgjMVs)SO_!2H>W~c63szrGy4eY z-xUZQ<@!*Jr!lmjF=(fOwn;aZX4sb+_9Zu`{7*sotMj+_5QF}33w=~Dp=lXuF~vaH z8ZFP3n8n2U^Qk_3{A!pm&+fabA#1ALm)ek(ZuhwwvXbq-pEYD9*nL@|>uiR-ww%?& zQlfO;J@}l%4)V`*yLUT2CAO1)CfmK0_)KGD#t{kcdVD4cGQs~K(0NrJ^;i>6&V!;Y zOY{^p_Sbc%9F1!3<*F_8%_K~}4L(P%RpVIK7yJigvak2VITNg8Ewuto?; zX2bYIu!_KBHVg?v9|%g8`A|eYF>#=?f^a%xL8XLys?Sz5&cr;YJH}nzu`(WGA4cM; z2}#7egtET_Ny*<>@AUr#{K{&DqWab#px3eDVdCtrMktC|P^zeN;dQ$=nbex*D}j~94j~mv z>5l=Mvmxm-kn9*+21B92U26Ao#~?cfQEK4zvU|Ho+InK1?L%N?RytYu83c}^FOfi6 zQNLoVJHfjpV9c6>_(c@o;HY!kZFVo_b)%2qZ38z(z6O3x(MW)v&_DeJu8x6J#0?Ye0WJz{!f5_RaPztg?8cEQHCwQ=y{RzV_rSk2Q^cAvE zOLilV_6}CmFuNY_)ShSvn)*8Zhq*uqgnwd1ctllJ65?z@Fa82hC^jqNyI==!S49)V zwFNaC7O3k(>;cqE08Y*N8oq=qJ;DpE;>~&~3LRTmupHbd`dbg;5lW>s5&!#W+;WHr zyEjr)Awto(FZ0A|ZXQ(Znn?SeVeLe5W5DhamK7=ON=s=ZL}LrxMG&Ijga*Wx6Bj9` zSd=rj2rl2%Mj?v&xuTw7e?lTuX$)j&J&7_5%~DB!mN>$^zsRF9mOhfismSh|(!i@_ z_sz0M*Kf#f5_#D+obYt64ZK5h;Q?jpq$D%qoAF4rs+@YO#TkXpQOs^aeuJ)e4MGap zSPFso`u-gHn@QmC2&e)V4AWA!mtOl*3-Zsc^m!T55zFkq^wkj&MeH@aIy+N3S*B=w0vZKZ`IGMxvXNb7yd&>)j47Zde~j#T-$z) z-P;o`+>`|8+D;8+c3&@9t(JGN&?T4J=nyCve+AZSzgxTo157oA>+2;v&=uKp9|B74 zdhd5grMdCnlL2OlaY@TylKP~B-VufQasj8QIc6fU90}6#D-eWF^8K;5>0Nu4YHfhtNzx|533PK}s6Dm-NIQZCqpk*aB~L7& zOuYyh;0?xDF`yuizw@CNLrO*>dIL1UvlQI0$?nCrkGCi}iYde+)JfWL6zY5x1qr6u zq3cKiL+mJzs08I;E)>{MKro5GSrphuu*}<1-65?yPIF)C0*I7G6wITCGi$3nzpCPA zh;UV_Wvh-CPkZjVZ1qgp(JuZ&PJQuA{h>7fnNAtbwY%}Z!8z^(k4&J*hh`#1zd6mh zb|>P(&T(fl3g0RL)r`XRui?Q{##a$a_9bhbWzW=7t&G=JDZd}KP;Haf?hD}441QQVt6!zQWhXnLePthrnH2HCeF zsfL^<)!Im^wANBdu(ivMUPsXn&CJ%vrBPYnsV0iU+k!DNBSh-d)`)?zcOt1$&Xa0g zBvtZxQoV~*>ejNOeOhWfl=WXDJ}E+@8BXhsBt2S5LgW)EPE@{G-8UdVWMHE~rZq_d zGXWzBm!}qltt=StKj_qNnyEjS=08o%xt$Bi4&*IE-oj@$;jh2WsY^5Usc44APR_NP zoNKo@$5j{Vw9HyrP>#RUjY%?Dza5Ztf8()bxn#$XNqY&5gJF`!)mmYk6y6*fpr|J$ z|9eiDQ+D?0cIq>FF)~YPW+GohRDYtV55RAQy*6ClF%ic+l`XHa*~V4I--z@M`)SH% zlSaC0d{<&nClwzvsIMoA?1md)>TRPU;1&KRNM&p``|V2-XYuN)xVERXBzQ5{=WInQBW_}iF_fUz8u18_|dWLuB+MYLzwA&2TGjtf< zC9UUA@i0`+&>nbaUlDbE=rP#m+_^>!>_Zsjy%*)}Bx|=Mc(kR^gO~!4&LWR#16zwU zDB{mQix%-$yrYO`@Gy&5MM0ogh}19z>p9MVvw{8N1A_31$4}8CPSPfkuBSSfx?a3z zk*=re0QGw*tuQfA7|f&o9_x5s$Wafo@^qT2J-!-h*X|o6>l=Sh3RoX{81^_z z1~25w83gKpa%M8H^bMBvm3;wWXppRJ)Yssj{Q?m+&|^}RmyI^UjxRm>+<4jlEoQTaX=3UBwpX_LmKW{3tdE$$ z4R9A@=T-?Avbk-CvkT`IyY`2MVeBSj*`t1LncRL1*3e#JR_iNS;WA zW<$9hYlJUK&g?y$8A%)Z39c;)VS#XH1zxzWILdAY^W&=G2M(UMVD`Am?1Axw zf0{YVqph41>LB~8ZsJBjZZWMvyt@^pYh+e3RajxPe%~wRzIdvZ!4SqAL;;$UE;P!hnE3>bRhrs0)W3m-s527n!=KdnD9rDa5 z0iG){2SVXkINpomDavd~$-QmhlN^7v^$&uJfqhS)3GqMIjs*?w~M$$ptXS2nB9Z8>k34 z6m0>)0eA?Jf#8@UGI#OfCWzcKJkoKf=GhvTX=>#eCDdnTyaPpkaA z6^HSlJXB>Qf`Vfy98$w}Uke4)1Fr%0KE2X>H^9p9>g*gsb8aR&-c(NGL=`Dh9)`|y zEy7CJ0Sdb+6d!6A>cwM#5iE2lo^pzDa2k@KVkiC-HGtYZR*7MOuV3W^`=f!P^Szwl z-Ctmlt;P8twSl6#vEPim!@Dx6H^lhp_SX~n4(hTbby*)tpOB3HRV~2_KaG&%p1P~i zjp3=|CQUctrOQfI^gH{8u7~$75bLAsh8l1mT^4L_a?mYFR@8e_70nkw6Yc`KVHs=4)_N++e!K-p8A8 zjqL?qaE*=QuCc#Gf;Wlafenr#o(Zx!`geFEEjoH7CF4G_pMb-HfU^+0tFfCViN-+a zHJO}Y@bGz|CRWUXiALLO*ws9(cc$det+6@tb)?N~=nze}!QLLD(pu0!M|mIRW3Yk$ zZC*C_B$lFA)v!3wkXDmT=k0*<$bo}s)*q?;E0Q3A4wU3y(Z}X2I19-qD0~a36mPtu z-8&0BZRmtNtOGL8(0>`W17(yF+o2%_y&&1M&^lYbLS3{pwyOe3^`Sblv5u)lq_6*=otR!D&etiHi}^Vkqe!Gd?)|?DenY*NCdEv znA|)AL$~rT;k4)%BJ1ynE>z9|2CR#pC)?#M!WNw0Jrhj+hx@B{GIs83Eo0$ 
z?UDYJ|wlpMYJ=xOK71z7) z;%YE^&2uP@Othv}_cI`l0bu#h(HDs%^_ia`ExOnEW|jsVuSK4M!`>Wc;orasiO3w} zC+{RpD?Vp;;R{2o$Nm`^8sx1D4L7t<9F$NZGNH{!mt2o(9Y&i<>KPdrz0m0QKSQH2yhZ&;=;|Q5MV*hY8dx-ecN5>Pd>73VE_U{0OjpplF(f0FyG0&}j zfR$Jb7lpF=k=OwNGV8AcnGmwWYNPtvGI@`3=7X}0M7`iLJCM|8Cc~XKB^^w|c!d<~ zb;EzXCyw$}L5GAhNw}T)e?_hacL}VvoY&x+BXtTx>?Y18Hcz9gPDfWuK8k!=I!~zX zqeL?T=yGRZ-$GSoA~i)(cn*dC9pQ@Z_=X#9V34I8b{283YaEV1>@?@Y<^Uc5WRQEm_iCvsfQ;dK$yt^~ zJ7RqBD#xKWho{2guVrC4;z1E@b_y*~$d*d@)^bWOChumRck~~wg@kOUcYpajOLss! z&SFH3s#3&mr#k6FlW4L-+iCf}*QE&LdNTss>)1^bB<~7jZ{t>;rJ6sw?!-fkIwdG9 zKh~uKjm$^f+G7+uYf2*q41a0EH|F=81~iZBLl#5Z{iskJhk4muXul$ zK5gpM(mly&hH;c&K=~|5%TJ*IEXZf7@5Ae{m#JCO_gmr8cL^toVQ zqzn0e%e%*?$W`#$gHh=5u=i5*TzF@plG$J|6uc6>7GOZ!*V6=m7K>TO-dH(x7qzc) z{{}N$OwTXnQ(H-!2ABD*+-1I5%xocE2iU=PsYZNe8!%_KMknbGwIp2SwED!SDa9<4 zfY4va6CF1nJ$79JxFZo6+Xtey)>o3zP|4@KzS2;fv;(uLD)ur;ajug#86!?t(8L>91h%4TE<+!Z}d)8QFL4`-=P{*v4=smX@7ye zhZDG@TQ6yg1-PsKL5Lj@w)3Z&}<`#haWIrr_w=&ix{t8e9R6o&iw|dx!{=N^{H~wZjMI~P2=@eG?`_*UUCL<5j$T09rM2b>+$+? z5V`PpeXHOtmmc5^mncWZ>wTw5oLd^NH@;16?mqu`{fCi1!gxJ}O0taCl^OWH$npA8 zo_BPpA0*@|dT%yfuc;$yR23}Z^VCD*KYP(qf3`kFNdj5aYECxk3)M zZ+J`N^?Peu9IrosC`QNY;l%U3NJ+Y(_N|!lnxFq(^@Z?v;LyuwG(dEVlgVg%VFpf^#yKeVgDkcbMr(rc8bva1S{0sMI_vI&}vf= z2t#FcU$9pRwjeUB2LZ?3C!g~%Y%_PCbkXB`7t-my+_KOBS^XdOg}}|pccpxcjC?BM zt}%9t1aL^O8k3e{K4O1PPP;{@d}d|*RVd;ZdySCbMf|#15o=JyKSI~U7O^-|M5$54 z8WaJm4^`qm!R{t`j1J`5Apa=j45@0_UuvmCV#%LWV=22Fw78p#K5JNKe1aBIkG=^o zNP5qoaoy=bU7;Z`!rXifub(+9E0VP3eA`(Hd;Cqx>t2m+>BT05ayY0{0XpVtVQ0a9 z3}T@;x9c!Xguu@lNq?3aLDI!(9G#=juyu^gV0WORe&!Y!U>=P11KfPNca5oJWoKD- z3b%eQp|J|B1oM~L$_557?%|AoIBF^*H99$&$};WA5A(EP_R$-Wiq69-4k~&RsX;wj z(fKKyS`k`qHBC3$*UMpypNhI19>L{liaN*GZl{7&LjmC|hQ4&Xu}wr@AFW&kS<1sZ zT;g*O4$+&(YyGVF!idMko4q#*>9rSlWWj#aqFB*hCfSYFq!pvT6R4ZL;B!giTaLs4 z78z6X6!mD6phD{~+r$w^O~lsqM1zq>bPanIuZBky-$m>gwdHj*EYAG^Er@51@tnb* zFxGix*{()(`&(%&HsHyUwDkl}D2)%$kisjt6h8AhC}P9P<`CKcp>gzQDo+xyduc;W z4SRr+U@!&o(G9q&fL)6Kvxa~nkw=vKoa$X}*sA8+%7nLb8o`rat_80kk ze&BfjE0Aw8-mm&%4IuQW6)Mh-yd?n;Q@weLEygv}h}p1Tp(Cxv{-xze8!nm!>nJm- z`(7bALDx4=rR48-KI`5t-}yd-jti8k6X>RfQwv1L8~th@JNnClVzw6^9dr%$Q$R|Z zNYQvfPXEqk3aLcV=e|Zduhz1(a}fE-8;AqV#_~=Hlt?M07(BySe2#S9!@vPLuMfI~ z57DFnMt=zo(Dina*1;?oI;Mubb}c7nDwsS|!3<#iQ4Go`Bn8tOl0Z9)NW{S$Sk>mIzo>p{x2;)msM?M(b4lbLx>#>xm=e`!g`q3Ab5$8@&5F-xI zr+tufF%)2eicNXo7Z2(t%06qs$Ll-S5<|l8m zBDzmfXBoOr|Bqn}3PbySe4<%zxg7oT3A{r9XK~4Ufz+KT%%r7gtmXR~#R(~l29%Ib z0eFXC>tJ6%AyPihMDFSfYC;=IG|NDTiB2P^w^^nU)b?y&qM_eP*mk@+J9Bjd#{;e4 z1Xw%JidZbUdF=o8ST5kQs9aAX4~QU+WDmA#{YwNSZM`%_;Wq zzYKRr3;fgh61r}MU#9adD<1+S71`%b-eky;q8`VI47Y2G=|KtVNBMN{!t}hY)60AZ z?Gtgz1b;>(oBLy$@RxEvdQjF;dPmU!7pC{ojjh%<^Bsszl-~0fMKAw+^y01bCSDZ1 zA?Kq9`Hb>+hFwg358rT}`nI>y`{zZ`d**!fI#}ub;-ctfpN}5-uOcGY>7wX;?P|5Y zR(W1<>>}m)rPk>+k>|^a=c5-R&-=c-nELiOAH5iPe&V9&mBZ+1MgL{K6C=+*yC{0z^U;fu=cBauA*OvL z-N7)Cr9+eAyru3~in&hYJK{g%3zfU*09!tQy!dT{m`RsA~L-t`#ws> z6bx<7ml{TR?A40B3{io;vi*!~KjA?F`zC3_l#HUmBY$=9dByqaF3G+FEFmG*H@0JY zG%B;5c%;Lq7&dwB8iS|FzAs&JV)cfFYoC5ocD~wPddH+912yNXV2d_yN*m3JBJAN| z^<)$b*hTF0luKvM{nON{-SQUMRYP1|hvT(4h${^MdM|D1C)tln_S1yyIt$`kV-VjO zgZQT4T<|4tjPUiYnGyHh!KFP6$J%Z~1E5XOnAyW>aNE-5xM3;HgAnF%?eI8XeNRz$ zD6TCs8dY{}p#TrI`jve-*yH*DUm*`D1Z4Y3c<3kVL$31JzXIzG0~Px|$-YTeLE@z? 
zJv{ck9{cf$T6TY24iuQI-PXTRQBUKZAVnXQ?y=X(NDA!LU6dE8_sRB3yoLu#xFE^- z>S-x%3@-hX!|%)f!vWakHJFzOdrnb9XbA`ch>l5^tlc#Rhmvc7SGYHt7bT8K!%U}I zwg*5j&@0+T)P{;mQd%uziQDxJrA_5L*T{tXhtfMHZjgg56E$T_(&V@Dk>_v6$S3Uifp_{9%36s?i`3e`w0Nqe$HbrCo<`i%J@&6<`}>GJJkVp` zq1fxCIPj?g2%;9iN##n99UCI6GL#XyHO_)d$gFqmjOlK>B(;V_P;u3ubD>C_&?~d_ zKSia@^N8LQQ}G(beiTf5T*pzfZHnlmimP5R`e_UeE^}ZBUBR`3s7WsIC5kr6wayCs z=i(P%nyGcmgh+i`b}$Zt8+2mfu9z02DuwMN=tb_;wRmt3gxaZ$zN2q+5`oM-(F_B1U?YS&j_l~mT<^B@Z~lKOZa z3;>Uwo1v&!U)Go56z@=tQjmUu;^DU?WH3qXLT5EeyJ7}~YeMHR+5Rmi;+S+-Gsla_ zP?eujNYVJ0iT5Hr(M*`2jZdJ0~mTTxfgys4TkDyBjm@nB1=3}q!5J0r5@;XG;9tg5W#NyvaHh1GB;SBJk68nTww zG_L^>et1gqhw{kUzYPK8pm_byn5mif`H=qSdu{+j{uY7(R|ZYau%LkYNVq#9QPEf8 z$hBi35(%azBsEN!#a*B)=CR8R;_rgEg^6N(8s+o(kGQ5|C+8q?e%HK@GZmZi#KBqa zSb(2&3KHh!mSC3zW|Dv;j=)p&6g(e}OU>5qi%Zb*;|O_wW7szln>_-^T21>`4*ZES z49%;?13|2se*>JV4arC!zA3%Xy_?5eN8D_1@6J?3BhqvHjVn(-+y9 zy(`ek`?on*hzg?#@-y^;oiVXjm4>>}42!rb zK|D1QeU-4g%`aO0{D!hpjh8dYUdx`uCp$t3Ud1GseL}Hl)X!d^XBA6+CmbgKT4X%O z0q=Caed0T+2mD;L3UD@C_57EHi2+djQWBjY8-T6CQ-W9%SVQO!XuuijM|z4CHypMI zPo1NTs2AH;Jq2GnfBiV9Ve|53ON`Brnk%kI0_Cx#^POn1vU1J_{xWl{dY!HGVaDyx9(X+lnw9ho8;V{&)G`v#7-hfgf z{g3xOHXK3dTUS$FzQs3AcnkOi6EXZuo4S#75_^wwz!`$3xADwFH&BUM&y_~O`w&Xn zu}OWR@?rW<#r?q8-t--8#moo?*mp)34A-)ae?SfN++<1R8`Ah}egXE-zK{-O$6ge% z>4=qRkb(!}*E}d^Dl!u;EAffc()LnvMXEmzgab};wnU06rN6&OD#k+Z?W0Kxy zkmNs$UHaq5cE?@@fQW!zw1Z{Y<)(W%V4h#F?Jvv;xNyikcCD>FF+_t?6gOJV%T_-H zR&-hc$2c&t@cqVErzDln_rVoU)>DSlnXe~6p}}3qJM7T^ISgRS@xp!MN7;NN0GQVh zQd|bGqZS@g;@5i(DvO=Pr8G{k`+v_*BVhR!-a=e-C*S}sd>|I5OXvieSU_v+ZYO4- z8U<30WE4s3ey}U9AT}V16*&A71uBJR1LR*JxB7ssv9N^(M^hMyI-o~bY1Dxt0cs$MFQL`er#1RXM(|sDLujz+k z__+#>3l-j;ZZzPy3>E@Jdm=9k#sSMApqk$qV9?Hati5vcjilzJMQS(@aJ0IMA)VwG z#zbVSB-R5l9I%^EAL7hd79@o~G(XNj=RUj`1JMAa12W{tjk9|%HzX!NkcmnV*eMaZiod>0uj0hS^YkiCOgx2GtPf&)I8MN$E6;}+ zQWN?)tx0GFq+02CfBrwfQ1DJbDZ*)ug@?l)wFPOTj|eB#%)`;%w%`JU<9G$d@x5KN z7Ff*YJ||2MqEEUJ>3u%bJC8CJ6d+@?Cq*tDpp^t3|FVcDtVXDN2yWLe*^ijdQXUiKHDGmQ$v!rf%CFwz-e8@eIh z*wt3XrXs<{PLu=IvkM=%!UnhU;85f(H-)Z~wGE^LL2U~JhB*W^-|JXoS%(1Hlph>} zKulZaAkn)B!yu0RN(aGVgJYzPPL`kt^&E#c(l0%)#S2ot97*{$Qi9xn5U`al+1@1y>kEv0|P&`iYVyo)V&yj)*!n24+t4g|qKme-y=;6%9PS2qgf(qswXog=< zLVa`qVH+;Da{F^ct+25M4;pu9&%w9+;l-^U`zz&pf&( z-uclzDpb-OBPlJn>-oXzhJqw039-*56>n@GNK#>s5NUB{j}t*3o=rhIx4hXO@E&s9 zO_y6?lu5Qn7cw^U&6;NQQ|dXhY|B#|;U@D3a-iytSrL{xG(27c`0T#n4So!Qc*$zu z?g+YLCu^SFcWVPK?DE~kFZRM=p=7&nDZkijr7=g5w08r@$9jN0g9P2!^)6T0Venv( z^S*{aq_MH16bRDZmvORZIor=ar`o-L#OECLKL1R&dw+{hiLK|K$#(A(_)KH};hzb1 z?<#znmxBEsf#N)1bbjOiLgK%C(tuw+!4Y~zzu#^g`&~+YnFL%Xvg0W2^`cXZ#~C?p zZlfJhm@T>k-Za-1`Z=2SB>!pqvca&%!&2W$+9@Sz@%1$AS75>7+NST|HxDjFQy8DY zU-9=!X~dqzxV{#5xWf(CaUXnV$DCJ{vprq}XXs4zyInz$B#B!4VK~TO4i6PJ(`s z)lZy-Ay6zoc;easOr;ufqo`*QLQel3Og-&CXqP&cQT&#Q6SH>U-ugr(CAs3n&v#Zd z4%*~=>@%bdVs_9(znSepwnC&mx+s0?IqLlodLGm_#iD*K?WW7*h)4Rj1#jPP4Xi_< zK9uq@1L0u=lIB}Y5HCQ1uARY$+fl{JaIucR4%MpW)vh=*D^m4BI&Mj!yPH&W*)?>; zDw!mCqSghyVv93B7qyl&yR5p=k^_Q0h`2u-=X@G^dkQw8Y~&?VCdUOvFw1;|0cpkW8-Ap)#V2d zT^k6wxR_ZxrL*83?6(c}qW#}7=pZ?v11x@-ost}7AF@bMlC*2r3q6u8!L&1G&e?F3DF9dkRnY+kZB=ZwJ& zYWZ?v7e1(PAw8?u&t7V}z6pC;yGp8}RaKY*`@OEbm5Dam5V^wGT$QHMSd+-@))mQY zAwEG-3|+z7>5K2nG1o`={jFzdjUtn;h}L6?Wwf*42*i4#KI$7Ph7G4&xMfm}Bl&l* z-uTAZ?gT*KuY+IzT>O%2Tw6l(EH_c|bSJ46Rapr{BuEGB=+;&Lp>NTPxj#_SZj`Fx zw^1~I&ECe78t;jg@Ff23FN>E(Y(;ZpM$ilEyOMS(@^9sF(6r%d-k=#zJ(5a{Urmf-!(!Ms9s8rlC(xfnkNQZm zM_roA?tcts-zCi>&^b{OhOaVoFMga6*2@;!qD`lO_!jL`%!SOt{f(bQ7d#RC-|^qGqe?j?qDoj3$_ajnB&K~q@q9juMCs8)Yzxvv``C9xG=e8; z3ZtzmDJPt4+~4p?Ond}yTE7tX8{ocx{Z_(UwWOxqFrx0nHF<}jn6{jhou%({^R1tZ z3;M#+)ub3qj2yuJ1mQhR8k 
z^%!2bDX|GBKN-Vt;(-)mA%+Z!Q4qu6T|^ii`)k8!yAy%jVg*?Gy@sv!jh`fhF7=@s zO&65PbPNAYus5eB!@+HbiIxFJBfs*(E>&f6)klG_+XC36ya3B^^C0-BH|S{it(P#a zr#OxeZoH6XsoFWGb+Fg6)q_n8wu&=}lPu=iQDh8~#^dH|^Y$1`RiGX^(p~L^y;jZb zHFET{YOihbHMiFS-boi@uk{De-o8E6_BLE7+7_ZCAPF(v@ zOl_BSrS6g7QNIc10U=z|M7Ehs5-AtFZyIM4_4qlh8)sibU0j@ThN~~ayi1Nava~Os zKc8dIH9|_Nfj0B3BK3*mp6Ih9jx4Kn`k_2Ud&XF1Buti`=PY=XEVEIqS!Q57s|CAk zOoZ!*T}HL%b{SMf`9;}f&*y<c`M06Sa5-6Rm_sar6Ro0<31@Q%wln0olek%~bG>SJFP7 znu%%`k%Jp`-ce-G$@+9+;3J=dEV_QAG;q<5sl>dUOob7`PyL9_NdrGO#7d-aR>Tf# zvPg=_F&42KNV(Rs2Uk+BtzvIJAH5&Ls;{w~!GoUJY?$mopv4Z89sdbZ(P0w(C}x~Q zg`vu!X~JxH>GvEi$AZfrJY2TpRiyQL>&6OWxIF%d7%m6$M73-_zM(QA^Di-8jxN9i zfYlrn>+xc>_6o|Obse~dYF&mVO~5k5H-D$Vp1bG)R0kkU?EIPNMev;}tM5a4$o7z^ z^SqA2n}8bs4GC7F?C#R9~ zVF;{@2???~KErZO42lo_4%~r9C%<6+7>B=h=Mox+;P0g5Do(3kz@U-UTZBRL0&-eg z$c(rsgJupgH#caa_J!{FjsWG}HrOkpk-Q~)WluB_`w1s$&IvsDI-k9AeN4PDU(g-9 zqXcmkeKNLDL{Bv1_#A$J1^pYp1pyPBEI#f6edb}bq19URBfv=F`-EGgP30?6-aQvn z9@eo$c@CCJqz%{WubP(I*;4Mup*>CR*XR3ln#$h|L?gCS_b^z3?R!D_OXq3-Z{%<0 ziXSR}S0LvPmcP1(E+l_l|7MZD^+?{5{IQoKiP-NsNmKqlz}NZY@0FPNh&&vL61-^n z>j6?R@)t*(2$8vv{4Kkf@+|VV=0U;fHVDV(5iIIM2Nl8F$<3_RgNim+;#inWp`IOCgOGe{gnhKr5t*0;cF$Y2H+99a<88w>lqbK2`*D|`We^{ZT9cU@~=x+~0 zQ1QDDUt12Uty@m1vG>{WC1S;vifeY_RZ)k2$R8N1as1~5y9I?|Z`E*=KKiCVVhGoV zjza=Tzhc_$G@$QePlA?VW;@Y)}aX|zZ(U~Gmw+LyN}ki*$m2p4Ofm!5NLaeb*^=cL{j|tAl9`ViFh?UI^d!ozF0X5fbKoy3bS?H z5vz~xOu=s;IP?K|jP=oHK1IND2q1=@26vJ1g!iwL*z$#9K0+!K1LsoDckl`cmbARo zID#HKSr3rD4%dO&zlqvpQR;^@zs?fHK11Q7sMz(QSU&|^Wfa>R#kMm3Psg4!>^L{r z@E{l1n21S}N4qD18jwbCT)fexgc!;e+7OvsY8uvl+egS&<0bj4sU81`VpdT%8Wb;t(RAL$q)jg3o z)H(@q7KjZ==wuDWE_?$EZtaR&jihf#QH&1Op#mDX00YHT*}QJNvO6gJL4=2%5H$@@ zz-<%&Cmy;I4w+xGj$o)|6PE~qj_I#a`H)8Ja^@d{da=s(10Y9&@;Sm$i<|Ljsf8gZ z+08@gj4$p}jznM|Vz`Ks(}`-W=OrygRFOsI;1(~owt7=6Ca;^Dyto;GRxyrogZCKw zd@<)~L#>4;bfbtoy&@u-bhFdV#EJaP5%D7J^Dr{9D}feCS9?RcoLHSl+#HHaKwR)k z^1pL!rBE0nZh-RP2S~c|Z1@*DU*LC6(Qh_FQZ7dhf|XxPhxLT~Fp>k869>$?L;yqR z;pU+N-_4N-u3mU^aJlXITJsp{yB7oNQ>Xw6y(dBetdK-UaGha|u>w9M5&?GTaH}EZ zDIz?kAzWK7+zwxV0$iU1JJb#z^QDke?D5XL9rjRM9^!&@vA<}CatfP6?4uoSTV$y3 zSbYfpD1OnmCw>#o6ffLBk;x|mkweFr)vLzsb7JTUt{`GXl(Q=}s-z)D7{AI#KgAnM zpZb$kpP&QMZHUi)hY86xIb{QGN39TSA|lxxW>C!t0OYMutii z5a9R=ejPM?6T#3ML|cR5*#4GapdTiSD4y>P!+*hPEDXN~JvI{PSYf!XBZncE;;uoQ z1%~+)=0I2!hHY;O7)~w#3{m^UY=2qZ*|h$Ox`iO%l8^*~twHd`K1*dIz36~7^m{|_ z4!Dek;5pD^=|IK`!2qmqK}sH?xPFMUKyWvOwL@4Gg7@AK5Y*pCIxnK{c>61AAOZpZ zVLl-SciugWHO8{#5eAtD_C zYv}ieE{4L|5 z%}AD1EK0YC%c0$^{4gKD@_`y1GNyStGKW9v~!5q+I?NCoEE+^wTF4+|K z1+rLhNu#j$5f;Ve*%E=vy7}kPpRM6T+u)NiFT}>D77+^UY8|0x5Z4@`$E=~>8=>1# ziCBcjfF7$uuoa;lG^N1&kK(o>&VmsAKpFcd!lDQ*c~u~^dLALvVt-0+MSeIwI7*O) zBYt!*WUD~^?&H>hx)*WHfy%Lles7@CQ5OIOf4(V7JwcDXjbJNKYua(3PJe{B7ZGOx z>N5&kg|H}46aOQCT6hnDirmLU`sTd&03=Tuuu;y7kPqdxrkiENHAiTqHS~KU)C-k} zMd;**sPVH1wjxAJo)D$o6t@y_7KAoa*ldJF5lVeUAmq7Q*yCIm!)cG+Q->2S^=vr$ zE(uK?@POSXmKvhJw@m3%DZM&_@g9dfxL%2|DbR(K53^u1Un=e^Z|Wbhit_g$e;t-{h(>^T2OoPtqYgJrF*Y_q&+{%^Bf3!im2L9b2Sp^2hDnsYFntqg8!(!dXNN7KA&BqY4HvDUd5`Kiu|p&@LQDBL{usz|#zU zEt2$0A1}$8<|RaKOao2UbuLA2R`f;OO#`U-CXp3I2<9sk;MK7{@|Bf@-)6^#L|wcj zV$CmEQg2O_tEyli;~vrgJt7xZd&kv#oMmHZ;bt1VdZSVMjX(k=;&+x%GGBY5k4NbP zaz`$?l?A>;nEx?{w|x%BoLsFk^EtvJRJ!-d$9>3!2U_4>HO#;>c@A|v+05ZwQ?N<%El(qr{)-8lr+*^ z@9Sh9ARM$)MHWq~d6A zI>ZvcKY&e#XXfw_^M@w99q=0!s}a_czT~=1cO*yoM|u5PEN|Zfyu9pYMt_<%f`y`ETm{6Q{zZH4Vlu$Mnq~wB)F+aiDmC@pybi-#=U0a(`b*L zzYJ^3lQR?*JG&+WJ;y|qfT13fhPvRq9^msKlcHXI!R2I+dbe0ko;sD+PEo=3mmf*h^X6LPpRl|Ur)x)hq|B`)IV!8ry1hj8ApclQG8@gDVl0bo}Y z7TaY-#6wk`4)#Lq}!3Y38e3kz{Ei6 z(H8Li4YaGlaWoR$3|3NUESD(ibjb((0+cC~Ksf>}HkJ|61n(=4negs{7Xrcwc<~d| 
zKzJurhgJ&I&KE=(#01KJzbzJJGvcC9{uMGu6k}7Ae~w5(`L{e#E!%M`b@7adf5g0B z1N(DuflmbG+-fm~%tN*&FyBm))eL4y>mBL(d65*&kWMC~FD0VQkWOxbH15D8@Cog) z1t!|hEU=(0ez4?rsu=;U(_W=C{Kw%Z_?*;ldzc4{>VKdNSL=xy%q6D&?Zo@ZU zzH2VuhD}cEJsal{I1NaPHC(Y?BTT|YkAn%)DKRkqXNK8q>ZDMK2$OV0Z zM2#>Zo&TLB!T*kpsC)P_2K_<$2@trIsiw8q%H7RLR5^_n(MgUy0X)gzuNI3UQkV?Y z83ELg49LZw!ullaFTw}B@#jSN;{3(GMcPv8nNS}Qrw3m~3a|rzMw#$Au$~kTgwM

l z2fN#~(d^i0aC?SWvZFL#d__`wVJR*2Yk+uuGQ9itln_ z^^@sT7HsWb^Qs9c`kdWh)MNvqBmSz)R``AL(F^eVUc!&Y@AW3X2k6C+`p?;;_@e4` zi{Y2#cPc*&@o{sMcdcrLYnopY$~~=If7K$&2<7tBQd?x=n2ITF8q>xRWRS|z#<8M67jjvs zW#T9;mjw|{^qL6+PDzh#+CNXR(Ki|3d_^x2VR#x}B%@d2$@^zc@bl*S#nj($9KzV4 zn-X!=bPHYKy+>MsDc*po6I5&jCMy6@m}2V>oo=muv@67-2{r8&xZW7g>S(l^sCnxfOTe_b2WG>F?`hwe&uTRck~SMo0rfJ4`4(@GM!w}2iOaHxKYbDHnjZXQxdF} z7%%9jlX+!$xUt)5__Lffb{nF2bz<$e3-Wi5;g4_5-*>yeH-A~p`TN~7g1`TIE{4AV zp8~{U7=UlSnq7raq-7jue1AUw0A?T*RveH0R`A&aQ5OFc;fG0%*}kPTRUs9Cb}qKG zb18TPpJ?xUU{esE#m(CLm0cIYb5_&#KB;g%dAQNu6AV7@LsTp6Juk{{3HU7qzu*~D zE8r-pd?nS&JARMC4gNg=_HL`ZAOHA5yw7XO`*6WK4>x$%o)#FFOd^b%`3F+Pz=Q4M zIG!>A^NliP0Ipj-9VfxO_0i1>@BnZt_W-Wxl*__#lk^@x#bI8YOT-!JD=`>J@1UpB zzpJDHd+2JkvhUi-BRkxE2R-NiZd&S-C;NnS?V1vkNCj(co60aPFQ~l z&0=F8gFzG@N}-*eJtpcyKBHh#Y#$(IiarFtD6k|U_yY2{w$N=4ll4)jNi-(w*9D`= z!|YE;K1q+q&!@mPKj8hj3P(kXFSZIu0W*umz* zT7yTA4Xeu6K@+q z&F*iyjB>jF+hyi|$wf4!Rh2e8WPjkCvw+5X+&%p=*v5tpy__!@FIHLH9XHV0EgUnc(3scsH7ep@h{*aT(o6nBRnv8 zR^cXw1HKS7w4%8bbp)ABiaI}$j{5Y1Y(k;IhhaMBf({u{5fg;51am`-N7w-LZ~Oug z-`b~-o&jG3E5@ntruJ{ld9O_p$yUr+^DeYJX?W@QS#uxEj~B*4bhHq46glZ_W=Y0_#hotRR839Y*#6{a)Y2QUTq z0i34$0BAv&{y-gz#bEYJSX^kTW)ZvvTDT7&#)9dK%)Cuwnkr&t@&&7OCL@)}jDvmY zrrOrB$M52SiKOG~n zLoo`8YZ2zav)S}ogqg&Ru_A6Bu=5QMr8H04ELH3k3EgkHACK;m*E#i( zp2vSPz~&EDWO2OCtsMV7F;2AAbv#im`xU-_5FV$Hu?ZfTi!K5Wz>=tlOGFNSUswj9 z%pVNP7nvj>t-!J$k)+>oDJ0ht)9@|+nexHsa8{S=8w)9^;TTLDZ}>E{@u}z}sgm|c zvZO6d;}cC1awtC1ujxco3^9CX02B!oyL&i74S#}zjpNLO*|rRezpq*OH~xs_&x|R5 z6UwE|GTAKOT5Y~9TBuO)2|ce`LRimC%iX5Y!Q% zPi#*rDM`Zb9~kgs9O{6(PK1hyK_3TZX5q#|Bwm3wx@iusOu=#}?)#;UdpH+|n>cvP z613jbd5HSQEShm-rHA@PS}>Bp@FO51&4z!?oA%qG)~3B@tbq397_0q&w$+xU(S1#` z`KBwJEya6OfvZ7z;}!;Q`9-x{U=)0XK^y=eN+ne_Q)89OLP zJx!VXtBP%!37?d}OC3(VGY>HG_LrJ`i}TouhFcLEymMvJV|bY%YvbYSBgMuAb0-c1 zQ;#u&c{>0Tz)r)(m`qK}3c+^G7!L#i! 
za_P(Qn<;~F!dMw_R8~Kf%g)Bh`XtPe>Gz)C2lxVsq*4ef{kw{O6R<1{C%|?%0ZvQr zG>ULI0k*;k(1^{8aNCu7(-d+q&^4HFmf(gJ=i~nb88`&KBcxgRNa76lHR9({6fLI) zzv>W}sQ1`2QSZ_?Sxv!n2!QC@XsHS#+jrSr)+BUvijt33Gq zjsd4-bDrOTzJe=4J+Mp5j-n39dUXO7)5FvebO>U6bk--%Y7}!_Dz-Pj!gzo>Zs!9!h1q!qQ8vVJ5Sz zAtu*YMdr}Utdh>qhpZxV=si|RXXx!#lm8|w%PdYIxX>SN?>~llEN>b1Bv)erzcrM< z`tY;V^kmP_QcE}7S(Z+?r&>DTF0jB-4n5lfqd7Eaf&Cm>ZwbS#v4l#;y;O=<^)!Fn zlv38*v-9w>aj${K%wN&_)ct*N^UR)NG#1`V-HEPS((J|^na^3$Ts)S$G4AqrN2W|e ztCkKIe^(!=IS?(>gXiPb^thk-G2UN*_4ciEoU8;auenOsFU>2(HVLn?&De7Y(Qgka zp26>P4Z1*WM0nlGnyr;((>}^tdm(u6Mduj=?%=_oh=)KI0-Mg*ISk%a>|L@m9o}O0 z{tAm@5FaQmU4NE4ro3g-c4wV`4aAkK|7}mn);T@!vZx-u=X^g>4r{Y>M@)Qm%*i8kF@n~33xi!p3xF|MtOz2=|7TLu@7KxvVjv;(NE(A=5s z%HoM2Y9N*?@C+aGFwL~mQZcj?C!%SKDMD?JoP1*3c&X=a>Q$6DLqFC0BGtM7CI^nr zu!A>-K33d(GEd(4+=5fp$hrSkt!h*;B672u${uc4hd_UoX!v*;YHHJjoy++;Sb`_B zN42b{TxM|A&T_cjpBVY5RuDNim%ts6J(OC;!>{PPj#yrfYil7#g)J#8iz7?mBftIq~Eagf4 zma;6il=<F52=rq`2jjetN?#1Hy^+H(|B0@iA~EzlN&M2t7A50}C_BEnwUzI$^?V)n@ESx=cLwWEa2FqQ!YP5_ z_$W`eRvhke9<`&#y7>&+S;gu(q(}Be{PpK_M`NjuJC~)bM=xBTk0DXt7-93MPrO4_ zYVSstWUHI8-uoe)pY>BBS*%=H6OOr`<@RsBPYUmW#bZ1;xI1n3h~O!#%aS$t7(HgA zPSj5cKTya0HP+CTG|$3Ix~1Pw=1|1M(=YIFEl$Om>E2qL+x3vj#Jkx2i5!BtZP!JN z5!cDpe~AEy09$WO@7uK&A-ELPe{f}KjEU0~4O19%jVp98HlR8pc4&kbHSq#wJ3g~H z8V^>&;qa8mr!H<(vz-{g4lBUhk5MO>wZ^T)BH3sgYied&@;Ec`sizoOP512Fq>tEh zC=QK1f)Y{WIu#Y4L705|OuoDo_rOatG398^#rx)3u+M;7o9ve&4{MP;jC$bZ576O- zLhnX?55TJz;fcHmukUZxUU^ab$hU93mqrJH)l=NHd5&#A?IGltjGy0X#F%=^jO z;-ltN7C+i!RZpkI%qQa3gNmznq3LxZ+fdgW4EHN*)drk8PsbaM!!eLn@`1XRg`J$a zk;9SG!&cx`_s4eY`_;JlKo&j+FY^IBY8N3uty*P!p(5-UcSgFt;``@!BY&Hq^b_eS zVZ1f+p3pZyXVtm)uWfLQsk@|oCbjOZs<7O$U#udUkf*Mz(Bb9y3|u-r9IIP?*R zOCiP7AKq@wJouo>`b@;n;+HyD_Y~OWJft2E3UHHz9ZK#Fm}fi_dFeZd!;@K(YDx7A z>XR(JsdlQ;9HdGkO-RbQH8NbKK1=RTtDh={JKb@8ac1lQjjg${+#gF@V2Bg?V@l-g@jPsK?K=;Q>sAlr zo*+_>Fl1NFL-<~2PQG$o%B@tc8%6PPQTR>2Y7%qIY)9Y#JcOaAyLhJ`n@N5Rcze@G z{<>2WjwkJoOhRYi=)Ix3?eyvFXqDXEb{wrbX5;|S*!i;AUvT=bbp4c4{t^VH;B!$W z7$ryS90p#tY=7P#$;teDJRXu=v&v3Ed`}?{hTK_vOySr7-fN#QSI3C$-1$s&;@P|Y zqt`ED7YnEGL2-Tj7=3*N?y^Tja;hYvuk1`ZJmRuc|Xtp#m286Kzubm;eL&~y?R5_Bkd3ynT0H%^@}t< z8=vy9NGoZ^qFV}ymbeG(tvT`a3qla5BKQ<+6?j3|^t8d#dz!zjW$6g?Eaw1D^v^z$ z)$c_UJZC$faY9w_JthUDoDpaqDA|O*%Xe;jaF|uhij~akw#X6c=-x;-lOcKt8eJYsl-{LeNL?QNC5h z>(^~i-gT@hKAB;>#*vLLb&~mNyi)cQwU^iYEUXt^Ka7R!NG_*=b*18J++wM-LwrUI zHgsXTIwSUv1N508EF$n3F{L}zwa!$T8<_64VwMWs<)Iy`cQ?qHoBi?O6V}|Z2u`@4 zRsCC?$3t6t8E*j_QM}#-J>XNjO%d)$&+?0R)!H>4m~O_hJ63%8`eFFYyRIfVQ+H@# zbbV4uaXb0D)O(&O-u*`LD7+V^*om=N#^*C32H$;t?6fY-4Eop`nZIG%(!dV$w&LCI z;JY{YR>yvP>%1bncv{7Xz+|459L>e$m1uLmi9MRW^Ublv*1%Y0Z1CBLclibCYEs!{ ze2o&AQ_)E{_SAQZ`N$D`m8YUU*jSvs+52qtGL8xzZ$F$16g|Je`o%ldxV)!z@qDx& z{r8p^pFS@>W}YfzU_@X#&iO=&mE4#z=JZ#_Ba5wtC?g&QFtcbrYI+b)Ld;T@W7Oux*W-Z*vLP7jWa~HYW@?KY&@=JRc@e&3J*j)Qh?am{*IN z0A3Q|ml2#f2vS?l>5jbErCOBQ2vH5UJMu7JLW*mUB2OJ2#%dqa`#|Hz3uP-`E^awv zL`gMX@~+!%7T0h;ZFi|o2_{C`?vglp#p~yCXUBk`^iu-;9nd5oe8|;bB+jbnUi}*5V?E1MH z51d8(1!Y(jyo!A0h_dQV4DXeAd3R^8(z%CXrwO+K5Z)0v8^TMfUsqZ{wn~bpxgA;O z^Ln11yQeA5%G~|^(LBQN-2F`~o8nYkLdw;jtF%$#mP_P$|!#nuSAI(_3 zqUkH1!8Bjq@i|Ls*$z_LytY%3mIw%TMzAV0Zf!UGpe=;Yxq)8p(DYiTrhj&6`qz9- z?{#asr4qDweaeTWNLuZjp*m7NJ`qSyJ4#y`&DSwtEly?**j#Hzdd1Ca+nHVMudUt8 z-pvkFgA0EMYr}=omU}x?lom!0^CXy!asf*E)LI`djVRtcyDC^!U*z|LVXpZ9b_cCws`pcPN=OD0fO86jJamyAi zLS)k@^&MCjzV5mT@1Ms`QFF`9ilrD|v1n<97dPwZ#C*y<8wBfxh~99_$qFzhiw{qW%I7%)|KijCc%x1qc^IoB#Dy2PoH-)0 z29JyCyUgb4YOpw;_{Zb19&cMxI@zH0-UUS5>Iu{MUF0S#?(xn%2Au-PQS%p*zcqp1 zVe+emmSeyPt?L1~>nw$~M!uV-G@6j#l*hCAzCrB0DSvaX{9SYKwL#sav*Y9r1xnP9 z$-k;!{x3)B*6_NflD{25=KT^rF~{jOr9wIBC_8F8Za$4Sbd163OvHz^Pu6Fzy@gRN 
zUY~`XC40%Z=gOKt=JVMlEhl4NwnOjBdM#`>cJ*$}s*gjB!=p6c#2(X**`a+NJG2pg ziRXxtX-{Tda5Mx&k7(y0)3OWOgI(B2$@(d>9fWU=;YzRv_rJ$r7YBQ?-6i9Cg2Qp# z7yGe2+tCsNqp%6vb*#bKz-K4|ur4R?IohnX{P~!!+u@WoW6Slu9<`y1%~ovXPJ1aZ zq6|fA$;3C@AH{Cnmn{3su5Tc)1g*GaPkUMOW7wDN#=h*U+~7*by{R^P-@tR2Jy2+8 z8Mdo;?cKw34Cm@EYs!%y6RJ>4D$lgu7=YkInwoZ8d*#3;n*>0oqrPdk%f zPaBi^o>uJ8b(c;1HfwDuw$93?bp(%)@wQ}r0k-i9dbp@%Ed@Q%eN3osT2E7Fsn&n2 zil=?wG=XOdWw1r6^+kY?TOB8Ps0kN0CDjdFQ)j&EG>< z)7|ls&|%+=@MsV@;kVr3qVm*yyk&+L%N&um-^8?Ey+6v0_qS-gFZ5}-Uqe{`E%)n^ zN58|_f>&_1;QiRM1>ABw`QZVy+$XXwcn|{6az6y2$f|?lT5cy=E;m`T_3EaA#+%EH zw`AI`CLDp-y>sl6Fz*oq2lrDtVO=zzsr7-6X6$?~R^5fFMJw~!TmHE9O3ypIUdiun z!Z3)%4?P9zdI5XMCQypR;tpN_2yL+$oIV)}P1NUbtW88J&xz5etl!~5LaYFM%O*kX zH1qQu_=c2zo}&hzLMs{f3iOAkI40rl3{QISQydGpQ^q{SfrV^&isKt7`Sbps;s~Cq zp5QnXoKf?zFUqlVHWzOSp1=661mhb%iML?k#Z=zD zTzN;S@ek=G*5B&>>Mx_XxdQc9==h=fyZ$Y+{&4o6r_nJ--@DSWpb&pa)#VA_WBM`W zY+aWPxoR{@=pRvgjvJd1rR$I2hjCDXvT+~tw`L#x4@`Pl?@^m`V3A|(`N@vbaqsh5 z!lU0>il;qWoHhH6*k18WaaQ4Weuu+YEL^M8HpSB(&${4PmeO+U+p2dg8UcV)HvFWb z*!M_rOXha0Orw{e4kkrd@1@QEEFJd`yx4fn<9MLv+wI_Gqu$*$huaT6)YpuMh??hV zquz*XU5~*jX~ub(P8fBZz=3(^>Ufcj`qr`*TN;lKWz)LVfT2EaTm0xweDL{kyaEIx z0gU>vzjF?8tOMzkjqBhhp^f@8CA;4&%HF|~(b(}AX4g^rPA8KE@35VAoca#iX-CaF zY^NRd@33Wd1m6iWIr|-L-`~+zTk#E@rT9RY4}|$ZS~ZcFECm!~MG4?$lR!Q%-m$hKS$vmghysd27 zdu-p2SI=t2Xxp^!r)c{g^CW3k(|-Ng_ajQy|2o0G?+AXx7W_Eqn&}Mi?R-T{{YJFv z7@?0xu&6ebQvWz0|3VyPn1v8vY|sW}mBdnKC#oQuc74%Hvy?BTpOxIgLh9QX+I zR_yj5W-$*|MSPA01;Y7}CdbYR(ft`<9-sQ2Wx;dwyjk=vX5htqo+PqbOT#ygb_M6~ zu@sTE&rVlN|N8-XIi@@ppRI|E1B7tAW2F$wVDO{+v-Qc)i+^2Kgh$r_w?@`XjOr)a zzuiANU-SL9^R=n5^R@G^De-?bU)zMPot&@f{``M4Uz7eEJI4HZpP$yB8+cC<{dqa~ z;`(!pP&Gm^X`g-%D_+2JHdMUox8<)dh0Y&e> zT2eM*#Uy$^sQRm#pTg9xV*4?-bp3-NND~v1wR60wj~}ofUtvD1?9r;y=DV3Y@)54U zz`-W&3eJK_s{KjwZZzdp!XDK|uF}GkwPgEdTFU|3xA~?#Z7)l-yq|9`Ev7teFaL$r zcU}%*8>wH3_%W_v!ptMWZ2o{E=1^tRzH1t~b5`&C78im!W$S~ZF+j~4Eq5SK;K=Z< z;0MrFa+LZzdWuCFXyj0?%}^i zjDNb-%{|rYDd)hHhR}jeUk68A;|d8eUk5FFyl$S zXt8+#k%uJJz^5Zn4dOyqsmQw{_YGE^v3ETNFo^$K%;_1nm_E7&Mmu|>f%NVE~`tTt`OtoCDZ%qKjYaJBH_{ z>#?cHk2g%7RCg)}Drf=xjXz1j- z5UL7>)p8{dUjXB4rOkNS9WNimONb>qt|-&+D-of%W$@)XUNlBtZ;bPFj8H<1{Ai5H z%McWwaGw;5qp^pds0GF52YYm;0;cKI@)A-zB^qP$7&A3ASCdPvtF;WyQCxVnij=dm zH^xC)?g0rgYB9+4?p8;R#D3P!Ph;Ef|NMEJizjirLd*Fn&*Mb6t*swip?LN4IO)9) z<4n%z9e+;idJV&GZ+~KYrN%^j#^kw`>T#yi(PvMD7yYKR1xsi0Mfs-+9k8e~rXr+7 zo(J8fdy(6j`umH z^Cv-IBm~7`3u`U#Yb$`Wc{0&5V>*W|Ls(ZW)~3{9ff_M6AKLVFHDoxS7f-s9UPzFXo)?-zMJNS723$l7;V41DQ% z&tv>9$@v(a^z)j!z2m6pZ?Frz0$aov?OTUy#-v5e1C7f+~ zY%*#CQ+}Ln;$^2bPeK+y9$M6aLreU?=r|mvX=!T5$IWvh+3UQzSxnKG_P_KPd_exzaLBaa`&S_nLMY#Sc z{a`-`6EO5HO$NJxmYuOEavh6NzqtMaqx}s}-&!VXGI$rzGT)*|LsYCOUZ4MMnej_4 z{{lq@U&`_iTogHOukr&rKF?3lv8y@$;fo?e_8On(rpV6`I({z4&s`LG$GW%tpQGdN zR>c@><@hrfMgF$e_~+^PTXp=Gxx6PXiu`7;@pVBbU#a8&iQ`u-ij?m){zY2;QXN0Q z@%@V;{5GWi^?!+uKUK#s1bN_m=sd#DP4;d7Ei9RDH2kNa_Z*p>2R z1CG%0e&vkk;M)lsJL4XBB3@)jZP>=Ohg~U`N8?{(#y=mP)4C=}3F6{I+A%{^ea%v2 za5=~W9`uh0Zx6=(bbeAG%-|t@Mkah}8on!K`*QQ*yYNNc0ROI(+oBim;)_q0 z{q-~6=D=H2U|e!$d(FfG2inuViVQvwWZCJ9BEJVi!cY3oGyEh(%j6D427iLZfMr>W zBELrb{_1ai%H>BQKHsR!xOf=ij{WsAV^QRFM8}nzaRsu@*RpQp@RCK5eGq7#2VbAE zFv@>Ehvz8%D>eTT%|A=?FXr&_MUj_~@jYX%tz-9k<=&v&KPmUm%DqLoe^Ksj%KfWy z?@;cY%Dr2;_bPXja_?8}gUa2a+((rAsB#}y?vu*hs@$iQ`>b;RuG|-t`=WARR_?3H zeOt`C?mw0LjdFJ@_g~8WR=NLC z?jGeD%UD-d<)$b%O}XjH%~0-O>Y3r*;$j8rb(8`)b&C985&lBKECjA3qsfdoTND zv+tkmJC=RfF#cUR>|4gZvFzKxzLD&EoPERC_dfduvCoPzwTsy2WZy1KE^8~<_bL0< zuhGGiT^#}GH$-Z^$JDz>N zVIO~bsVm4nAN!WEZ!P;O*>^qrPGa9C_LZ{l752?!-{=VM^qKqcd%~~`}o{r*OBadj(tw{?O@-2?E8v+nd~#t;j^%B2>W_4W32rJ z`#xvic=mn3zM1TMlYJ+$uY-M+?0brRjqH1fedn_8F82L_eH-CJ%Wz-I?W6a%FyVJ= 
z!VfV$<&$cw>guy|%BzBPE4*cmHBI&2BGOl<>5KCgFT<6JV6MlLos;Wrs9Id_^#p3G z{N5VR5`W`zPq4NwP|}B`$P|;EtHsW2s;jT@`im$k%;hoV_=0|qce$^=s@f}(60%fY z;h9!dU*A|=6>RjEROEPy@Ee$*TwRRWO%2s3N=bcDQG&2s5vEnqH~P;?25DrwL`x`L zNqI%KCq|*1{e)~x`Jrrx{u4wdN*tJKW3rKu1yL&+hFRY~R7H6Uyn!H8T98;0vv!n1 zdt-6SybtYl1OB!M-6-@9&{q8YaJpurs0daCn*s@$&(`{jk0VVvI~)1*RtF3G{;Jgj zY&UT%32DWZFgtH<)rzW;px4ihQzukJ?rfKNYPQi&aX>mmex9# zdzUx*S384^PU+}gXTS%8;9Sz^cLr;{&W5V8s=(=i>CQ1V_&n^`p9GjFzMVM+OdDU&>Nipu9L_@%qZ<1Q#Xp~B-vAoyuk(aeQ&raMa;moBYq zSn3RTgPtHdfoFMOsdK`F34@4Rf~PLvVXNwCP)4=7vBvAsHmJ0*VJX|8QkR4k_4`2jf<=5JycWQ=<_z91qTKZj??Q{<&DAFjZFI18nkLB=j%;^sLxoP!fObkfiiy53}qtT)SHC6tVbq%C4KcQxIecfW3u)OMY zniyQ}o2VN0De5Ohg~6l6Re`!{Pax<=ub_#w%RK}JuXFrN=lDV=Ima)7lYQL88t;mU z4Ndj+Ds%}Fn?7hvfV_Txqd%a$6X2(nw1l-p6Wqn~%6~cCIeXsx!lFS`HgDF#(jq5* z(`Pu51nW>=^Hj{^Yj`$YMH$B-bxRs*yi1&O@p!(ccwQO!oz4YiZp0fsHc;!WuXip( zS9Oj*B~aVAvcek-BIkkWK0i|S2kT%(#;@@D1F+1_oZ}A9cAo8AvefVOImg#2z6rIB z%e~`RW;B5q$u)KUhN|UWWC&%%urR){DcH;AoN@H%vB=7}aS-5bsHs~5v%%RcDK9Kt zIIBnrii#aKv96)IzNyALhzd_RWp-)7oC>9YQ#tYRfnd#y>Z6Xz&W4KJC56uM%d7m= zwKKHg!L^1)MDf>E2gg^{*P)JJv&Nqc^Xk;>C8Nj2raW#hmnV)1m?7FXejXAZ6VQOh z&&%b!%qlJ@DfQ?ofRd#y22trOoom$4_$4dq0(Fb)>g$55XP|njn(Bk9FO08Q4UN@R zqpcx>^+%6KcknK$Yw*?_Eg|S$ji)#HXuMx8pzvxAN~I+;Vb`7G>+8>)G>FWE(I&=4 zn@;-Zu~6PPREVj=anA9Ly$xnUqjP+948(APYLB^qW;uxZlQMd&6i*AxkKtkYoVRfh z)kl_}#srQYQ$yTUJd5fe8$z5_+^iuP$j=(%6esclB5EXg0UJ z#53;%stc&zI#IU<9)>1r-kDVaFICml_zP&RqBCh>`3dFoPA;d4k~tMcb53G9+g-$D z=E4dlC8ecI3TDn%6h{c7Szy_3Z0PSTS>p4o|)7Wyzr ztS@YASW>rCfS&I3Q@c8DfR3$es0$uPW5=CDNS~{sEU=Wuma@g0SYOq!bRyQ0-D#N+{e4jB6FfKD(-Hj+ z`%bLBAt?K#_kN-YRUX2gZ^Pjc`t)LvfAIxpwF#-qE1`p z)(qN{Vt+e>E=aB591p%b139zr$e>m`p0xbbKKRZI3TLE)_oNq{DK)#P8>{3q7T4SZthJ0?Jbw(C;P#!ew>#X#+u@JY-mih30Vwof9A4?|@ zowU!k6k2aAekO$;w4AUzh5l+C@o@@0X~i5htd5QpYPV)$LGt_*$5ScB?^1_clWJU+ zij#1<2(0V>G7n+HxWJ%eiH_apVmmE3;6gh!8<}hE^ps)WXs73mUlIMqV!zBzcUsVp zU$^Z4Wg3O8Be5FziS^Jwr_n_=Y@YnfHu6tt^hL^k&!*AZR2=MjEOp<<(~Rw@jtA3> zovDty(~Oq1bjaI~mJU%5*!K^o8?V?M>(Y&n?L$N9MvvWT7HZGcO~x`pH$Go`8og=Qze=O`jU1xOEC;=iMweI*-;_q1tT#X#+imvy)6l3;ftRNogv_i< zov_AE_oYrjLrTrssxkm}WB~E5u@AY+Zrp0`)ud{-2T?9jZYFk4iiTWePz5$imJWqy z)im9%9g8zBv(OVpddNb5H-4?_aLR$1S6Yk{9nBV_YSg!e(Pr!e?;A$SzYOCYqnzk= zOUh;o)HjIGKvK3@jCMZDC^e|kbqGx7-c7L4l&#|Mu) z@PiCGHS4(yYRtGNgSMbzGU-VRcERqprvEvEKD9djltJIv?4eBhkL?EZ>+90&-=pKC zt%d)#bo+N1bay&$gY6!a4%YKB(h;*IBOP%zWTYeXhK#qd#*XxcQveTOIJQ{mdL!dO z3wks6Y1YHlV~k%+Nq;B9IG%S@ueGH-lR<0j&l%e?=!Ntvk&eXMV+^7BMDy)$8nozY zb#c$tqm2Eh48AGDD7*$s$M2?XGr}2kOFHLHbE&k4P!&;?z006F;oD>E|BNy3IV)Xd zX+^~Qt(ovYX&s~M#Xd1@7EJ+AdPBb}vj2+8G#)~$iB{V$Heg#hI>&LqxMCyIu%B6lmhWm(oV=lDN=vd5aU@sdO`h>WS(HJ!+v9D1g3}MJtF_40_E% zA>&KF2;QO44eKZCJ_~fPRz+3yQ#7r|pqq>t|2F779=j0No|hY>(`WVZ4?1oUc6v7$ zb0wWboH28a@rprDstAyiQF6P1X7Y{3R}VJ^jYvPwFp5(zGzSiyw@jK!G&S=a;xA@_ zFA)!>X#dC}%mu1* z|7f8r4E4Z5xpG>_Dnt#$JgEktABo9NVlBm)iF{m~wYY1_>uzz7t zWAvK6AobTYCr|4$#y{xTW%nC&voT4@mCa4R$)K}K zbUfy*BdSA*$PXnb-UAHBjP#RecDmpAW%@E>@ral*vfOMu;%J{4mCI>OPG4vommV|k z#a!>0^cyV3tiw>#gT9AV8iroB)+k^blYW)O*kPn!Vlh52V9}ywJ_6G?OiwNL?i4)v z^Gm#T%9{Dt6zZ_@@O6DkCNAHWa*oO)^tf-zfxUSEwx?vp;e%49Cg5YEIC1>D8f7LJ zAN<+>W--pSKzBM@&?SfQ$G*BO_FM7tCQCs~80L86>}xF|SoI0ENh$9bRLLhkRQ))J z>-AD2v(3P0$M&7u%D&bS(flN0&L5u0JfNVO7U98V)!$<=#~+qDHjY0Q^B8m;(!9>d zg6{rmplUfY5Jj2z!%(5H8x#fOpYh@;DJL4lM#ng@%CMd|iYzDMnlUuP#@GO6KZ`lh zmv4~6Ni-U}-$7hBZ9l%OvGRyhIUfD%D2#QWvq1CFbljkApp~Ew%(Z-=KG3s4J1``! 
z19hR-ZQyWJ!$!~$=w=QF-3IE!5VRe%8`HQ5Xt)6O{b*p=yH?OjH1B&r!x;At9*=Zk z^G*e&1&9aQzL4k|#;{Uvf%-7cIVT{UO5_W)664y}pe~$asGo@IO+*)gx^PmsmGLT~ z4?sIH_V<9co=IePA)U2Et3X2+U=9h|aWRHszJ3W&2Wa;dkPFKDxb|$Mdp&dp+5zeU zb+_V+VxV*bvI^?Fk*JpGpKy*EG;|a60_wXN(>|uRKp#vuB0o(30>$Sb-mNG%X!tfv zyg}P;NBKcJ{t7*VcK;3K=j-<&f1sV4kUvn@{g4A1egNeHbv%Ujk_-F@@(J4Aj`{>` zeH8Tx>V6FM37Y>n^akpB0`2_+B6y3ra5{|Dc_qZqV+RkPcse1?>*h@haL2Xc+V|rmvxXK-;$ywShX{ z!2K&|2($y#@eXDgpuP`qqs!qRqI{D9iXM4t_bu68V2=&wu6S4eggf2I(8D> z#2B;2GzI-wqMP$Jmrx7k8i*qjI%|#wz^jh7N>RU)g>q%sH zo{YGsfd7|}_e)A~E+R{K5qKAq#kClD^io=Q30Ztgk&oqMXAm za|UFuAftOF`tK^pUJc!?CQCagY(#qdnUvan7Lns@vgDslRCzX~=buAquHVA{Ta@Lu zl#(B!wC)hnJ&$aTHOSNXWYF&r|96PpGMlv<)i!G|AnmG_-nnDQk{Q;p8f{jI}q;{RFhN~MR9w|3O!z4SHe-ybsO$W*k`=EYK zp>Co2D%!ZP`16Xzp&sRD^HK)hv$F%a_0ncqsm1D0npCTUUwNev`5kkmj@14#`eHIURmQUgg1 zBsGxKKvDxq4J0*?)Id@LNev`5kkmj@14#`eHIURmQUgg1BsGxKKvDxq4J0*?)Id@L zNev`5kkmj@14#`eHIURmQUgg1BsGxKKvDxq4J0*?)Id@LNev`5kkmj@14#`eHSqsZ z4S4x8ariX~T`hEt&=#Rr3cW$-Z9?xA`iRh{g}yBGZJ{3v{Zi<+LQ@XV>1PQ&K zLg*1fvxOcbv`FYYp}!PbEp)lil|s)Ix?bqzLa!J47om3x-6C|W&=-a75c;9eFNFR} zs8!yNFj(mRLPrTbO6X*v#|teHS|Ri_p-Y9HA@nSvYlU7c^eUl$5^C1hEw|m8WBQXc zsez;hk{U>AAgO_*29g>`Y9OhBqy~~2NNOOdfushK8c1p&sez;hk{U>AAgO_*29g>` zY9OhBqy~~2NNOOdfushK8c1p&sez;hk{U>AAgO_*29g>`Y9OhBqy~~2NNOOdfushK z8c1p&sez;hk{U>AAgO_*29g>`Y9OhBqy~~2`2V8@hK_RiDi;*^?D*%HUr+&`LJrfN zU(nar_X@{Ki0NRNe64S6pZ&x`sdD73${ZS+@9``uKcOO+>+xh~PxE*JwN-v^jc28| z>U2+aV^c#g8zD=*K~Igh-W&A#i{>NpwBTx=*HhQ9q>)ZMqtkMKyJem6aO*mwb(_(; z(E`u?VI{XBn4MGaZCDzt^?3dMM*pnrq7@Ws|7wr^8D^{;*W#)gPnF+awc69*UFp$0 zDz+I#7$53 z9eXYDf|A4{Q)qj9aXCu=<<(i{@M+#v)m~q)uCakap+Tx73l^AJ&Rbj+sH^q_g8sUO zrEtAf%S)=uD^Pvas5?*4UsV?jKnq2KnA|W>P<7=#IK(mO8-)iqKb`H`B)b%5N!)`N6 zt*h?egntijLW|1dzxy8E)NU#6S6a5mvtoIF;(GpRiZl88uj^1q;_WdiMfE3{I-VS> zE(-6%0fAsuHTrJt>7J!c zRsI@PG+eAf5A*@ltgf#CY+CGDT-Sh$6lS~1-E~1pwrh4(u&Ta@`zm^NWY9f23nke( zN!s2`~I^& zIVS7w#>RR?HQTY2E^mQ15JX}HEal(*N{Pu4dhWMooYr-06d!DDH9$7u@8Q-qZTzCP zM)eMDdr>-%ce$^=s@j{N`1l@Ok?qmN?&*^m-QxEm)2u5L#akULV5^&8D=KnC^0tBH z(v%pZ!o5k{Y&C0oAbFH#r<={*M0O{bG_$$uso7T<+ zkL0eu3QowQ*}+T`IxvYp%Y+V4^iMIN0~48G!=QoxkqP}-$q(Fw4orNXKCqV>HcjY& zr1o~3y+}(mp#zc{)6@1MHqnGe#r`xC+7X>4NzY3#q2Y9~(0!)Cq3!Vle@&Hp1^K)6 zl%=K~KyfxyEf>#9cw`a5Pdry%*sv0FV9$JSplP`mVSao3{qWDd4ami98SZo;&=Dta z3g3*?d#5!8yq+bP-UW;1mshwv`hJ!CBjO_G)ink@xDWA8!Rj}Uu5G|-!& zDOEn7x1ok}rbOodFGL0cbxRw}!WRDzB-S+qy#8R(ax<6i|6V4Pj8&4!9@gV2|Gl`X zie7P*apH1ywKV?FDnQYcfVUy&U(K1S$W?i&{nnC_hQ$uf3o8uygZ8B@PJuWb6i*jKv0$2^i zLfqoIpr;Dfv%0Y%P>0)j6g^n$Z(OO%v8{jpNp%f%!6G)zs?7RwHDD~yol-bFRyE;3VKy;!WOl9!V4#X;OU;4DnG^s&cg;B zfJHIb36DRhF1G56i_i4>8#&TO3%5+Hqg8oi$<6EOUBp9#HqD?SU@fvP$XuIyxfWy1 zq``xwy6RdEY3~hz3GKbUt@rv8zc(R`j^04E^y8^o+~`*cZSNCOeMVEAUyXU4iDA-& zIUAAQFjcpFty`C*cbRDkR(?33hXbmrtG$5$!;lqlG1j86B3Z3cS{HZ46Ii``abtbJ zvjl5YWHCBTi$KI%MBL*0TZ5l!%qs@#=9k(-B5*4&fL5!$s zwCqsGl(JnR41x{8B`l{?<0c(9q7anMLEQ>K2f@)L<@070O_{`3d(_pH{<@%w6tW@d z<=*Ah%dzlA{&gnwt!5Y&xV&+NF0x>BFAi>1@bsEGUZL}E6coh55aTw+X1+Hwz*-=$ z+99l6!Nx${Doz}$gG-f;wyDd}!gVmj;;~Zxb_P7MR;iloRIpbO5rq>9+pRD9u~<(? 
zwMQTfIm1v2OI#g5h8eD7xFXQdD>JOG^j_G&7fjue;i1cp!DP)w?XO$mw!O)27%o|6 zvJDGRmR@HJ&~O`dzrg{sxpodRFHazgTC^<0@;9Y7EvHEk^A?L~eVg6T+g77MuaA4_ zR0w~UWw5puBLUb2@`K@16?F#(v0ZXwd!!*a-@C*Mie`O*!O~j{8+CrhF*^~TECn?+ ze(nZ$8WnYCdTA4a+f{J)drF-0x4+N+R`|(USliTqBE4m3`2l+0SQzluNJ%3GS-dNC zx#%?mMfn^OJN^Mqn~FkPjKU^A=WO1RCES2IjB*NF5;9xT5NxCyEznk{%Bo7g8!lPO zo0dmYPGqBqg;EOYX;mRzYRrM~jh5c>zh$YwmCqqG^f?zq8Sj}*!4te*Um5EDBp6sM z9Mi>CWUIWXzMk&1l7$yoJq=Xkp-on(;3*4N$J6Y#KEZDIId(hXLN$%P)v5}2TQN~b zV*jy1HHEx4MQdzG5#a@DT1Y#s+?fP?X_cg#ZB}Y;BrAn{aN5}kp8=3vg7c}wwyS;-Ig1N-?mMP!|&TB$Kj7{Q{wQawyAOWOIuzX z-ep58e8qr1x?jf2tt~S#X8BiBOX>;U)pThp6!$0xc0R*_WmV{Le!4aFng5t+qSbm^ai^f%58m<{oQb(1bQYln>tc+=(W^bdMkAjy`MUnBB@j8 z^VF&Ib!r~{8?sxGCCJ{vZul*BJC)n^Hv2>Gu-l>B)_2+8t=#rbox<&D*>qQ04sA}$ zrH9id(GzKt>6x@C)R8uoUQ5fPx6;U3+Sss^_W?>Nls3<=whrj#Ga`l`6G=n;ogD=G6T>H>&Oq<-Wvj=v8*Zue00s2D=?Q*zJBB4G$W4 z&Yn##*>h;SJ(u3GPofX(lj#%t6zaB5rCs(s`VO)~-zn~I!C@(7v%$M|8`H}IOX+Xv z5Z=Q9oTGcvb&fc<*kNgCl&-opol<7n-fJHofTWp$F|Kth9(|vCm7_TQMH` zI}+*sJ@@BunBBHZ*zNcOyP?b2LO{oZpyNT%@gV4U5X=*=`P1!#=Fu}i<+_J{mL9X3 zCFv>6zJ)kd4GoR3|0r7kx68b}LvKs}s&1@7-ai?H&QpXQ6wXI^t1++yz2#MCj;9CQ z?(!*ty5%)KUtX2Jy4F**V%3x;jImQDm6R^S+`Oo$Vv=X|YWHfsWx6|K1vVub{n%La zRW}5GjFUEJSgE@JN#@d)j7fEi138nYcvjY7$GM@(z52)4`G)AWOjv;ed`DFzb&dAFmxavb znwinf^y#eL+<-i?{xH)ca@`8-WCooa!;LG zUIIOM-1IM$Ck8&va#iPQS8gjxK~@(^*gTY1QZ63K#lL(gY!G!hX3^C{kv}zG3BSg* zHLHQ{AL_;eT6UkMzvc7@B6lJhRPm}r{%Eg}KSdhZMf4;Rq_2jOwWJ}sc|~nmF+G>= z7#gz;har0(*ny48_CCOF`)%yD{uvh7=2GoQjVCC^bNl2j;pH&X$iyfje(k)sxv&ot zJ{&}-qnGxXq}Zc2>?iVX-)AzGDLnO!IOD-h+gpP?y~DY`o^#xGGjeRfDh)O#@t-an zwiuw^eFE;@hM|S8M4T{U#>Tm$H_ko7l>V+52K_xS42r*ng?1>nokgo!eRmkw>W7H5 zf_4rgYc_qbPa(ZLY!-cqG}@M;>4(|r@Pff??3WH-Qr{G)1B^kF1mF#^HtZ!thyi`F^O_4K}XW zKiDMRohsg@{g8oWsB3NI^`Js)hsQ46)1UU!87pjTSW>sNkhbiHd6p-L?QYNVz*4L} zndc&KT&DNz3Ezaf`a5>w>U-|wS!A%IU3J`7xW#ur%>K}0GB|nyRV#4nXj#>YDxB&- z@!r{=XL6okqetf(Z8D}gMLRIM){pO8(Fgk@!M~|w?`F3XuIl&5KD{~uJ)S)~Mj%7? 
za7ZD&H^S#bF22E~?mL*J<92q#w;|ZN*NR+sz&sZ8`vc}Px$FQi{8h1O=39S|`ANkt%D8E?Id#kFGwt()ub59H>0*@4_3{&*nSrdHLgsKQ+d;0q)2 zN-z`Tqd)ZRh(bE=z**FKAPVvbt9&jLZ3b)GLH8WU#z5_N{-Da~^#du*rRM-Vji)Pz zWYhKgwW;N}S>pAny7ORI{`hOFXw$)H4G9-l(UyaA`(9l|j~_g#pUbQ0>4VW->(FQ0 z*a_FE#dUF&i@$byly^YAQB$gFYA~IYRSVcvR3FTAg*tJ>#+5ESG%vbxN!J{T{?wroRBp!$Y;xE@=WW1RlFzBKiquJqB6CDBiym(x6h^^rv_wsRMQO6+Zy0Itd z^N@M{4768|v)MLZm_jz&#&s!>7JKIM?Q!u3E&nmjMiIoa*}B0&mY8{d5j7tfGtn=i zi;qMzJy*#+kKOQEc0=bg-(JR(m`#3llJLixH)dXF~H z*GI)%5fynelDFAQj9MxGE)PGa3~v_26< z;fd%j+#)P|e!TAqS=ur2CmlGs$Yu7@(9v?ni~it3)o=$xDZ9p%(_60iLnz${f9_#e zR@;4B*mA~p*>HSm=!$F`J&9O0+Z*7BZ<`~m655ch*k9QV=9?CyGi-7oHEz5#aG zud}(!{yV!*m;F3Dq05GHY_~CmXAE6-o0)ZK+!V>qr8{%_G!S|6C>c^O%W*G@u1kmGJ|=Kgq>?S$+%lm@SE zSGxQ0F55O~0A2RJNj6)vs*@X~PJZTbv~`pB+GT$~*=GBkD+FEkqud;NXi{RAy=-!y z8tGqEpF-4k4|09n1DEULtI56f(Lc|oDKVAQf0Qj#2G(U?$(1hq_!OJ%B2>p-yX^0$ z{-iGZ$W*h--i_M{KpCGR9Ts7bHp@hqe*Gbg)2EpyLq^w05PksOa8xv}Z} zV=K?}S>_b1R?~NxQ$etPoFfa!)H%4E?i;s&E}n+*P!yi~EK3zS$ zLoZDCRjCtcSanAby*iyNd zeLo$gD60z6#k^@t*adL58qPMO(`dD{S;LOX%66m8aJBKZN+3X|v$E3Z%=fw7ZimC^ zbh+}~`T1_Q&sSOL3mGA!HKQ}L+Y-)nIKru6!-4Q0uq|Y9x*U$SjIhO#-=_RNpU_U5(@_~h zK3gpz3(h)Q^PM61LY6T8waPyiXR_7GLY%HvLyKb(9bgGrJ1q$2z)BxdZ-cOI{6k{x z7Dp?J&}#5Mz8*?*bQ>I#v&Jcek>`++pWg`=oUA2>1Kvi%K7<@ z4r^E`q1%QOJCH%tN+pyKGF+~ZrA=kf75i&NKt8RUMl{k0QRf6yHh#AYZl9>}`+~zR zjYmw^@msSsb^#B;s%tkW{}xHOPd6Vr5R}8i1v>mf!QFE-HjdTuDoZpzQm}8H#&ZOB zoT70pFw3Jw8ow*yuG2K$Z-$oV^lE&f;IL2Qvjn#azCm!C;FkpR{xSa!KTgYQTcPn8 zf?X>$-XS=&O5;!TC~t!N&-06?~&$=Xs=lj|k2e`~fiQ zBY&+9UjRLGeYXBi<1++@F3=da!7AL>qVe~F+Xas)lKfw+!*4KgSmSQN-GYywt>Zf{ z(c#w#b_uqio;g4Hg6}ETxcdqnKL_>B;j}^HGX*;Y-z?ZIcrwfv#}8elONN|VXm8FuuYjpT0f}Mg-E7Rd_!JUFD1=p7AaG&7Y z1a}Jl9GL6ReXW+Cg?7)l@=lElft7yl*7)~=or2#G>=r!9t>ae;zD%%B@a=-bf}a59 z^x6eSO#XXxdJ9h!dG~63z2K1GF2QYr3+L5G=ABP|4)r~3U=<+IQ3L5uTtW|F>X=;L~a}e>g*jUnJN)RO2TE z(>@yS>y`2eUM$!x_))=?g2yb;_QfZ7lHgXsg@QW-hXm8UI=wA|U4s8DxKi*`?548) z3JVSjZWnx~;BLX63w93E@{g+3{BFUufA1^p0 z_)NiVg0B_aDfl75&i%B!9gI_mPB~cP|1eG?$~#KqtYsR{5Yrjo8Tt|Zx{TU;E3Re;QFI=dHyZ9RdD9%x_tKt z9x3?X@jAXs@Tr0e1=kBcN$@p->jb|g_-w(>30mI8f-42zDEMo^_X@6`sN-)Fe39TC zf?EZDF8Bk%M!haikKkc~?Jh0v2*IlaPZxZd;4;Av39b~}AvhrTYr(%0Y|qy6uM#|3 z@NI%!g0~3H7yN?Y1%lrbTr2pWg3lM6vRs#MgW&xI-!Aw_!P^AS5d48)w_wWA`Nb1Q zs=Xg9c!l6&1vd*mRq!=}&lY@#;A;dwCio%2uL|xE91;A1;BN)@2p-a)^OKpY^XC*i zT5yiwJi$0Pr1C#Y@JWK*f@=j=3O-Y?Pw+*8LxQgp92R`1;EjTx5WHFN>w>ol{zULw zg8w79Td<>1=hrq#m+vsa!vs$eJXUat;9~`!D%dT!QE;W;^8^P4UoLo^;9CS=Cis5A zt%9Ew+$Q)P!R>;-6x<=$=F{cr6uh6{?*xw(oH<#Sce>!Qg3AP#3a%7`Nb;Wx3# z-lJ>iQLi?>8uM?QCk51pjX8M&0>Bok1{@aEBJJG+1_r@O^ z&iqd3oqyL0zllx$b`{!Nf3e}rZ?0D#vs&lh#3p}Y`;84}{_sSdz$22riB0~*`il)$ z4}>0kIy6#h2Bd*hD{SNWItQ$}e0btLdtCZr!5&it-P zI>Ld%Z(=k3ME=-tRes^W>lf1g1)KcNg!IEP;i~+CYlOcoCcMA#J2qUEU*b;`{*DCx z842mfhR3$wPZHYeerLwF=R<+}ZNHOs0vC$@9D>dA`x4TR4d?uaB>vqawf;CmIP-^vKRs3F-^6?4j}2%3PT@aZ_)To``x5jY8_xXhX*xoa z@c%#Ty$yU+)wwRdCJ7L}GztQu!XSQtsANJ2iLGSil{`P zmV#}GrM2og8ns%I)*kd|TYIh=wY5m^v7oIz?K#&5{EDSLV*Mg2=049_>z%CGGm}h! 
z-uwIC|I9Btdq4YK&-!@RTJQSUYp*Rh5#N=Q-(mSbvV6h9FY|=&%E|X%U>xkTe8GwM zuAKa4%O8umc;>%g;d|5X%E_1K9dlP0zF^_U)IU33pqzYpKEmfE3}0{}zAGnRo~Q8n z3i*N)@m)FjWuxd)DL#)OUvMJ6D<^-O<@5Os`GOPiT{-z3me1!sv@?vG7}A z*Z32@xF;rmTcH-~^+gO{Fy#!tY_O3NGkjOB!@t-sP ze}aYY4d0cMzsK@VwtT_D_lEDv$?vrM!#4hch41&o-<6ZEN{xfYlT7>t3%`RG`0*5S z-1CzBj!P+3>c1^tFy*@Zc=FGclP~M(xW10%Pq6Uq?!VennBjLMp^6RfM4(_z+7cBfNPx!8!{N^&lZy#d97cBhN3?nBF zIqr$cm-U5QZ^-xyrkwf@;^2>%d{<6>S)~@MeKvf-5xysUS5AH_)&cN0ZI}sPu<(nG zI1;`qC;y=3-)O@ZEc~E{@5;%~#&df9F0p*U!tc=1-t@b29lqs{!#peXchF+td*koQ z$q(YWI)6(oUvMJ6D<^-O<$ufa1t;RWa`HPZ{{_nzoQUtr$)!HM{;ocv(DN&gHyTcrIEoQUtr z$(QxuTpv!pVBve)e^;*4Z^P$$a`Ia(PQ-WR@UAYc_t){9<0<$5Y57*YUqjixt<; z>-bxoh%f8wDJNgn*K@r+`GSS-%|BO8zO2XR`h4;Q3%}SzFA{%OPQI+)=X!pf{_71v z_}L!5xTl=_GV6Y1(C`HdAGh_7IOMn|CSTqU_*~HN1yj!cM>E=G{)owU<>brz1osCG zU$F4~;PEGXS5CgXfADnB@C6HBdHP?jT=T!Cy;rXb-(umH@d7`d7=L-cftY-GzoE3y zrvFyDHtw1JP6GVso_ujnOuoDiaZRD&3#Od>gP!rHD<@yxpWyox%zwec_m&@5PQJWv z!S^r77cBg&G(-YFV#eQNp7Ga2dm`VJYyLJmQtCe8TP%FPmiC6f z$B~mS?|<-p5QZ;U_}L!5D<@yx7vcLO#lmWS`k$(Q#__`V7Gf)nvwIr;KFO4`MS zFIf0HJ^Am-b^5<&BFOhybown8eikqA<0*uH^1cf(`SQLC-+v)rFfW;Z-t@b2^5y*) zzAr<*VBrVh2Ya^C zsl|%#?~pH;?sfP^9O1ii^5y*=zVAc6VBvf9=gP_NwBhspAo2w#;=6M4<^3VPPei_8 z;n#cO@5;%S_l@}e5&41>@m;x2|6L{kzOSUyZ?W*b`6ur)QBJ=4C&TCaP2>v}zTb!= z`gi5z`z@dELy<37_}L!5D<@yxm*V?V$QH~(BY`DHeIzHddo;6!{^PQJX4#rLzw z7c6{l{<(7UJ8k%UpNo9KiTJLZ{N~YgsTAKgAYZWXz4~+ISDrO8dVM9atgrMbJn!T``}-Kr3ziGO`K%kF zaV{{Qho0Q58xiF{YC`47;MQjNm5SomeUz>g=*cjV+Z z|3Hh?{gy9S_}e^uS5AJ`Lx!IL+dz9HSolFBj)d>Z$uGA2T+0_Me7}e9%E_;{{MD#C z3}3MDb-?h0IOLIQ{trz6t-`lBA>WZ}{tm-`S@;$w?Y6T=^LoY9})M0{6HewMA^(@r&f!NT{p=dPUmpygj>`GSQXG|`Lb z&y|zkZ28+PUvMJ6D<{9h@_%Xhf`wn~3E!3L^gm)8bW8dzPQ=f07M*79Q+ZJ@5;$9v*GjoeVu-b6Y<48<>c2}_k7=Tw zKLGiH={{ciT{-z3me2hJ$QLa9R`|moF~fJ|hm8;SPat1# zBEBmpU-nnvehcIa7Jj`ad{<7s?8m_U8ORr$i0{gE`hRKy;C>D|{T2(~J06w&9w;YY z_Iu#|59A9Lelaib<0<5klP~*2aK8xh1=BstZy7J}l2 zMArW>9^7HuW5M(~UVgfA=7;PD!u>&*AA*G+wDBWmezuvbl|3s(XV&Qx3nYgE%eAyp` z`=yXCSomIh=E}+6X~X0GD&z}J#CPT7cUV66Vn1wcje?4KWq5!*zg4l|DX{^!guB52cI(zKD6NrPQ-WR+pYN_-EPp2Q7~9J@I$tI{nsti4DKl!w=FU{CEoCpDQPS+w)qiu8{I$G2K)D z-uxH$l#}0T-9Kf+7cBf%xZ{tQ;k$D3gTK~d^?>CI7QQ!pS5AJhY7WG7d72H~B9(5#N=Qzti$BuzbOZ_^w>1{{<6%jpY|x zEPQYMapmNz-G+aUO%60g^HSy#ATsnM_5i+ z$jBGWOXgoE4*rPAcje@F?xj>I?q5c}U^>CaC>Wc>8V{UidQJT_8?jC=)91>mpKX?ZV7$?f;6!{^PJY%a zCV&MJzQw{1dc$|*$fW>U;T#(fcy37@@cW~cY4AX_mq>r)4Fe;Z1{qO?+xFT zliy+af3W%!Ec`N0_^zCM{~wKm$1PuQBEBmpzuEG?WBGy;@m)FjotB?#^)FcX-t@b2 z@()`6V97s=6Y<48<>aeBne;ESe8IwRHsVPAbLBeymj7Fuf6W#P-y46C|EFiZV2{Th z4e!U^9CuUn_~QZ(E&}Fw#A|Q1+4z-Dvi8CgUvGav+%r8)Z`P|uA@eO?Fy*YTLC<<9 zS5AI$r{T9-zF^^d`{S;h{IY$9f8z-z{(^Xv_ZtVBCKY6- zX85c8hA&w7L0;g;Q^;{oOn&|AlqxmPZ}@^Kr~bC#;E$MmS5CfqgHom1CmOzB;rq3; zm+#8S_gnr-byzH_P`sa)!Ut!*}I6{?`2)2#EH2kB1-h@Lf6iSzX4#?UpY%QT$yw`Pr8L zZ8@}K~d{?g1Z}~KnEPovqNBF$JkEf7FPX58aYO%T#C-Mc;J@xO$ z!5=aCuAKa=ziF|m#EE>tbRUoJ%E`~R{2a>{EPQYNxpMNGEq@oT8NOiQ?=;bi#NUR~-6TTzY;amK%cntZ{+_lEDv$(Q}2xqmeIf`#AV3E!2IUw_az;I}o&7cBf{ z58suOf6(&pKm{OQu<*V4=gP?szH0)&H;FZ0u<-q!@Lf6iWtRV+HvWQzpXK4Za-Dw5 z&&sgrw^;aP9)7(eCx4H{H8y;~!q4UfemsR7_r&C@_i*6%vduri6tVnf!(aZ0$#>=C zXMaekQn%rn`WH;6ny;n3d{<6>ndMKj`WGyGzlZP2$#1s&-&nrjM0{6Heyio@TKx%5 z#CPT7cUXSdhA%h~-<6Z!Y5DKtn)(x*i0{hDS05P#EwtrFa3a1dC*N=R+~1wy3l@IR zqkmUUewpPD{)*uX7QVOsxpMMlzj*E+&+r8c-&=oNIr$xue#;jud~f_+Ir*|5J@=<) z_<|GhT{-!|Lqa^2;or``?o>8Usdld2 z#|vNF(?9ZMe}C@xPrhKv$?xC=empUJS5Cfs4}jkXAYU-uYraQ+uAF@3<3FYNJpu9s z3*W0hS5CfsZ-Cz)AYX8V&kK5sgYkFeF~Ir;KEgN>FiSonVU z!5=Zhcje^E_YZzwX2KUN{A`l(6TU0g@gGEqQfHUj_**P|um1dwoP7Dd0>8IF{RvLQ zcje>brvA^5!r z@&yZj4=?cJDddrpFW-~k_a(>|O!vC}@d7`d$am%B%l9YvJqq##(>?iq9Q+ZJ@5;%S 
z?^W>o6*~U*`xSIgzPJ2{d&(*1DHeB9NZ^*v8|}bsWon86gJpOUN=P3D^(b%V=pI_X1Q}le|qaOSeaJ>0Mzx_T*?KqPkg6TiY zLwy?l;YZB;aOKQT`JM^CZ^HZ(O!wsP#K9ji`L3M&;0Q{TDzV`U=3Vl=<;j(kUu^ly zkY5a6u<-ZLbNq-IzAGo+Kavuq7Fxbw-i;T&Dd?e>nDVO=u7sS0YUr2egGyg~2 zYBA^Ih-JQw^DD$Mf5Q1r&9~uk{(xBK2RNTXT=pRX#*gz8#2prMzKmGr!#ICJ+-&8X z4{X~gnzfkTEN-X<-a=$EM*$<2RZxPG>THKFJyDu~G<`8LponF^C+_b= zEc-Wczb0bYkBR$_5zGE!+>eP^_FLlqO2k>UCcHmbEc+pG|0Bv}e5=KFBO z^1d72ZzGoX+xWg3vAmDQ_s@vs{W88!MlA1(aXmP(ddm3E_2ruXq`_S8PAu!$xjvCt z)*o`cp_cD5?zw(Y^DX9jKw??%$Mt>0vVM=>+aQ+jZSeag#94mhAHN4ee9&TkUxav% z#r$3gakj<${s*yq--F-d(0H=(kI#dN<#{ik4-(7sK|XIMmgnjG9)xy(zTxxx48-#N z1%5AqSiXnA?@tiR_a*o}3*xf#44>Z@AeQe3@OuZu@;!s^S=`xS+;crL<+9%RLvjC{ zk#oJS=KsoIuAe2A^|4%!t?|=F&h^p62RjVrdSzl+kIePM#Iin^>v^@@`p@;P8vokx zxn7o7*28lBEwQYx9ikgA*6G8vpowTjReNTy3$ukIwhMDOdLyIoF>N%lfjx;|-Se zMqJ-SxvXE}dLzyMli_py8L_M{d){JMFUR$Ll*{@(u9wsJ6~pKHJ>o2jxt>tt-y1pC zXAw95(O|CkBKBL%^<%`cK8)|z6U+Pad|#GW-iPJ;uf)Nf#y`H#O5AKQ-%lm(u$b?g zYW^d}J-^35EZDmhl3|C&V)TV1J)j`tuw=XgUAk z$L}fdQofhK?;{Y)_Ye5J17i7}0oU7T>Ki8hT)(36TLyDIigs`D34ViR{mVRyWj)U- zi)H=L?G|TsDgFC5kz4$R#j+lS>pvKttnc7@EMi%2RW9;B8UHp}EbE!L{)+C4JB^&< ze_|Qmb39Kh<8_YDiL;NxKm0h}Cf;T-=Rb&LeuMKV#4=yP`3a5fd;#Y>h_fx``bT0} z-^lfJ#IjzF^J&KqYgYM1xQQR%*CC!V&)`WKW4{cGIlo4Ey>-w1LykwE=3vmc=YAoY zUuZD*=OErT$6)UFL98t1{F-)uk&$zK9{oFH!{hsSlz)G&k#qkN;$1d8zJEmAW#wE? zO#E}p|C`0vS^v3T3FY-RejM*;|E&BKi;HdixPJ-d9o9YP3yHgI`uRQ~@t17)d>@gx z)AG6h3h~cvd^ul9Tx|KAFFk%xvsygSq>t}g5&v|8!CY^B{2--%WijVdDZj$H=YBMl zzkZ5w&;4d7FFe&??hiw}_B4aJpA7LkR?hiX;$K+*xPFrUoo+GTzoa~9!{>fC#2?%6 z_`Vo%la+HlIPsq>=6o*k%ho^ccSC%I4WHvt;#2%SUH%@jc&Lpp_rsz5e(Rp|$HcYP zKfW(Y{Hb-%_eqH#vzYtw5Rb5Y&Q}wE!ScCYo#icP%M16L;5i|a>;_gFcL^*zc)fG-xJ~YLx|NZ;~&2VLY!?ezwbd@Z!!0m zA(s7PxL*vh><4qZ#qzzjyMQ51v$0j}2IH3bec-{l)3y7E{L3D^&V#?>!9VukfA`@3 z_TZt}(czut!HpjLv4-bCNgU>`i$@Oos z2iJS>W)E)l;9q#~Z$0=851xQ_-3|W&55CNUZ}s52J^05Syvu_Rc(89~bodiJIN-tS zJ@|eP{*4EJ;KA%ixas}82XFM?fA`?mJotbIzvsapd$14n65&Vc*C-F39 z^T%q5q@Vy@Vm9=zIv8$Ecv2jAks-|*nuJ@`%!zSo0);=w=l;HNzJc@O@r2mir?U-jU(Jos-O z{DB7#Du~XnksdtCgHQ3`GdwuQgD>#lIUc;wgD>^qum`X3;PW$;nvNrYBL~L}95Zpu z!clSqY}po94m2D;i$%OHI7v{YH+N^QH!Gv$2B;baIC?x7RR+XuEVho z$MrbYnUjj+=4Zf@33&O*p=W<5nDB$FUj5H*jphu@%Q{IKGL4bDrPE zu?6vyQ_zJlW_9ACu| z#!-)>0Y@W_J8`7)<=EznUxgQzuPqN(H-@Xqt82pLwY7Ck7;bP1B;v%KA$xSN9t! 
zo94or>Xo$va$H!0RF^kZCQlle`?*X=ad|^!ZBt3*ib|X-k`61W9JDFFxUxy-U*YoQ z4V8`k=ZJ^4wEFs_qlKX6u4!P3maJIOSlN`U>c9(CUTccLKsakw&#hWhn?%X%O;c#i zYNPXkC<3HQ8!MM5Sq*vv^W^S=+NQc>lQ-ZZu@pKSaN0of#IgVQAzwplYHBcw}yx^8Xd0GwNEhA`sPhxj3wyCs#eg|jdB$o#o= zYihmaIfk;hu6E@`YicVNXWKGRSYKbWu0KshSb-yAh7UG31&?Hg$_R7Q!&HzslUp zxTJAql8$>@t)hm8+Pb)72dBkbvh3>0il)MbhVpf`oUxtMIB|gSNlW@DW(gR5l(Gbj zJ_=a^2Fh6Y%D{|@hDwY-L*?iKtZFPS4&^mguU=kXpI_ckQ57ye{Bf+y#rT z&M8`3RJ1fVyl!3bx}`J2hQ1bBs%r?>*VR|lHuXo=l-%lNjX5*(!q-+eRYA4I>-u9Y zouR|Z3DoLHUR2j`aU~`m*40-QmshOnkIgXiOpr+hh0tt(WL;KSQM_*5((ux7GMZsn zvkWW8(s(PB%juFpcJCZXARDDU+@R-7iZHzp-%V}hwf(p$pf}4naT4WAU_ovDnx>^q z$lBGp__w^MxT4w#B}(47s=B@iBdtgrTnqA@YKm&t z78U1*uddToClMoOpp1Eegu%~4k#xgxN;+azJZoWS0U{MH4$P2P*$Uj7)FC!n zYDY5hiFgk8M@H#`l{4^!yOc=W5+x+A<0Rt~@x1SZCkmoZ*1YV5g~3~doP6{ap`|Fv z<))&d2+c!N)LT}fWyL7@ftk)dPJTvLLq|9_rw`AN-hMsH3F#RRGa@qfW0@WqMx{F1 z)r=JwZH3V!>prquF04D#koHzuUNtr~RM)O_%|*1Qx!Tjn815MPIY{@16uW3ywCT!O zvZg6wvf~)tX!(koHH}r#8j9jy+fdz9$>c@lqU!_-O2TkQg^Ch9P>NRpZVWsXFUPbY zbL(nXRIfzrqVg?D=209b*}_egjZNXzjVp^v7KN5J%?O7Bft+RKjnx%lQo(b z@Vat&xW1`j0kvc*E9^|PmtJ!*qwd<;q9v^RPUpal!z?!Njpb|M_0@dzvUYWpW@d3R zGu%{nY-5?vr#uTVAw;zpTRE$I`Py-zfsqLEzj zjuRsgm-HJEI4bGiQBu_b^@{Z?@;; zm_e0ogi29UQKAa7i;4r*9-20&Uk1ukG~>E{DJWDPO8=Z0v7G)%xp7D?Yli8H8}p|h z@XT4W0#D-t0d1*m4?fyn=OiNOvK`mz1fuM9l;uh68;j|HeY$ zoLSk>P}guwz05OR*+k<_5zl0`u3fV_T(2J$H@fyU;)X4)>&EF^h?~6f<;yW-wGW!n zFqt-XscszEhW2KeQ1Wd9Tp3nul)t4$($<53hF)0}L2 z6YJ0!i$}at9`HW92geYol! zzz1@Y?N1H3UiSvlZ(TeZzx%6|N8xw>_46n^M@Bk*-d<%P>*C&zws*bmBW7=rnRSZO zrq4iax1J_;6zRz=x1GMy1%d2~%A3k-Fn`LWJ>e^HC#DTAU0`Rs0?r^?SNmRxGc1ml zM2(Q5MKjF6S3f1HGMYJ&Wp-n~r`WA8I^eZ?`=6it&gjK&Dku+6RH(KlCcqa9tIL3M{jdLf~o-K`6 zWps^=NZj%tC5bM~QKESEOg!}D`T*=%0w2Vli8!VZL`5y3J+t)QEO%*+%I)nw4N-K< z;mL8wo-OU$EXK4Z%5^hV)so)uyJCgrmru>gBrY zCK1lBu4@b<&Xsxa78DzYrA>iAP2Eb&Y3o@6j_4LuXzwA(si~}8iO1<$6d4&TZOX}B zh9@fJW)%YlyvA8%sFBElGy2J7VqUpk80_dO{?mb^{Hm^PtZZm1T767nhD<%gohlnkui0_0!zXTT>eonvr-i%~~eU-H0KYfz)tX2vvO}pKd>qSA9Cv69(1Ec$pIj z<2htPr-^lbnW*AOn`B9#4dUoE8ZU>Jmzx5+yvCes%XKqUdo54UdDW`Mrd8qPi(KlA$ z6}fPXKP%UiH=u!ER@V^m%-*WFW=(ZNrFdsZWz! zwOC7wD&K_Vbxdz~1-gOJceH=z7OX%+y$Y9CROlDKmVtyv?0n5Eav5%1w|ZG!O=EZk zR(R_bWh%V9CUT*BExc2QhM#9RS4b-rw8FYtfzA-RJ1dNn73hi7Hmv~0q6yxHa_}E9 zZaQN@Xvw^yyj(#2Xi9}wSFWyDjTrJ=k9Q+*=3{ex0kekls^!&~(8Q?;(`_nj!hk~b zJZF7fWA$|;K{tH6PlD#UIu+(?N99mXquh>;1L`^YTo5-k;1B0@c+mohTiaMGgtZZ> ziSP1tHOt|C4LZN*(em{QbY^8KTVTdq6e_&qbqVZl9Xd&ohhZ2Ggs)z$?=F~keKaYT zB(B&QIP0>y>gD~jXf|r`^ajCTF@`n5Iu2$MIwySb;w5tm7l*?kbZK(TT&`K`;c0aE zT(PbJ$;OI@)#deEOcA@5BbjayXUg)WV=9x<(E}hon}!HBRxMb{sW~z#uUS)$G}OtR z+%Th%TVGz?uwW5fEiKeDZbkC~b7rj#X~g>qNa6q zRGi^_oV-s+lXRLAUlfOGi;bB#wyXPcnQ^9Z*GYCC07WL41Myv%sIZM*y`}+wIbn>I zD`3@iIbXeabzW{!QAG{Zy;iqtlH{XkOy|bbg`;nvBPGSA2OVvgdbCF6Xj@=n&>_8M z4c}&tRKo;)Mh>#W0nezUG+dgEdc1U2xOfp3l}0Hd4;BPA{EWAi?CBV3y{G@yGZ?dcj;zuM3{cRvgg~kbO7ZnA-uqnFol(JF0WupW8&U z3T`G_H|PoHOTh8w86pvI%5NarAYg3PM>S5~BEl#}+r{W$JS(B1g#~62&g#hUEuJMW zU~?Z{QH^H~<&D;wAA_hTG^sMGk?NTf#vB>j5^~ zq#8@zK;|GKQ7yENL}kMQ)TO0!!-xU2#|Cv)OoPfhiLV)`c(N7QVh8|3`O&k7+b|pL9zdOgb4w5&LUf^Z08V?DLEMV;4XhT~{0C zP}@EMKBlpyZHo3j&g5RI4!}YjD}%h`|Aak07S(YXD`ZIx{;y58LfW2Po_SMJ_fnl| z&gs4Wx@7MBX|G%N!Ar!lMAd^pzbW=S0gnGXb|KrUs1cEOp-Tyx6>Xl-cpRxsN-8P? 
z-y8Y&xBR=6e{bdA+xhnn{%zskKk)Be{Cf}oZsXtk`1b++eTaWs`S&mU`w0I&%D+4K z_i_Gxl7FA(-w^-q;@{o;yN7?<`1g7KeSv>p;@@`ueT9Etb3H=TcH^6xqPJDYzq`F9S!B=Mg}=j^iz`^Jw2Y_7l#3hRP`;l7HC z$@C7H?jaLqFMSoSk*SSL-;?QSGL1pi?|Otx=aA_>GBuN_g-jok>9=G`N8|4LHJP@M z=_)e4M<%*PRWT9Wepd~d){zM}bo(lvC)0&wvc6?GF?xmbI9}pnI@Ad z0m4963YnZ_I*CkIk_q#+zKVy*MEj)`{~{A36KP}X+K+LqVhqHZuFuIdgG?WgX$_e= z$aE{2ULw;|WZF%pZ^%Rno+@k*pt@Sgluss-e=0VT>2@;xj!ZX@>2GA(Or}@Jw1G^Y zlgUdaD}>apOUN{iOhse@n_1U+WJ;lMd1RVJra5G)AyYb;(kaG7GA$z0I5HKHX$+aF z$z&rFE&{WzSTfyCrk^lkRoq7=8hG)ak%{5_ zVArE$8cwDM$aDgk{y?TP$h4JA>14W&OinUgO{Vk6w2n+AWU3?6S~8WBX)~EFBGZjz zT1=+9$mAr`U&%C^OuNW5l}s;_=?pS;l8M$AE50PtF=RSGrbIFg!*sgKM5Zxh>cvo1 zkwzvOe=9P{^bwiz$@DKWT|}mKGOZ%h9x|;V)01SnmP~&k)9qy1MyC78bO)IpC)15& zdX7w6$n-Xud}R8ZOsip{g4TYn(5<)7@3C{GOQwKU345e_=QA(9K)=s5yM*oMj+`)e z$&X{>W%w8gZJFi#xBuKJ{P&-8MLJ(;>4g4S{`#kvocXA`#2owb( zep>p9;)co+PlLC* zjBlC|--^caBSNOvi6GPg!+cRdGp~%zPAgBXdwV z*<;MZknl|r#gW@sQ-UIKRYh~9aEgvomT&(URCxFuNqST)4q4F1t*f9g&10w`l%1$N zPCo=aS}lmg8jY0bjjreDjrcDL`lQ__z3rO)eCeRUK55q=tRCsa!BF^P$a_@2Bg>W$ z4h3KOlClOb3N|a6js4Nd_8E?0r2Ww8X*ILZTkLIYh$22s6k24Me)vWIkp^{`lbi(ZWV=U8A>0Mh_<}NN-)j)0qOY`UCEcPtS^(=K2ESWylGcTuL;gSp7IUaXb_JTr>8=i>I z=H|>uuXg213?TM~GOx#rR^+K}SZSX)abg0a3gfA4@X&DQso|qtNo{GFM|7om zwKXfLJI!-QzpT2ts0sFby&J$wv}t%8^8xdL)si9Z96=M-pkqkwi+5 zh~%wb>#3?OF2$#0RavPlOfiu2q8y9ru(GOlMRApfl~&c(mDQjIHzY9JDG;I126Af~ zYf8yQt09`3{gm+yr|_`-d`hbt)>fyGe=5avd+Vuc^^X|s5f#rMa^#}YA_t|#^=m3? z*u>JcRh28)#OmTzY?8OSZW6E4%lJ>`5?vzGrKEF~%$_+@Yq}{7mFvnpUVBZd{q&mC z?PX0RWo4xe_UhuM%Id~ydqZPgU2VO$tkh27*rbYT4?{X-_6c+B6SD2hKA{|+wAqtN z%T`aSX{@T^zU3fqX2SReR#snMTi?Kq6Jcg+SUK^PO>{dK7F;mXp1W{yc1{AzUpRMZ zUXC5VnX~Ljg19c+JcSF%ZCr}5rjk!(c};0qxqUuv5qX>o^AX=}Uy|=exN#{B6=hXb z_Dj&X?Gu(YRMf61Ec1Fvgq&GdkCf}ZmFRaStS+l>Ku>RmGxzH_6e0d z-o%R9>atYAsV88-Tv}OQQ(RpJf{;gyHxp_bz2Ru~S!az)0V%1eKu}gwT3L>chRAdk zWallNo5K<5Xj3Ou)|6B=mX;;3>}AVx^Rng@at2_sX17mh@RrUhIrGf4G_ct1%C=9a zF0L=Bm?gR@xYg7mNPT6AcS3PhC4^=4aud!+soF(!*SHj8%Bh1mPa5B#`3Td5g-CpS zgRrm(3#Sk*bDdePJdY>>Fj1CPaw=R#3B4d zTPjfX!&T45H=H%Tlu=XhTqp*7mc!gpf|D3aM5C6i6di!8ro1+69S-ZB{lDaqU)+5@!{~4FzRQUXXnus}Y9zT2)(JJz;j1 z*IQgtk=AkZH)i~7lFj@26zkadIqVE9GDnzi z8?MBRIW14YGBFPqZ9npOi3}%jW)3@_kuE8unaIX4HfGoiHhyUKbvG*3KPRx;mGz9> zY_ei$?@rT1#$GfX{a_-y$vo`lMD|`x;+wr?UZK-d-^kC?BQoc|u4=1s|niB6zVqcqWPbRTzV~)8aiM?nU(K~`|i%skqp*#{B#Ee|y zVV`pB3hTFKcB^9TF|$79iXY7Efa!Dc1|JI8r!al048s!Y{U+J$qGM1^0*b9So_&O>8OJ_X8X0@vWW6&E z%h{)6_r^R5OZ2Kihn4waXXE5Y?8x6*l~-eJ z*IAWMV{O-16@Oej@ZA^}4^+R4`w?ZcHGX7gyz*fDNHt#B6+fycUU?%PJ4T|^sD7?h z>exAqoihsjI`61kttcqlO;+}SVpWlt0&;$1vV9xJt~H#-v{3xQn6JxzKj(rpZ z&w#~-wE8R)@3OK7V{tI$!`P%R;*_i7;t~D&xOjxy8uxd^dDlAPTdUG-4Ko;JemsM^ zzA1^fDJ*Bi-3lwjO@@^tVQ5-Fb>gF4W?l!S*Dgn@w z$Uwm;>tse+dtjf%3vJkeIJPheJ3Zxze~M$zD#w0lVc#n8uUObars2CRNMO{X7WOCe zD7+caSUHZ_|#PsS@JOpo7{pq$%jVeHP>VGky--&w6&64>3=yAc8? z=!bMiuqBKwvA(LXi*DqueYcHOj+q{Rj!A_1k1?BJMmon%yH9z?3`YJB>|dKIVE&i+ctOK(hEd~e zt50Fsym?ai;SBjb*?EV;R@P~>4riw^cA68|YYc9wjHNoaDeO{%n*%rHoe!gi7~Ezs zHp7W}d4=vaf}y`S*NNvHu0{3M_>qMTQfEj({}Ohak6QrF=M;8>!43Cy^PK-s*o_9a zBF2gmZZfe;+K@{FpAzmju}k07+>+Q@#?~hM-NZKXDognt$&h{gCA?{3Ta?duP~rX< z%#SHaPnlSo@`~swKPaqIspM%{)6?o%B^m|r8O;f^&-)|Mxk%E~&pzk! 
zmlXDl?mx`6P4Su7SIR=l1KL-@3Gp{8$_244TE{K&mdK_tHZ5^A%c|jN_P1kzn3cGJ zxrG042=*)|5hLd!I0ziWSw4<=Ctjzp3v{>W_B8@RI-73=PZv||;b|o?s(({&F|pf} zC&&?bPQr|tt+}w>#9mgOlx`zfIZjYmKSrI=>*)xFYF2OUQP?^ipEWD?Vm1#l1o6&b zPB^C;sQ#p&&w?MxE8}7pN60Vz?MuV{$Dv@~LeqK6WPK<`dEfLp$l_U`bP`!EW4ZBO zw$Mqq$12Iw6W6l~tiM;3mGQSI%7*ygD9W{@%|S#F>7N-tGmL+vVr5yVu<^^nZlE#A zdYo>jy7dtdU=+F5j8UXjPk$tjnt+N|FtUMpcoup7Q!P{2M7!XH!E9sCBkuD`(hVl| zmI59k55w79C{#$=f^J#1tJ{bssu0}VFzYsCGv5|2}nHz|}~>U$tKT^4_z!Zwi$ z@!&Ycc6R&{HaEUrIY0gqr3gZtAQygw7lTrob&igg+@{4ZRc6QMhuzYY)Xew?O^WMO z#;OuFo7mcs7~{4mV0OC_f452bSc(6gN%>Nlry&O|r?i@_55}?`W;bJ-W0IbLcoT!D z=sOePe19y&LZT16J=!uRoEGd>OHzdWc*~3^`#HKjdhDS5vgeS@b0+ImX60p5hDMU$ z1$zuK*JCl(z8Ga^4CvQ+0XZtwy4fTVb6&1sY=z~0^q188N?OmbEYQ;?+Rv~Q>UJ7) zUPRCrmBc?$c~T$3^Jh-h!$jMOe%y&ZE~|vm5muxdX{Yd`V^5EWZ?sdIMBd#>5^iig zLZwc`o`65xRv;A!+>SC}UIo6 zYeUVm=AC+Ak7!tfOUXHfbD>FfDVjBmjO0I+}RA+1GtqS z`m$|+MG&cW0JZ|Q0n!-K0q8^DrULe$|E-u0Ixw!@1=xBXVQ`M6(gk!E zBVEAE5{Pnu&FIfM09&gV8;9%Soe*HF0DHWQS@V(3IviF8>^XfggY^&qMMf_z$R?BlrSiF>d(3 z2>gJVuK_=x?{x^efbDNU{vdbUXQ%)Sy@g?r+}{P?0p0r`rUBYIK{vSr?gk9KkI4sM z>j&uC$o)g)1JM34(nGDVe}Z}i*baCWpzl+NuYj2<>KPzT1hFE(P&d?GlKbc2FJQ|T zNSFNojdTH`6e|X$pG*uss#@13JzG4*=bOY$?oV zAq-&M1mGk)paak`5$TaTpc~K(wk9%_J;0kbQ(71^7oo8`Ow8hlVX^jDW+_TwaXrJB z#h1jC){)F&9|L$ii)}f9SvpT=rpy#(YDr-UMX5{)oyko0vsj|zEM{uPUl0D=6IiTm zB2(NGnW+_jwno5T`YCn2nZnd4t_i&&g}F=$@QOr3x|i<#M0h;U0$H;@rv-&>vXp&=(Z7z^w%|wE)jONb_E% zwBO4xLt$~9_c4p@0T$QuAhWi#B1|ib>G=!t@>dqy{8!NQ2(#M%20YuDxph0@ZD%p9 zJ6KHfV=Tt^IE$%!g2fa)iNB}t_cZL!z&*qgns+jD=d(<)?`CGU2kioVNHh8mhCaiG z{=)tuGdKSed1_~h?PX?d$6xa+@Ou?}Lcf7NLup38fj&d&e1lnA-(*VYP2hZ!nVLJ8 zDf2C6V*g^M5dNCqM!k5Knb41z*?WxjyvNK%=u1MK;0^kWBJ>w^AF&wrG3fpn>3xDc zegd9-$`tewrdIsfyO^n^3u$z*xXjO(;{J?TTDzIG=L;6gzG4>l*DSv0Yi73n2j*{> z(((<9aevE(wS0>>-?A9{em2aF{-EbOq|wV_ZQnCf5&l}gM|wXnYtfHP>G_exgnnWs z8#G)y{>$QR2UvV3{)+k#4ob{B``EBdreJo2HeykR6zAtsSexx8u)& z5kE6oiEW2u(|o)V(|LlT^qim!vz@36bDXHewVtGy947-e{(A7|u!EMdiqbh&F}qJu zlnV4>C`FnU^PjQ~mA%QP*b?Kc%^+bg_Y?H+?mQkN?zX$p?rlSfM{z^%;AkU$!z3P- zHNr;50r2Jz6)(kC*6H+&9!k8y@;8|K0Q5Zwd1pxZj-s#87;gZ)OO5`ALC=Xt7!MPz zL+9f#=(uSp_y)-La$~#!{AU^c2cw60eb66dzJY$7Wx#IuIjSP-M3vL{5w|( zn7&fLtrY@VB~)ejwn`aJLM^GSm)ow z!l@F@me48TVhJyjuw25e68=HL2PAw{!jOc|OZb|E?@6di__c&TNfivlk0jhlF)LV<>ct(wdnr=pZYZiex)8 zS!DnTwD5Kr-YwJdMFwL&+3s652@l6L0@lfJJ@SCH7A_?HGa)b1&!_ooP~vNs{&ky0 zxK^2dlkC5GCA7=%P6_KIY?J;i5;6%p2FZs=rT;fNAN@lJm*{*97EW{bX@*0Oh&65-k+AFTtX5a z3Sa9W_#AQ^%%sc3_+ie&e>4xHpGFUxDe|SYSK*6xCgZyw7cT7*9*Q2j#HZoWU`WIF zgm4K-7)?*}AoOT>?6Uu9Zx=3@d-303bZPOl{AQjVS&o{&p8t!4i(BH2rmtv_`qL`u zD#9F)ew`8yMwdoU^C0x}X!tOnq+h#)(e!l6_&WU}I7`SC`TRUbz>!h*pQO9yZg&Yy_Im|esFMG_8{Pc47GLGmZCVBV+Ehq*QVXswNYgVEPM2z??J z)&r!xOKV^BqqQ&k^+?Dq_-OPU7=%8}zbDGyCh=?jHky6l7tMby92+hCGbQv%$Rpv? 
zF7Y*w72z-s=U#*HRg0&k?~v(h?i#eo`9S6=0-sO9kc3)%-yrlC$@sJuNIzN&Tjvw zQDI*^2PTA}ukMz$jQqJP{O8Ef$uPlImrQucO%EfmC6j#-V9Lm0IQG)4w6rTNo%ots znw7_!Ip#ud+T^OTnw8!P4?mkeH!Wv1^L0WqIsYkID7wU$HhBeZ3-FY2t&N9_@Sd`Q z-%+eQ*!9GmxelJdup@}cNz*1eeUYTev}=cuwtJwY4-xTTNl%s8J_HJ$Z@N-G0Aq)U z;$T#SFLdH+=%5!mbEY<|ttp`r=rBdFVTjSERRVVz>KJZWO{YtybQ{*!Au|Ih|qu zAvI*g+cxZxfny%LDIlgjI^xh;0Al*|78EksTiQg9!=w+f&H>TZ=%@pAt_asey=Ti- zh4-NyN=t_crYB^+qq|S$>y7L>m$=Xy(kQf|f5Tl}XK*;;%t14abVb+wf>==dX|wpj)W&2M2>4v=J+E=Fi^+4WHs(WPM)gMGNeW$Nb$+4cSINtgO-CgLqUre z4F}20ArK6Ol*1_^Qp_Uifhejd{tOY-Kvb0$yD`3~dOd5=OmfI7t%85FBd%%ncU#lX z&9kzp$%CtO4Yjx;gxg-8)edyY&_*J&auftDC~m-wT~A5HD$mNs;`&lv&EW`T3H5cb z;KpJptc@!?D=Kjvte*MW!w5)rC1GueE8@QPqD$M7wQe-(E|qTw6p+Z|Ma%*3b^BU{ z5PO4G2QwZ}u~C(wkBaC(twpr`?b6jcedB>P_x_jSl7 zErS?t`TIKX8t4EF$PfwQ`X^G_0Zrf{Ov|=7^mR|Q>_GRfJSIAOShPILZYw;t;9k+7 zB39)6bD@C4*Bd!PhsUd*rPsqp8fQcfFc?NsJBY@O$chq4E7VkQRU5vJgdyS5>XcEp zK@>)*oxs-_#mhmc(=ZzJ2Mluk-lv)mhsH?#ErSrb1MmF_ZXOnD9F$So zNI>q9=@apV^cwy%k{?uk$^cDOw3LIFZoh^+NO835t$PSq!)<>MY@Pi&gdt!xbQgo6 z?blmLw1=he$JZO(&qnI$@O69_RWu8vvpuCJpT%EikegL{(|6_unk1gNMMS)tDjl{vERSA0o!X=>3O= z@rZi=A)+2i@4r>{{zF6gi}n6t`9^jQvi=TIIb?xGB|r4ue`vYu*G!{op(Zhe+wKth z>2S*)3|qhEJ_M|W&R`I<(Xy(}g2U_m#k$v$!l+JP_7!68(u>EEg>Vu*O*!JZ3(YGo zZ8g#SWN8f)r<8gYmo+q2m*F7$r8`U!E6-GARzrbRZM_Gpon=^guCA`FapkQ@%gG@> zZ$*9W8jo=y8S8mK6N*BEwF0qPN~$_)Rt_0YTO=O2^qA2IQp%xe$(ysdpwQvrt6@mB zJu-B9Wo-i|dpcJ4uxKer>yUvklBQMFu7q+uz5t#*nHKMe_D(&3Sp4>Q=*GUMuD-Im zET@`6s8Jz|SsSWCz3IhuPyka(iE^AhzX;9-Qdps7r1zIVSy|HnZ81638l66~s34*p z(UM|VKjDz`USCF9C331czc>E^7&GdilL2L2vMPt;Z2kpsW~b4f$}|s^_|{(lpIDJ) zm$v)@xQYwIv~7#TH>4dAES}du4-{n)N)aiCtw)YU%h--1KvC5onp7w8JyxT{Mww=b0aNb)-qRav!b$mJj4fPtxmp=-j_}lb*eU2Lu^)pLSh}Lp>nX;VQc_#f zP>CfiWSx%^A|K5I$Dc}CU2?D%hnl!`;2_gVz`^>QODIT7M34;82cfqgxC6@3II~TU z7G9dy{91({?V*;{S69{)d&@XA9Wi_z4{P-320iuO8cM%YIIdV%R$ohYl}E<1ez8Z> zC*}z+{v$w-h`2~V~?!o>r{a4Tx78a(A#A!V0#kf%`016wEAN~?6cD(rmJ zrqDgY&h@Fm+AFNUNS?k}MAjI-Sy-^}$}Vk|=E-E)N|tgxg%)mw(7?x%ZQQ1zZ{;=! 
z12x`qin>GC8%5X*m8;Go-*#c;SUR{l z%)U-;k7AK39P1%hQv4oih3rVyUT$66fV_~^XGPkp8*0KhHw%}rLo+#O94F})KgGi= z%;LF_&A7Du{CX0`>e{iRqqAUoLuGYoU0piqs`C`DZkpbR3NwAGEAJAF8(8k%7&`+9&MPaK>|VRJ&{ODP zH^(kvPfuj#G{@ZH)zHqDRkgAfdO-LBJ^Ov^0jxX6`z<&>7P7t9&7g}4&hV#{(WOG_)+ z8|En`ohYH4SmmUJM0 zM&VS?+O_VrBxc?ai;TEIFX$W+eISvjJYYdJlC@|k>9HUZ@+|^)T5xHVh3L~$h)?HI ze09Vy>TN2$#ctGoG1^s^VU?8rHnn11A{PtFqb6<5^5L z``kK}{Usiv3Y5yB3cnIRm%SNJ4TcsH^xWozGr+dRrw#7;U_1(`ib|y#^@4g^oKdNS zx-n8wms7=Fgnwr>{M+mBX|1QqgJeGj&ec#shIn4Mlb(SX<9ZeQDuIMGO%d=fQLOF5 z#geVm<9P2;M%{kdtUHzczxK`qFp{F|`@Pw4UjeznWjN%JV`p-0q8zhF7MESJ!v;b9 z)S202vKz=Ti_gq zSNF{HB+>8nd$)JHGrz9-*VWb4)zx*(obSB1d4qG;77Ls=wkVk-w{B4)N%C7zY==0u ztEj9{g*w0Qb~(>)p(f^|h`+dnr2orgd3ZIa&l*mLHAtT`x8(Hs(w1bnmd~#xm+IQo ztj{``=5cL!-cV;JJrwTWvX&}?va^- zr~~xCRyvmYhOshetk%3B^!d%XV=InzNXvI`MRB5`<4*1~bQtBs#AhgW-1TodF6Rqd z^R$W6;nKv`ltpQ9yEFP(4(_QGNKV0zq-hStbGAu3*Y4!ddOJVdrq=oKHg(QT+tfQh z+or*}W1B|j?rr8c4{X!qJhTnP^;C`_A9A0g!w4Vpe3j^mP6`e1E$|ADF1H?@#yUw;_*DE2bZr8Y|rJ#L)&xL&PU07f=no0 zzfkc~$1!!x->uF(&dH(krIXu%BIi851C<(_(S~Et+OJmGaMthOriAnQ4iYQp;T_51%k0zQ%v$F!JIqt*=S+|*`B&JL z+^b}`S=70V7OlEs$6DvA9qXKHcdU1A*s;O6WyeM*zvCR|?j4()2g#wa*H|&~y7FBe z^KbC);Wzm>u8yN`@$cN*d>o@AyZt7){VutE#!hw4mv*Apot+w-3wLUCF5792a|J!q zuiA-Rr5T$jsOp&iC;uLPhmYgxIQlOC&b`OSF?Aft(RlT$6&=(?h8}D zsu7qPrpd^8lpG#^r;4TTD(jSk2GXl&#TB0OmHMgCmHNfbJ$Rvh-7LynG*Ku8ULD6yr#T5`BCFiKOM72-?OfHQ>pZ&) z2OjXkE_7d#naV;>??SEjl&4nMTIbxjYrS*pu2kyo+UVS~>l|m@u1(JRT`7X+D#dwx9Qz6%^IzrT=-2o- zLeqKDk>>EE<2Tg#OUdnxqAm6m&-d*Xv(K$@e!UyDxO;ElHEW#T?pEhNyT*B9xB7t7 zYn*3xqcC00evf^NkGT;(=G8IxZ3+$UJ5H&V{{G&+rJbFeSYOzkZ^QF;C%f0PEuKSo zWcMX9@$vV_VEiX4N;k3beCH>-FK}LeKNo<7TB!5L?i4S#QtJG3cc+Sa<)dn<*8Ff@ z-@T9pu6REM^Hz4xwsqP23%0H$Tlw2mlx~|jgWeCETjE`3wP9X`O%-2Xn#IM}@GLIAE~FedLMD_+9^hl{K@Oh|`_;3S zI-_jP=V*bvU(Bj??wwWV{AyOc^BX!pwoWO2&By$h`u#B5obS9dYfidjRl2vGE5YyV zvB0@H(j+gf&R9e}N6y}Zo+G#H;mlk{Z7Q9qk zaC(G-^=A&&*mHaweqQ}n$I-v=@6i`HvUQXdD45?V1hIpIefpl1CVyrBxX8aP!fAGA zgwyPPaya)QJ20V+<9}mk<~zTSWV2-aj*4G`qw^A})Q>kvGX8fZQO6w3=Sz;fM8b`l z+teiQRB)h)bCq+`p6<_|?>XQ3<(@I;(!FY(SNEn^kcK>dg}tQAXuq`dIIrwUe)>;e zuouPmO;$Cl$E#}Z?Qy=cS1jtBPwtMr2Hc)mrI2&y7TjbMxK}&TV_st(RGi`f!hv+k1HzefC>9)O#W|_@6}{ z=HF!xhfk}f<0wx+T*cvXJ^Jz9EO+so4^R-v7AejYyS+m_$t<;R^bQVCvOm8!ztfoK zx}3kUKV+X0U#X(bh5OX1c4P<(5ozgi&ZPG}c0;>q7+DeQa$ebo!|Jow<-D~IhuUw@ zlg`uoI#ufZMm znFpMk_g%=1nzxa`$k*5bF<8H^dRH279w(zb8BtQR1$l^_wVz7vc|(JXGnwqs)MU4A zKT6|PULoVz1DuWKJKx`LZnv|3zh#o6oYVF{l1u3Q+vxZYn&yk^N%!o>jsFRCu?se@ zJ%F;#*V)h9g&Zhusr=J^l(VS?#`(v76p0I~XUx@$qGg=d=)?rsqth4japV%xRJS^} z?r*ufsCTf>x#|EFG3SN@Fe^Kr9FHI1RApA{=sWB7r_@@%Ke=@&$632G;Jmi~e9q_t z&bkBWncRUCT2G;!V0%@qLp@^kvI8v7oh!)N@MS81>X^Hne@}dmg>Ag{$UO(lrR5E{ znf+<9I`Us^oL9$jb4A&RYj?0rnm#a_f|0J(*DQUw_~NXWfCZTF&@k z8U$iN?L4&38Hx;d?YZX;tan~Kuzb5wSogj{FWMZBI}YS{+;bpZMT!B%{}GAd6UyWn zG8v_FWUh`A%DmiraoA9tesH<>;;=5AeK3XaQ4Z(mV|*NboZ>-mTQ48H!1?AOu{u3R zzizeecN+$LpWDNFcQeg_M~{m_HRJq4=rK{iiw~hZMnRzC?<7I5AuFq#AG665Dxgm) zb2>UR+nn`>9EYAm&c$;UIgib8XeRd;X(ihS8@&Bl{?_PJ=~nC|0?RIb5AsQ)i5VlTU|Y_T%J;IzWZZ^FV>azFdvImSC72JQJkP7Rb#c)x{i|>E!xiU)4}uJ^BhKIQL?`qM=9=pOLVtFaEV8%e~6ibwnM9UsKumbv_Qi z!N+lR%)QA@<5tn_PtIQ(%(ebrZLF^Tg|cNdcC|R?HyX{?EzaAG^x*c{3Qn*;c%;F0 z&ZR`ny+z@4#`su0p^opWg(>|KQ&v>Nr71s%?9&tMl+2&nk${8;1wp{;wQC zxB9&sk-pV=c}~oE=x}c#bY@%%p);FM2))_l%ruhS`5vvaeN9s#`#N_WUR^!LZrVlB z%T0C8IkX=4JHcUj>j-~QbWxlhy!Vk?RWaxD&9%;rA9AXuRA{`{Jo!T7%n#AUS+@}< z{I57jR-y5u4_SrA#9`Idk13l)3c~k3R8fKY)5G23XY|8iik}A#=i=udA2u#maJIYt z@WS;AwsHI6T>Rt@wTqwg4~sch`Y&9tBVVQ5NvWde^036FfGes7eX%` zZWcoC9YMDe*`lh1vg;{|EBQgC$H&!qT2h|ksE+w3C=`0j?_N_Ajn>rUXN=9rRZq;!Z#q14c$3lU@vVoe 
zA|o>*qcbLEj8x@kj@P78d76knI?Cr#M1+w@Diw{!vx!6^QWJ^LPx_TgWpgtM|0cE^ ztC^_IZ(b9r$!#(^qlW%RCT5OqJW@5X`RK;Oo98x5#K&fk&f02fhBqIcF-$HE&ln+( z}TMSplN2@pp*{u6_ zBMM=zI+`7uk)JV4!gyqS#yCZe|K+N3vC7-l*2(oKV~X7Df|?6(sI_L?jg*nK?ps6NzY)e2OF}X(ALoJ{74+io+g{=c-02WDak?olA41t6%!? zPm4F1M{7{boZL zyLy9J&;5jWI}dtc(bHV0ojP%-Gy6KOQ0X%<|+ZVlF@KvdWLt{;Z~QVS&E>!t6AsUxaqvnA!gb18vM4{v7o4 zp%-Rz){oH68#C)i7-(Z=edGXz&RXb&**uK@ap-S=URd;_+IiQ1liB|X=wF0hSo9N? z-elH~e^6W4yG5s;u;?TFgEs!d@i&?E&OvNS>#jgA%tjplJpZ7L|FGU<*5?ma#?BD* z!fc=Q|7TNeE_#zo<)6=zdmUP;j%O2d{<+)AKfERiZ@0sLVevnuObg*RnZuuh{w3&z z!|KPO-+Lj_A1wO3<-f`7{|K$Sz}tN2g+*`X7fyeZS)XXq`ZdrCi$1E2-Rozvia+!h zLZ1VR-t;fX|7f%J;6dnxMIW*JH(7-r`WGbpV9}TJpVz?QZOB} z<727&=Vr2R-amH|^Zj#=b^q|1LA ze<|Syi#}!fZ!-Ho0sBuwFC12%I81x+4~aim^ij)yliB|q>@QlRub*&Oy;?Jd{J02u zVbLF9`7it++2{Pv>s9gg6qwho;%x%V>sRqsMeFCW{fk59|Lw`Xng1iiLHU0I@js5t zIsXf@ubh9f>^W`Bt#mM%(~sBE;_W)kGYe_lh(O#kX7!Rens_~(&1r@yd- zKf-~djhWLw!ay4{r(YKOW1tsibH08xwDZQydXrh7gFXelu;?Q!rcLw_MFsyypzniT zIIKPo{i)Cki#}@j$Kf}b!#@H2dC&`o)kosmgUg^77CnsJ_?yiBr*Ps2&M840{T_Z3yVHw z`EN4&KMMUd5`J)4ea$@W!5^R(7Jb(8-(>bb2mQ{iI{k!2pR@EPtMsGyI^J5Lcfeuw zIoLl#{I5{&AVb|E{)5GT>MX7{=2kkGTq^%fkb7qS%z9oUlDF%i7Z!cCN}G$`WY+WAlDzFta{;US7cBbtM%rBTCaduOA8mdN^s-Ka z=xemG8-J5o?|f2Q=!ITb^bt#MGV3GIp9Q_J=uQ7x>7>c5=QTBXyBd07(dTArb7t0? z%=*!!HeUz5u;@p%u^WGrS)WR23vWU%EP6A%?7zvZ&qBZ3GJXAoMQ{4YdXrfnNo)V- zKrbwM)4w49Wu2Nf=!Hce(ayW^H<|rU!2dqzg+*_+H?sdGvp(LgJ-7gRVbMpe>t{0S zvz=Q10`$V7kMIxL_z(MUGV6KGRo=F2)9EkF_V=MZV%mzue?MUl! za{38})n}nU272MJ`aJaKLN6RvUvr9%|2@zPht;Q`ucGG$$6q+CeiZt{pcf9ScRs7b zzXp2Yu=)h_*F!HHR?q9G^7c3Ag~RG6px^&^+<)M(`Zx`A<83+g!eRBp&|e0);vd-L{&Ku}p!F%}g+*_Mx0Ozs%z9bh?mFm&ML(jQcl|e+^|CHr zHPxfJ{1g`bsHHcV^|Ef;@z4v4K5pqvW_{{xZQ)zc3y0N@K>r-{!lF;{58C)oD;-Q` z|DAK#)NvM5J)Gk&%s=;`JxV)o%&a$=^|IdFbDd32 zr8k-N;|TvT&@P{D<`^gIS;dvNCpNd|HR! zWjY;39~svAA?StK8`hiY#r~Vj{%4`T6nbINN44{A_)TX02=qUJURd-gtNbyU_0EOb zgBkQ5rt*(4o#yaIt6@%mIQ%BFK6;5ZUkSai_#fdPwDBL-o6LGz3teq@LNCnr+5b5I zppE~q-elIxdWN%7I{dME`Oue8<+i$2Of zXyZQ|ev?^0{C#EY+`3%rh1tGJe=Gk)3}*f454HZGcC8l{{e(7l{WqEQvQFjR9a=9e zdh>o!;Wyl4y{vE9552JH9pFIS#L7yWgY3?Krbx%A5?ku!mpA2h&EEk zniIUdpT%j5e_-*;7H@r`*S$|#e67XnEIxwT#mw-XM$F$!pU3yluYp${#WAJ+8Q|}L zZvyAR_kp)xtnEJmPJsUgPJw6iX#a-6+k+ni?+4yziFWTp;BN5I;7yO#<|*(Q-~sS= zz-NJf2L1+EU37}yHQ?&KwqZIw=cVbPCSdXrf{@(XR@B{4{pelF*r{5r<1ftiIse49aY1h|>nEVE zguVm;9()dZ;jsDy z^!GzAEc&Pwev{e%9Q1#HURd-amfmF6M;_J|HvWu`zp&`zmfmF6r=b5J^unUgS$dOM zKMwsM^ul5FsrA}}+r)pc=(CppCbR!zu>V`=g~RIOztJB26?$RO$N2|s{HK);CbR#! 
z-zsA#M&B*@eO;LC^YbUcKWO7WtT$PO{}E;Etc89IEc&daH<|VE$F%-V=!Hc;Xz7Kw z`>f+cv{W5GY4LY0zLl8Yw?2aU{W@?5_-XJO@XO$H!5f{b^&{Y&!8d>p1m6oj9Q-8s z81MwR16+*+T@BtD{3Y-~;LE{r@F@6L@GrqD!H*D|=@a>!mrtdPwxH{+6A8m2F z#cPSV{+{pE7w{7Bz2Jwy&x6@ksrvlpr+Mr1yAcP~=kt$~L$n=7=A55}>8HxiBm9Fl z{?kealMf+IJ)w-9^H0-yVYaXC2W{-?69%(>6#Cnt7Y?g;p41M$1ii56bB2E$ev{e% z9Q4hf*Wniqt9O2{Jva?|VbP~7|4mlu2mQs+r@*2&%Rdgk@Q=uT1a9-6*Dc=sbbbHj zkiT~)Htn}te6_{5S zBKV3qy&_L5V`nM!!fc=Q5pC@1O=f)@`ZJ&x7JZGSH<|Sb=x>5vSoC>IZ!+s^#&5ABvWh?S zUxj`QEcyxlK^y;Jy~(W4zo3krbr>F*2EDN8Dh1bxfVZeF~_=8eQ4kFy!D~Oh=b}wqc7`=dMcUo{UprptNf^q z-TX9cFz2WEEBeHh&_O}OZW!s& zK;~SY3A6iLo~8H)ZTyGR(_~K1iMN!o^D*d!**@#zRoYziCbK^JPi=k@^unT#@DJKV zZ!+r>?`ZRjpcfYXm^OCxCbK^BuD0+~=!HdZ`q#?#*?>0w!}|Dp3Z18+7iM#gKOVoz zKI=_p{jf5jRkhAX`h!J3qMdj3CbK>|gKg4$63`2aKB7#G^fQ?C3FwzVFD&}FcHZ^h zWEK7@Z6Pb+2a7&x_}5A&O=f)__OFCqSoGz>KMwsp&+ms|FqJ+`Vxz6*5v3$r<2{|N28F$d|# zp+6FOVbPn{i}fb6|Iy9aCOu!F7Z&}PGA;OTGV7f!wElb03ya?LFUbD{^p8R>Ec%G$ zzsc->7W(Q7b@~a5e%#WV%=*NZY}0X?p%)gt8D9>+$*hmyg|!QMVbSN5>xJ|)ne`*k zUjn_b=*y+w1oXdxURd-I%YT#E|C+7YCd~(PkxoBh(bp)`LikNq@rS+%`UqI`DNAoM z>vOQ*552JH2I>Se$XEe{Rmj}2}^G>>nC8pA9`WY z*I0U!S?}z?2gkV-dSTJWm1*(t$DtpEUO22i2mM{p3yVHt`ERmHKj@!=egrIfGrd~r zq{%A%V88kjef=jaebn;bWY$M^WS<;o59o!(f5&=%GFj<&*7^%X4;KBfHg@k{lUYBy zi?;9@^un&*@_*Q1*2j0(`URIF|AR#zHSD+2Nt0Qhou&2bpcfW>jW%|}Z!+t1duR)P zfL_?uTmIJ=%=+OywZ8o_9e!cao7bDeZ!+u0p}!G&VbQ15)h*U(}Lb)*5_bhsXIKrbx%8q0r^+5a)<`=J*W{TTnCjsLW=eS=v)vX4G- zF7(2DmY-kd^=7@vtRMY=*1rh7u;^3d25-!)PchKO%=(&r`IqB-_?tTYh50P|pQ4>N z<{*6*`m>=I7X9W*UC;~vmF!1g#(&@RQ)rf?ov>gPk@`8OOO2ye0T2;0X9Wa6R}*@JGOZ z2Y(V=^=%!WEO-a-x#0c5Bj68%ZveM~p9iPGHOJ0ycpC&qz+V7I!IyyJ;2(k$;5;}5 zUJpJ6{AcjR;J3h6fw#Ou$M0w0J-`owYl$N&r24SL;!j(AlEvp(e7VJcv-pTBz5ad5 z;?G!ouf;Pcm>qn;W{+>!;XB^;%RPztd(1kk{W6F9?|3rjqC}XT=KI%tUh(~Fvbz5d z(193$z6LD%QG}0~^(M1E5Bpa@FD!bqykxz}tRI8^Ht2;#pWq*~@gLTk%=*ZI+QK8y z3-j51XgB=}@;?gw-=G&3eT3ZLjhXck2HKd}{{-|Ke^;l!Fq^ag5!!iU4$^0#p9Q_J z=u<4FP4p>61^>sOKOK7Eu=?mhY}0X`hF&e^19>So9w^!prd&zL@ND`TQjA z->bnB;Gcn=kLvLKig_EVulH(x2b}8D%*Rsq(;@%my`LJ0gYKsZ#HW+Y`F;{+_xXC} zXy=WYU5rJ`&aXW1trneN?$# z2*1g!Pe4Bcy|C!xmfmF64@3WV=!HdJW9fxkzfWO@8UMMAnDciH^6$0a2sjUpf*%IQ z!G8iLz^@aV?k~97%N#y4|DR9hvu5Pn`Qq;Z%Y5zcfD_FeTF0sTfwnL6nJ*^ZnjDq+ zDTiP#^GzOfjW(D0A1?#Te2(w?p>|K^YdjPz^DTZ{F?sw+<-Kzf%!SVe%lvj%6LWZE zzPdfH)$V6=TE7^a1D^yQ244Y|`F>sm%lth}Kho~Wd^~4^WqzICfn~m&gGb^27q$O6 zaO7;w_kv}9nwsmheVK1&keK7+P(Kmgeg|`z59WhE*7kESKN&3ZwcH1m`By%8J;MKG z?cO(u+5O0{<~zYM-^tUkFY}jt@CL1y`ANPCmia|q1j~FON8YII%lsc5V42V3Y-09b z=I3||<}%;Ljz7_QnLlF;Ec0Rf=_a^O`JT78Z`LgHRV?|bW|@EDQn1V?@fXE(|Iu~h z?dV(Bet~)1yvYaN>g6*n{=LN?{+ZYQMP8~773hgaqg*yt`Wr2}s< zVvfIzw>kwZ*To#)54L_#>!tnp0I;+NF91vXZV%Ymfi602p9jam7lEZcRnkY=Mk@y zrTuc>bvir|v`3x=miEQpf~EcN)4$U8rTuRJEbV!(f~9?K;jguQX>Yq297lW9O<-vc zdju@)TQkSBUfQeP50>_)Q^C@n^dMN;hjx94^;F-(`{${Ob1pvvmiNgTx{vsNl=|RD zz*4`v2rTurKZB+I)k62x)*BbrKRF&O>z{OiW&M-SfMxv?(aZV~qL=j}z6|@aeuTIu z>qm%Q);|!vtbZ^9y{v!mW3a4$Am%b3h?vWKAa}yP%m?xaSmpzH9xU^LybYH5K(_jg zP9K>MBm$QCKInz%n1mOJJD~r24lyJ~AK3PGFf2m%a_<@(6@!Qa5Xj31QiCF2L>ddc{~ z=U`vP555hKqk&|rM|Aqh_`wKR#t%w<&cVL$#bC*g(jQy$qx8p?`$hU=OL|FvY`K4= zKepVD(jQyWQ~F~|`bd9lNgwHtE$Jiuu_b+^KenWg^vAA4e5F6On}5+iaUskv27eqZ z{js~j(jWVCVCj!7>GeGHa(_$zM@cW~|9C$1(*IHNkMw_({3HDzrF@b0Ye{cuzZUnT z{aVsf+ONeuX}@;!Bif@Sy`}wH(p%cEC4Hs+TGChAuO)q@{rbCzpR`}!43_q5NpESt zmh_hPYe{cuzn1it_Ui}Wp0r;-1(x>fm%!3~UHz!e|I&WF6Ij}>KM0og>%+j(ew_eI z`*k~5+OJoErTtpULutQ08|Kn}E#;TAUtb1uX}^~8N7}gRypTlG$OnOuF2@~bpB)7JO%zfcpSX-t;RPQB9tVFN+|;G@KLD=*zY4w= zygAjQIsSM2f-`5wfR%vVestVYxkcA9}V8WN87&yoC7~k zy!FT1+0G9FCSA%6d_8-C0p1R%B+I?wHt)gc(SFNRg zT`gGpr)9v>-|ZXVkqi^vCH2=lCU&wyVL?U+f96 
z^uMa9*7^wS9|evhJOf~vkLU`p%rEp1Smryb-blMA^9R*~Wj>%4Vjfq)b1nxvb2Za? z0{Vr`IU>2(YXNGz6CQeSQFz^?IHW^ZDBSoi@?#ISVu&Eqt=(&w^#WgsZ`_ z{=+k1S`)#+-dYP|&F<9oGKNBqT$=?pneNKnxEwIe@ zUbm%oFOt*dJz$yNX#_lRw>Do7mid=9-b(9bKBZ=`%$L*+midmpCH#PP?_uEwHE%>^ zmC8@+G&c!TIm_D#V3}{>BCyQgupTVqdpD)Bj@^^-x*r0|_}gxpu#X^_5Qn%X-aU z1Izl=KLpEq-gkjhlrFse9xUs1zb<+T7jN5cr^6%bQ6D5s>A+hHI1l%dU|GNXWU#D9 zeF-=L`#%E5!FP%|#!oyCmj3fwzE6io#-khpmi2F2#k@&}uNy4u!JY$_^>u#^jx=if ze+OqjqFX3b0nsIIaOmO}g%{L1FQSqdliL2fU|Fwfubs7fsgyQv0n7SgU0_+C>Re&?e-$_a zz7w2~ zWxdZqaOw^nzI(tje&bEB%=fs{Zdxz%ANGJ{{O1K=86Wsdu#7K!2`uCNcHdpQC*waq z3a)uohv#IljIaF`SjMY92A1)&?|@}I?_TfM?x)_+?kxbzc*{<(jNkksIEwVR1uXN$ z{S_?pzwI$gyDuH`+raTL?cW!{Iq+@Z(ZjU)D`F0wyN7l!0sabDzGwec7~j)3j%fSx zeLn)0@8vNuhy8hAS+Di$U|E0l&tT^WM{Uh}YWHP4+Q+~$zUvgQjMo|g4_D1l`t`z& z=C{EzzHZOGwEHsN>L{>`pBe-YV|>;%U>P6vC|JfTPKf(5?K2GZ1w@IPoRIxp5Q3B8JvAxhxgN98PED9 z=B?=a8{W4r1CQZ-Zd5USAI1CI{V-3!{-0o;!~5`iF#j0dPj}l_yPwDV=BL0>@G0OL zyw6>tn7)U?zguBGf%ngczzO*GJU9aXX78u{%i(=`EqEC5$%C_Se=DlDaQsv7{}bR5 zxOWve5A)Z-IfQ>D)q_|+0sFUr$B;f-d{CQ@BK*gKBj9g>Q{czIQRrV_uFg5{9;)-l zJK%ZXZ4YE~>VFD8P%&N4!?gVtm`9p4r(hm$Mt%bie@OFfUF|wzSnF%Sarn0gJOLgCkAQy%cHrKg2W$6o;9l?u{J#~PLijg3MBAT$`SIWg!haul z6!v$Ut?fr)zZ*P^@ZSiIgWm(E5Z>k}{6qMbFmFZupOC*g!4c%o)4;>XZ&xr=QYBFS zT?dx+mGj^n%zq6Y0Y3%KgI@&8`o#YPN8!7^X-d~lx9nYYE@ z1l>2h9Y;*TuhK+)!!+xQPba@?@v|21TdVZ0{i7{D(c&{L{-(u0wfGT>_pbB$*Jtt1 zE#9==YyYDbf79YWT6|E0SD&`{LW_TE@wSa#{X&aBWAUiPf3|q{IbQdcS$wX=>n+~8 z$*W&t@s}*V&*Dv+z4{|8US;uB7XRAfS1jJ*Ltgjyw)ilMmsz~h;%{60bBiChc(V_C z{hwv=Y>O9L{5gxSxA=LBXC3PGZ?58;dp{j-nRi({Z1GPm{-ecPQ$3L!cisP>#c_*I zvG`_-U$eOOaIgD)7GGoWUo76A%E1}#>RZ&$#o{K5kFa>5;zD_Lti_+w?8bk!Wqzh* zezC>hxA=C8AGY{8i~niyW^r$N?V;H%k7E`uuz0D($6NdNqo{AG(TviMsT zf6wA;EWXa-n=HQF;=3(gXYnHz|K8$1TKuxbZ(IDH#hcDm*T=mcH5Tt|am3>NEuL+0 zlf{QyywKv~EdI2`9Ts<6JZSM+i$8Diu*DZ!JYw-x7GH1i&n>>w;`26joUhP6O#4@9 zKcDum(S8B#7tww(?cbpN6521N{W97wr~R9>f1CCzX#XzlSJ6I7`*pPcnD*;wzk&9j z(0&u`Kc)Q^+Ha-(XSDyE_SNeN);uqkVJQx1fDX+P9*8 zYudM=y@vK}Y2S|a_tCyR?K{v;Tm0+f{)OxZbqBnW{@#q=dHUN<``utRFSkEffn9NH z9ciAW{W;p7r+q_S`1j|9h?6`jy(*pT8c428cXcPzy}f;d>A|kP-ht$@%nJIO=a_xHyx^9-Zka8z{ti zZgG?WS0#2Q%SCmD!)q5lN)`Grfb$GMyRv+aB7lP*)H!KQc3@uHW2_j{eL* z`72_{+PY2-9W3%{{!l;1XlZBXKxQy((vhw;vs!0?N;E?~^H&b_hRB(PIJFM-=rpe+ zAxQMmflNop+o0f?b#)i@4)%qOUd36Wjnc5`Z zh0MJ4KqlGVmt8w|V8DOt1zYdwTa~H6I!ZJ0BjA1r_CcRrmhorKQugWM&+i-Rwez!2 zvZSwf#loT9_9ao|fw|di_u6tP<*H(nQq@jMRW~VB{iIY4lTtNKN;PLvs-{X*NB4CM zb!U<*s9f#pP4}0*z6JY>Ghr?3ti{>-==8u~#(rG;avuCB$d2#pKe0mD!mi#9H!nuZ zn1O)pS-kT^3ul1OXo~p+ElT0Db=>Rx;rY?B?kL-SvYBBe~A= z^#0kJrYgXR1Bpay<3LwWM>gA(?r&e2Os`tgIMmfU*jT@4$qBVBt6Eyx>XU2NCf2q! 
zB(;1MrBYviGTWDJ?;R{BR+9Rzgwp|;$Lksl>i=R>e@RqEq--<`(S5h zOGhHeWN3MyBC{qN3v#KuZv~z2@&&LV)8D_mBhZhAPO9AvQSklVH}v&)WcpiH`J8JU zIBAI5Cjvq+r=4Gi0!7V~Co?PhDdVqG1|Sv|HPf%^%ISr@OmbdRMh1nvy5b_D>ldBDb{cDrmed!K5w4a#iC>|@mvlq9w)h6k`adkX_Bspp zSpa*jP@i#ZzP*L={_m zvTSi{+hAQX8H?2}PY-mpCv|3C)ZR+9npmuTWx78(*q`ni99Y!eLMaeUHq(QPUjq92 z`AkcTk)!IndbQ<&4)!ySS+9Tix5T zl<$3`a$tsG4nN=r(yPen6ZlKmsvb|u%;C1>q=^drczayOfgGNY0YB>1GUXJDV*RjBwdIAVefwS~ehLX<2zop(;8-L8uKAgxU~6 zu*#+YzpNWH$cFcJYvTOa+27YwI>!|*du^?L$HhbFSh`awN6n44Bw{EJ^)slfg7TAB zv8JqqGL-nWTzAzw3yv+>Hg5yRj1GZtcQ0iejNoVq9i2p=;`v zz8({ARwhTh4;U)UJNu^f`B1+)-QPQ{F7k_RU;nf^*{G|s#ojl?xPfo$-l3jkR(-i2 zFw0lh4nJv4J4WR~>@=o3I_Q-Z-~H*qq@S^E>fy-G(847XNfRExndI`W70Gt~@@-b4 z{nq%c*!Ri{YqjdVT1TP~juwANYoqLp-=gs@?mEfw_SsZ*heEspe5h4@I{Zq%RjTh% zr-^_xZ+1N>YK3nVyZ+#h1m89#Y!zPMW3^%HQx)IW1y9P}i&JsC++8^Zx69w3Q*fLv zq*3QBBo~*l7MG=9o_A4w74a=Cf9*V-?58g(S_;3bT>6;b5E<} zC+Ljt8Gh^{v}Bo$G3vH2m>93cUK7t7lGmtCzxb+;cGN- z%u^@V6@52ImJ8jMv0ZKVdLNL&9t%xU>G7O3f`iB)*bU~J%krU4et8Lch6X-DDMoOn z$#jNZgQzKRaxT?IlgYx{O)}Gy?M}C6tV>TpG~T4B{g`SW5?vjjBF+Tm7R#~PiXAgE z*i;?!=E*6$X1)T`!_kv!6-U&{w&YyQ`yxv9Eb~p2UwJ(~HddOnmXCC!ST5F8>Z0%7 zvK*{5Q3`k46df{BE`&>Uncnnr>OX@!MZF>ouIw5>acDRu_eH{2oVEp=@fWadLAQ)5 zLR=Fw|9eclnb~6$RKSXZ)>W4x#+%pdtmbvyTSQ9%D<1eVfk#}lfQ7tp$xWG&junvTGp=*l-c`W&9}= zu#@t!WC0t_Mc=(O#Uw7=ZBul}xIsg>RB-{@<}YB63%VIp;UFg7%;+%+E`|8ZF4ylq zk{(z|-7ZX|J{2ZXhY1s@H-w4Qo5sXOFH$cU)3ixraz)4Tv!dIubxCbN?|kMB=sv)p?!`O)6h{9%YX3=j2WIFhZ4Q8qTab-mT(EcJAz z590Lol>5pgJ2KrF{yM&lEH`s+h$OeEm~t-3&_9D}pWIQpr>C!X(URq{mKHV}T-o2Z zI@z7+T|qrDbYq8pw}JPyv1HP$wpQ#wBe&&uXPQ+CP%U+^Wf{My*LHQN%BV!MsjF{* zJ<2pvyhwtVp|-(Tj5;CEX9E>BeraCZuHr`~wPvv0moFq+Z8F)LSwqbP{arm7G+&6( za{8vF5|h3S_;hA$!&c85`7-v>H~q!!`c1#!llPNHn74+;wCc!i#5DNxNhr6vdIvK7 zgDpMN6EYgtL8CNUdgKyJK48sR`d&sCoVrO&-7$Jmr#nD*N0PdLuhlb5a!pHSNVoYz zQ>SxPSE;wY7Wd--a!)vsG*Wm)eIj+z8=@0j91cU0qGf1WnG1kDrrc9UxjuCY8oio&d^dnu(r2QK&pE%q_UxloXx1_ z>?8_iT?$tgfQF&Y?!MJ-5nV|LDCZ4Q%&7O1?%!0I$Bk|5QF?p-^%w0Bz#D~qz*cnDcwb)Y^;P3zQRO%Hf}u6eIakfNTubd@bCpc;CH1~Y5?ZtC-mL%lxVw25_FBS%_i-I_riT`X=2r>FOF>IIOd>QXxm zJME%Ag%o>Lk@3XH6{=nXEeTz((IIxt(29S8ksq4faw7Qg-(sXnb7oC@Cd>Vmd@Qv8(Icr)@9;Lo#X^)WZ^{-WEvlFNt}sq=C3sa& zQLDH!11|4zfe~HV8*5CqLKW_U$W!y!$!PIxI(8L}7ObszeGZl7T#qLuF70#aj9bR% zGU6gWSD4yXq(iFVcSl+mIUU{lZ*N+?8}zQ`KS|2RR54HX4|0FTPua=Z=h=p+ohD_=>jAi+%w$ar=0SG$1|rX2?x5!%7EDMhK}m_^cqaYgZ=cM z{;s8AoRk8q26{!X%9ZMvbgb>}Ap1kqQbBDF)ci1;#vS($%wC=DT``+SR?p^^4Sg8s zZ=cSM6hP`Bjkkm%aZ|5BN6w#*r+0FYKW0W9`%KJt zrpb3E7Ij*BCzo|ZXJ#EwpT=-`$14eyc|4K^_2l9=ccZVCw{vAfxi0wraZ?^IP$LI++uyRowcSB=E zhDK&nU8PUX)F(Lz_1Sb+|DwfYt8K38UDo0TTh}f<4pS9@9^C%nfu2NYWPg)`tNSoO zY(Rab(A`FT-||9s>TPQ_M?2Me{PQGwtd_r~cXutP$q49cafvZ&wLxiJYxdx?$E9?s*=R&>lPG2T;Z;8u?jnRA;FGdm|~` ztMu(4?SE1qGb0glusK-bt3k=Fk=8JEg0$i*8hviQKX7 zQ_oGt7~tj9f5d$i^4w*ysSPbI{WQ~;#Fz!;-#c4aWm7}Zy&Qf5G_qXL>}tL0LGPJ| zELyHE;7L7?w8XS9W`0M)d!*E;S-~~)NJ~rtg`Y9A%0e#A4FMsb`WLdxj~ebX9iJLo)<(}c~%f5<%A-IZar5V z^jAq$>g$ip>VaN(X2*hU=`Vo%aj|EppZ?b-sfS=YeMwPOu%0D7jrA=p?cMamS*5-l z$tS}pbaQqexcK@w6{pZq#`{TfDh|pwJI^=HsW>Phv_9ia#YqX{G&*T@I4XQ>r#6+M zoq(tkZjA2XI;VYUC+*HOg=fw{CUasR%|x|^-y@`Zr#eet##+oC7qQ8PRk=pr()d-S zzLS0|it*jFoIjD#Bn&On<4f}peo{@bZg;`xk>4WBA?ffO&%MiH@vnv$;4us=*LqI zHIfI^qlG)-^r^X0iaE^gHp4~A(Mf7ZQWIG$64x8G>upU*`p>~|^9xTlrwU&ksU=1F zEYoWbYo+20B>kx;o62?cd9}p*8olI}sOX1(VWoFgd2Q8E&UObUE2WZE0`Og6 zH4dl*K9+~3PzTv&PUS{3Ij7U@wt42uw)HYAWlGSSZnJ9Jr`8SWkcK7%&+B?Ke;H49 zcF}mQ^Z=R&rXj(N=eiYQt836F)qX_%sMMKN>Fyya zCAmwVKj(xU;>tH`xn?ERlvGOr@w8>BKxFzCQQ6WqpL!)F>AG{%(;UjJYJ39cXRaN2 zRjNg*ATnC0OkqQE5jjea#m4>&JL~NV{J%U+nz*~DKUF+~2XPac?}IHZu^8Q@GNN+P 
zQOmfsiLM?ed<%6XUyxmTteN*z4?A}jCU=~2P>T*zT0;tHfS;YwdBhTxk=EGlWeF9K`U>;C-#aVss{z|qb-r|N3hAWe zf1TVuk?UnKbJ2%R%HVYKr!Q6VkeH2AOH4{h?-jPMnQwHw8gK8$JWR=8rQ2k^l*9~7 zYf%^WVpn+Mn}QQM6!JA{fcgY3!Us&_Rz6NtqCbTSSyE;tY~lCyHmv6E~Vue4L0+lgNT)h9n1O57KzAQpw;? zPo5Va0|O-2ykwSd@pVxM>a<{YeyI1U8hmG(eP?2Rr(^!^FR?j(q2H}qf8Ux~f6uL2 zzmK*4kT&?8Z}dCg=y$%+?|h@*`9^;n8~t&d6S z{j+!|MevtxIEJ!d%B9M&l2@nf(){UgDatQTG2h?>RfkqFpGIHoK_k=bi5eK=YlPyy z12RX;bR?KJJ524-K-EIsNnvWvNKW^*Lg)fh@^p~daqzx${m}c+wyK-PTq6>h>2WPw z*sMoO$!A%(@KxfXo9?=1B@>p()6q@(VB<-}aFP{9$qSRHA56)|A}J)PN(7W8$bZ+?^P=*3F(ZwxgL zi2F&g%#_lej!SB~Uo~^8+rh-_5?%3Zz%(*q+UL}s);X|Ce}bQvrZLaGs!zuJO4eum zcPtqBuUh?)@)@Em->1v=H}Ff!b~Nxy%JwkuOUiaF@Jq_}Dez0mb|&yk%Jw4gOUia1 z@Joz-1OLls-45~8GTIM4PdCGgO`Wj1@tqE?P^)}3vb%f^gnJ#yx*D1eig{H1j zjVOhD7WlD4*Y85LCb^fXq(fE;`V+jt_Hu(zD#wR$ z2vICpMGWoSOPA;qv1T4SWYw)(8q}0{ER!LkWh=}rO@k{7bLAa*++>9MjZk+CpX3Vz z#AU}4X?s%5r4?XW!Yp5D29Re948Kk$Szh8U!%1%`C2tIG7kTT+ttLfvpD|N)M;=OE zFJ0%Y)39BxaQ3Q#!jF>KYtdJDQXRZ}DdKgbxMyW=e4MBE$f_KO-QZ6S8+Br zwdVLHbEbJ7{UgJu1DE~NxuQ`|BCx110^CX+Z;t>~Jj6Wy(BPMn{sc(Opt&Q@IhD|y zP}KdhmIGauzH9*@F$3>9Uc!Dc_e)5Mc~L@A!j}@N5*`&-S;K_Ply`4%f4b=!d$A@0 zBYPT!=l^#L_@aCccL!15A52zi_T@BQW6Viad9ahZ9qf6mjDD5+_6?}qd+tvETY`JD zF0)I8;A(%#wqh1|94gY`&PiqkKePPmomTOK_8|AoDcPGRqKCi z3ivQx=MQR~|D~+X|58>*&4k9+UjLPpF9?2K&g{{<@`hFHna%pozgwxU1b6 z%+v~w^o@fj_&45x?ZryUE|45EmZIT;iF29lh{p$y0NFCUpwuevdbxPd>tVXLw~s~# zQVtxT)iyXM@*?rZ>!CGtyO{l<9<3e-ZGRz6E;<>FTAjAMHd`*{jc2WUYK^T=sb2~a{J(Li^B)40Ky7iI+`gv!T zcy{ZSQF^+q41QISDFw_+?K#IsJ zUvOPxJgZoB{8x6v(GY81I!Q~2_kx2SY6T#;|$9(CfS9K&KbJUy~%tBMV z%TPNti?)$uM64A^T59*bj7YFhuuo;(oKH8DuR_5-VX+0n*GAL zD0h&)DQ%5=*A+1?tuAs~YqyN5y>+Dm8}W>SB%t+`N3;2>Wy^Yk8zDKs5SK1&s-!Zl-3-uI~m3Si#)CNTl2R>ysa6)?(mQm|MUuwF65n zeNTmJ6t2KQ=}_;JsPIc%gH1y=cbn-{yetV9v$6$fi=$g#D{tc#ozX%TtJNpI6^9deEs&L#SJQ}Cqh+}2ZZdv&_McUq@fDTJFB(4gK-Q=d4;*Bd9Gr1RNPV+^IM zho{e&ZiK|}kyR}iNWv*!q_0>J!NRtR8;wa~+zz!1+qenM{ft|<_-sOLWx*!iOiEV0 zxsy)tyut(NPFgRnpJ!vD@^5NW?W%yuT^eiTk1hH>4fCxfOo{E8k3X=ucH=lr&Q(&& zEFZdJvxSuFo`$Mrc@O-RHvi{Xe+|w5rNv)Aqu~|hd$v^&PIfKavnNcfxj1w)UM0(k zxT1;{4DqVMtq4+X*$;2ohN)5GM&4!6mm&fQnYNd~;8zxHS<&6R-kKHg)YF(11lrA;KGZt>Ponp-& zrNMYJCD+SOwlyW^t(&39d|4&GYX2K=x+#aF%$+x7_eeO&XLYQ+5?8BdzY0zHqo+ zs#Z#WMaR|S+nIlo99y$tK5Tw@x7xteTB9v)Vg@F*e4g|q@&C*Du;p`?`R-kGYQ7L3 zB+ESJ+^jeaPRVyotL@#o0G?Kb=HSV%O1>DBey~;&p)xLc;^W$NJy6jsoEF)YLi2l(YL1TOT{s+xMr0I%5@ zygZJ5f&7DZf@edqbwPu#{CRCElk3_(223V3og=LR=7p*CCr;3CFMsa+4;rEsG#*O^ zU*WZM3f=kiP^b;-9(BDN>+KEV>Jm%#ydiCP+!V6E{P+*mD9 z3=B}N2Pi1+MaKA%^~weXI=uZ~zQNbm diff --git a/hail/python/hail/docs/getting_started_developing.rst b/hail/python/hail/docs/getting_started_developing.rst index 23a3df0bfdd..56c6f157f26 100644 --- a/hail/python/hail/docs/getting_started_developing.rst +++ b/hail/python/hail/docs/getting_started_developing.rst @@ -6,9 +6,11 @@ Hail is an open-source project. We welcome contributions to the repository. Requirements ~~~~~~~~~~~~ -- `Java 8 or 11 JDK `_. - Note: it *must* be Java **8** or Java **11**. Hail does not support versions 9-10 or 12+ due to - our dependency on Spark. +- `Java 8 or 11 JDK `_ . If you have a Mac, you must use a + compatible architecture (``uname -m`` prints your architecture). Moreover, you *must* use Java + **8** or **11**. Hail does not support other versions because `Spark does not support other + versions + `__. - The Python and non-pip installation requirements in `Getting Started `_. 
diff --git a/hail/python/hail/docs/install/macosx.rst b/hail/python/hail/docs/install/macosx.rst
index 8b9c8d6b9dc..32545ac4be4 100644
--- a/hail/python/hail/docs/install/macosx.rst
+++ b/hail/python/hail/docs/install/macosx.rst
@@ -2,15 +2,20 @@ Install Hail on Mac OS X
 ========================
 
-- Install Java 8 or Java 11. We recommend using a
-  `packaged installation from Azul `__
-  (make sure the OS version and architecture match your system) or using `Homebrew `__:
+- Install Java 8 or 11. We recommend using a `packaged installation from Azul
+  `__
+  (make sure the OS version and architecture match your system) or using `Homebrew
+  `__:
 
   .. code-block::
 
     brew tap homebrew/cask-versions
    brew install --cask temurin8
 
+  You *must* pick a Java installation with a compatible architecture. If you have an Apple M1 or M2
+  you must use an "arm64" Java, otherwise you must use an "x86_64" Java. You can check if you have
+  an M1 or M2 either in the "Apple Menu > About This Mac" or by running ``uname -m`` in Terminal.app.
+
 - Install Python 3.9 or later. We recommend `Miniconda `__.
 - Open Terminal.app and execute ``pip install hail``. If this command fails with a message about
   "Rust", please try this instead: ``pip install hail --only-binary=:all:``.
 - `Run your first Hail query! `__
diff --git a/hail/src/main/c/Makefile b/hail/src/main/c/Makefile
index de3370cc67e..d750ca09ed5 100644
--- a/hail/src/main/c/Makefile
+++ b/hail/src/main/c/Makefile
@@ -5,6 +5,7 @@ MAKEFLAGS += --no-builtin-rules
 
 UNAME_S :=$(shell uname -s)
 UNAME_P :=$(shell uname -p)
+UNAME_M :=$(shell uname -m)
 
 BUILD := build
 
@@ -22,46 +23,12 @@ LIBSIMDPP := libsimdpp-2.1
 
 CATCH_HEADER_LOCATION := ../resources/include/catch.hpp
 
-# If you want to add a new cpp file, like foo.cpp, to the library, add foo to
-# this list. Remember to rerun make prebuilt.
-OBJECTS := \
- ibs \
- Decoder \
- Encoder \
- Logging \
- NativeCodeSuite \
- NativeLongFunc \
- NativeModule \
- NativePtr \
- NativeStatus \
- ObjectArray \
- PartitionIterators \
- Region \
- Upcalls \
- FS \
-
-BUILD_OBJECTS := $(OBJECTS:%=$(BUILD)/%.o)
-
 # before libsimdpp and catch.hpp are downloaded, clang -MG -MM will generate
 # unresolved dependencies
 .PHONY: simdpp/simd.h catch.hpp
 simdpp/simd.h: $(LIBSIMDPP)
 catch.hpp: $(CATCH_HEADER_LOCATION)
 
-TEST_CPP := $(wildcard *_test.cpp) testutils/unit-tests.cpp
-TEST_OBJECTS := $(foreach file,$(TEST_CPP),$(BUILD)/$(basename $(file)).o)
-
-ALL_CPP := $(shell find * -iname '*.cpp')
-HEADER_DEPENDENCIES := $(ALL_CPP:%.cpp=build/%.d)
--include $(HEADER_DEPENDENCIES)
-
-$(BUILD)/%.d: %.cpp
- @mkdir -p $(@D)
- $(CXX) $(CXXFLAGS) $< -MG -M -MF $@ -MT $(@:%.d=%.o)
-
-$(BUILD)/%.o: %.cpp
- @mkdir -p $(@D)
- $(CXX) -o $@ $(CXXFLAGS) -MD -MF $(@:%.o=%.d) -MT $@ -c $<
 
 ifndef JAVA_HOME
 TMP :=$(shell java -XshowSettings:properties -version 2>&1 | fgrep -i java.home)
@@ -102,25 +69,6 @@ ifneq ($(WARNFLAGS),)
   CXXFLAGS := $(filter-out $(WARNFLAGS),$(CXXFLAGS))
 endif
 
-# If no inherited "-march=%", then use "-march=sandybridge" or "-march=corei7-avx"
-# for ISA compatibility with MacBook Pro's since 2011 (also the earliest cpu with AVX).
-# Fall back to "-march=native" if the compiler doesn't support either of those.
- -ifeq ($(filter -march=%,$(CXXFLAGS)),) - FAIL_A :=$(shell cp /dev/null a.cpp; $(CXX) -march=sandybridge -c a.cpp 2>&1 || echo FAIL; rm -f a.cpp a.o) - ifeq ($(FAIL_A),) - CXXFLAGS += -march=sandybridge - else - # g++-4.8.x accepts "-march=corei7-avx" but not "-march=sandybridge " - FAIL_B :=$(shell cp /dev/null a.cpp; $(CXX) -march=corei7-avx -c a.cpp 2>&1 || echo FAIL; rm -f a.cpp a.o) - ifeq ($(FAIL_B),) - CXXFLAGS += -march=corei7-avx - else - CXXFLAGS += -march=native - endif - endif -endif - # Append to any inherited flags which survived filtering CXXFLAGS += $(HAIL_OPT_FLAGS) $(CXXSTD) -I$(LIBSIMDPP) -Wall -Wextra CXXFLAGS += -fPIC -ggdb -fno-strict-aliasing @@ -136,13 +84,126 @@ ifeq ($(UNAME_S),Linux) LIBBOOT := lib/linux-x86/libboot.so LIBHAIL := lib/linux-x86/libhail.so endif -endif -ifeq ($(UNAME_S),Darwin) +else ifeq ($(UNAME_S),Darwin) + BREW_PREFIX := $(shell brew --prefix) + ifneq ($(BREW_PREFIX),) + CXXFLAGS += -I$(BREW_PREFIX)/include + endif + LIBFLAGS += -dynamiclib -Wl,-undefined,dynamic_lookup - LIBBOOT := lib/darwin/libboot.dylib - LIBHAIL := lib/darwin/libhail.dylib + ifeq ($(UNAME_M),arm64) + LIBBOOT_X86_64 := lib/darwin/x86_64/libboot.dylib + LIBHAIL_X86_64 := lib/darwin/x86_64/libhail.dylib + LIBBOOT_ARM64 := lib/darwin/arm64/libboot.dylib + LIBHAIL_ARM64 := lib/darwin/arm64/libhail.dylib + LIBBOOT := lib/darwin/libboot.dylib + LIBHAIL := lib/darwin/libhail.dylib + else + LIBBOOT := lib/darwin/libboot.dylib + LIBHAIL := lib/darwin/libhail.dylib + endif +endif + +# If you want to add a new cpp file, like foo.cpp, to the library, add foo to +# this list. Remember to rerun make prebuilt. +OBJECTS := \ + ibs \ + Decoder \ + Encoder \ + Logging \ + NativeCodeSuite \ + NativeLongFunc \ + NativeModule \ + NativePtr \ + NativeStatus \ + ObjectArray \ + PartitionIterators \ + Region \ + Upcalls \ + FS \ + +BUILD_X86_64 := $(BUILD)/x86_64 +CXXFLAGS_X86_64 := $(CXXFLAGS) +LIBFLAGS_X86_64 := $(LIBFLAGS) +BUILD_OBJECTS_X86_64 := $(addprefix $(BUILD_X86_64)/, $(OBJECTS:%=%.o)) + +BUILD_ARM64 := $(BUILD)/arm64 +CXXFLAGS_ARM64 := $(CXXFLAGS) +LIBFLAGS_ARM64 := $(LIBFLAGS) +BUILD_OBJECTS_ARM64 := $(addprefix $(BUILD_ARM64)/, $(OBJECTS:%=%.o)) + +ifeq ($(UNAME_M),arm64) + BUILD_NATIVE := $(BUILD_ARM64) + LIBFLAGS_NATIVE := $(LIBFLAGS_ARM64) + BUILD_OBJECTS_NATIVE := $(BUILD_OBJECTS_ARM64) +else + BUILD_NATIVE := $(BUILD_X86_64) + LIBFLAGS_NATIVE := $(LIBFLAGS_X86_64) + BUILD_OBJECTS_NATIVE := $(BUILD_OBJECTS_X86_64) +endif + +TEST_CPP := $(wildcard *_test.cpp) testutils/unit-tests.cpp +TEST_OBJECTS_X86_64 := $(foreach file,$(TEST_CPP),$(BUILD_X86_64)/$(basename $(file)).o) +TEST_OBJECTS_ARM64 := $(foreach file,$(TEST_CPP),$(BUILD_ARM64)/$(basename $(file)).o) + +ALL_CPP := $(shell find * -iname '*.cpp') + +ifeq ($(UNAME_S)-$(UNAME_M),Darwin-arm64) +-include $(addprefix $(BUILD_X86_64)/, $(ALL_CPP:%.cpp=%.d)) +-include $(addprefix $(BUILD_ARM64)/, $(ALL_CPP:%.cpp=%.d)) +else ifeq ($(UNAME_M),arm64) +-include $(addprefix $(BUILD_ARM64)/, $(ALL_CPP:%.cpp=%.d)) +else ifeq ($(UNAME_M),x86_64) +-include $(addprefix $(BUILD_X86_64)/, $(ALL_CPP:%.cpp=%.d)) endif +# If no inherited "-march", and we're not building a universal dylib on Apple Silicon, then use +# "-march=sandybridge" or "-march=corei7-avx" for ISA compatibility with MacBook Pro's since 2011 +# (also the earliest cpu with AVX). Fall back to "-march=native" if the compiler doesn't support any +# of those. +# +# Otherwise, we're on Apple Silicon and building a universal dylib. 
We know exactly which -march +# settings are valid. +ifeq ($(filter -march=%,$(CXXFLAGS)),) + ifeq ($(UNAME_S)-$(UNAME_M),Darwin-arm64) + CXXFLAGS_X86_64 += -arch x86_64 -march=sandybridge + LIBFLAGS_X86_64 += -arch x86_64 + CXXFLAGS_ARM64 += -arch arm64 -mcpu=apple-m1 + LIBFLAGS_ARM64 += -arch arm64 + else + FAIL_A_X86_64 :=$(shell cp /dev/null a.cpp; $(CXX) -march=sandybridge -c a.cpp 2>&1 || echo FAIL; rm -f a.cpp a.o) + ifeq ($(FAIL_A_X86_64),) + CXXFLAGS_X86_64 += -march=sandybridge + else + # g++-4.8.x accepts "-march=corei7-avx" but not "-march=sandybridge " + FAIL_B_X86_64 :=$(shell cp /dev/null a.cpp; $(CXX) -march=corei7-avx -c a.cpp 2>&1 || echo FAIL; rm -f a.cpp a.o) + ifeq ($(FAIL_B_X86_64),) + CXXFLAGS_X86_64 += -march=corei7-avx + else + CXXFLAGS_X86_64 += -march=native + endif + endif + + CXXFLAGS_ARM64 += -march=armv8.5-a # we do not test on arm chips + endif +endif + +$(BUILD_X86_64)/%.d: %.cpp + @mkdir -p $(@D) + $(CXX) $(CXXFLAGS_X86_64) $< -MG -M -MF $@ -MT $(@:%.d=%.o) + +$(BUILD_ARM64)/%.d: %.cpp + @mkdir -p $(@D) + $(CXX) $(CXXFLAGS_ARM64) $< -MG -M -MF $@ -MT $(@:%.d=%.o) + +$(BUILD_X86_64)/%.o: %.cpp + @mkdir -p $(@D) + $(CXX) -o $@ $(CXXFLAGS_X86_64) -MD -MF $(@:%.o=%.d) -MT $@ -c $< + +$(BUILD_ARM64)/%.o: %.cpp + @mkdir -p $(@D) + $(CXX) -o $@ $(CXXFLAGS_ARM64) -MD -MF $(@:%.o=%.d) -MT $@ -c $< + all: $(LIBBOOT) $(LIBHAIL) debug: @@ -154,38 +215,47 @@ endif echo "CXX is $(CXX)" -$(CXX) --version -$(BUILD)/functional-tests: ibs.cpp test.cpp $(LIBSIMDPP) +$(BUILD_X86_64)/functional-tests: ibs.cpp test.cpp $(LIBSIMDPP) + @mkdir -p $(@D) + $(CXX) $(CXXFLAGS) -DNUMBER_OF_GENOTYPES_PER_ROW=256 -o $(BUILD_X86_64)/functional-tests ibs.cpp test.cpp + +$(BUILD_ARM64)/functional-tests: ibs.cpp test.cpp $(LIBSIMDPP) + @mkdir -p $(@D) + $(CXX) $(CXXFLAGS) -DNUMBER_OF_GENOTYPES_PER_ROW=256 -o $(BUILD_ARM64)/functional-tests ibs.cpp test.cpp + +$(BUILD_X86_64)/unit-tests: $(BUILD_OBJECTS_X86_64) $(TEST_OBJECTS_X86_64) @mkdir -p $(@D) - $(CXX) $(CXXFLAGS) -DNUMBER_OF_GENOTYPES_PER_ROW=256 -o $(BUILD)/functional-tests ibs.cpp test.cpp + $(CXX) $(CXXFLAGS) -o $(BUILD_X86_64)/unit-tests $(BUILD_OBJECTS_X86_64) $(TEST_OBJECTS_X86_64) -ldl -$(BUILD)/unit-tests: $(BUILD_OBJECTS) $(TEST_OBJECTS) +$(BUILD_ARM64)/unit-tests: $(BUILD_OBJECTS_ARM64) $(TEST_OBJECTS_ARM64) @mkdir -p $(@D) - $(CXX) $(CXXFLAGS) -o $(BUILD)/unit-tests $(BUILD_OBJECTS) $(TEST_OBJECTS) -ldl + $(CXX) $(CXXFLAGS) -o $(BUILD_ARM64)/unit-tests $(BUILD_OBJECTS_ARM64) $(TEST_OBJECTS_ARM64) -ldl prebuilt: $(LIBBOOT) $(LIBHAIL) + @mkdir -p $(PREBUILT)/$(dir $<) cp -p -f $^ $(PREBUILT)/$(dir $<) reset-prebuilt: git checkout HEAD -- $(PREBUILT)/$(LIBBOOT) git checkout HEAD -- $(PREBUILT)/$(LIBHAIL) -test: $(BUILD)/functional-tests $(BUILD)/unit-tests - ./$(BUILD)/unit-tests -w NoAssertions -s -d yes -# --use-colour yes -r xml -o $(BUILD)/cxx-test.xml; \ +test: $(BUILD_NATIVE)/functional-tests $(BUILD_NATIVE)/unit-tests + ./$(BUILD_NATIVE)/unit-tests -w NoAssertions -s -d yes -# --use-colour yes -r xml -o $(BUILD_NATIVE)/cxx-test.xml; \ case "$$?" 
in \ *) \ - mkdir -p $(BUILD)/reports; \ - cp testutils/style.css $(BUILD)/reports; \ - xsltproc -o $(BUILD)/reports/index.html testutils/test-reporter.xslt $(BUILD)/cxx-test.xml;; \ + mkdir -p $(BUILD_NATIVE)/reports; \ + cp testutils/style.css $(BUILD_NATIVE)/reports; \ + xsltproc -o $(BUILD_NATIVE)/reports/index.html testutils/test-reporter.xslt $(BUILD_NATIVE)/cxx-test.xml;; \ esac - ./$(BUILD)/functional-tests + ./$(BUILD_NATIVE)/functional-tests -benchmark: $(BUILD)/unit-tests - ./$(BUILD)/unit-tests "[!benchmark]" -s -d yes -# -r xml -o $(BUILD)/cxx-benchmark.xml; \ +benchmark: $(BUILD_NATIVE)/unit-tests + ./$(BUILD_NATIVE)/unit-tests "[!benchmark]" -s -d yes -# -r xml -o $(BUILD_NATIVE)/cxx-benchmark.xml; \ case "$$?" in \ *) \ - mkdir -p $(BUILD)/benchmark-reports; \ - cp testutils/style.css $(BUILD)/benchmark-reports; \ - xsltproc -o $(BUILD)/benchmark-reports/index.html testutils/test-reporter.xslt $(BUILD)/cxx-benchmark.xml;; \ + mkdir -p $(BUILD_NATIVE)/benchmark-reports; \ + cp testutils/style.css $(BUILD_NATIVE)/benchmark-reports; \ + xsltproc -o $(BUILD_NATIVE)/benchmark-reports/index.html testutils/test-reporter.xslt $(BUILD_NATIVE)/cxx-benchmark.xml;; \ esac clean: @@ -201,9 +271,13 @@ clean: ALL_HEADER_FILES := $(shell find ../resources/include -name "*.h") ALL_HEADER_CKSUM := $(shell $(CXX) --version >.cxx.vsn ; cat .cxx.vsn $(ALL_HEADER_FILES) | cksum | cut -d " " -f 1) -$(BUILD)/NativeModule.o: NativeModule.cpp +$(BUILD_X86_64)/NativeModule.o: NativeModule.cpp @mkdir -p $(@D) - $(CXX) $(CXXFLAGS) -DALL_HEADER_CKSUM=$(ALL_HEADER_CKSUM)UL -c NativeModule.cpp -o $@ + $(CXX) $(CXXFLAGS_X86_64) -DALL_HEADER_CKSUM=$(ALL_HEADER_CKSUM)UL -c NativeModule.cpp -o $@ + +$(BUILD_ARM64)/NativeModule.o: NativeModule.cpp + @mkdir -p $(@D) + $(CXX) $(CXXFLAGS_ARM64) -DALL_HEADER_CKSUM=$(ALL_HEADER_CKSUM)UL -c NativeModule.cpp -o $@ $(CATCH_HEADER_LOCATION): @mkdir -p $(@D) @@ -215,10 +289,32 @@ $(LIBSIMDPP).tar.gz: $(LIBSIMDPP): $(LIBSIMDPP).tar.gz tar -xzf $< -$(LIBBOOT): $(BUILD)/NativeBoot.o - @mkdir -p $(dir $(LIBBOOT)) - $(CXX) $(LIBFLAGS) $(LIBDIRS) $(CXXFLAGS) $(BUILD)/NativeBoot.o -o $@ - -$(LIBHAIL): $(BUILD_OBJECTS) - @mkdir -p $(dir $(LIBHAIL)) - $(CXX) $(LIBFLAGS) $(LIBDIRS) $(CXXFLAGS) $(BUILD_OBJECTS) -o $@ +ifeq ($(UNAME_S)-$(UNAME_M),Darwin-arm64) +$(LIBBOOT_X86_64): $(BUILD_X86_64)/NativeBoot.o + @mkdir -p $(dir $@) + $(CXX) $(LIBFLAGS_X86_64) $(LIBDIRS) $(CXXFLAGS) $^ -o $@ +$(LIBHAIL_X86_64): $(BUILD_OBJECTS_X86_64) + @mkdir -p $(dir $@) + $(CXX) $(LIBFLAGS_X86_64) $(LIBDIRS) $(CXXFLAGS) $^ -o $@ + +$(LIBBOOT_ARM64): $(BUILD_ARM64)/NativeBoot.o + @mkdir -p $(dir $@) + $(CXX) $(LIBFLAGS_ARM64) $(LIBDIRS) $(CXXFLAGS) $^ -o $@ +$(LIBHAIL_ARM64): $(BUILD_OBJECTS_ARM64) + @mkdir -p $(dir $@) + $(CXX) $(LIBFLAGS_ARM64) $(LIBDIRS) $(CXXFLAGS) $^ -o $@ + +$(LIBBOOT): $(LIBBOOT_X86_64) $(LIBBOOT_ARM64) + @mkdir -p $(dir $@) + lipo -create $^ -output $@ +$(LIBHAIL): $(LIBHAIL_X86_64) $(LIBHAIL_ARM64) + @mkdir -p $(dir $@) + lipo -create $^ -output $@ +else +$(LIBBOOT): $(BUILD_NATIVE)/NativeBoot.o + @mkdir -p $(dir $@) + $(CXX) $(LIBFLAGS_NATIVE) $(LIBDIRS) $(CXXFLAGS) $^ -o $@ +$(LIBHAIL): $(BUILD_OBJECTS_NATIVE) + @mkdir -p $(dir $@) + $(CXX) $(LIBFLAGS_NATIVE) $(LIBDIRS) $(CXXFLAGS) $^ -o $@ +endif From eb5002e5f50d0fdc138807d0999d9f4b120e2264 Mon Sep 17 00:00:00 2001 From: Dan King Date: Wed, 29 Nov 2023 01:19:56 -0500 Subject: [PATCH 17/48] [hailtop][batch] update aiohttp to 3.9.X, remove aiomonitor (#14040) --- batch/batch/worker/worker.py | 38 ++++++----- 
batch/pinned-requirements.txt | 18 ++---- benchmark/python/pinned-requirements.txt | 6 +- ci/ci/build.py | 6 +- ci/pinned-requirements.txt | 15 ++--- gear/pinned-requirements.txt | 67 +++---------------- gear/requirements.txt | 1 - hail/python/dev/pinned-requirements.txt | 71 ++++++++++----------- hail/python/hailtop/pinned-requirements.txt | 26 ++++---- hail/python/hailtop/requirements.txt | 2 +- hail/python/pinned-requirements.txt | 33 +++++----- web_common/pinned-requirements.txt | 16 ++--- 12 files changed, 110 insertions(+), 189 deletions(-) diff --git a/batch/batch/worker/worker.py b/batch/batch/worker/worker.py index 83b7ac96ef1..1a7a865b5bc 100644 --- a/batch/batch/worker/worker.py +++ b/batch/batch/worker/worker.py @@ -36,7 +36,6 @@ import aiodocker.images import aiohttp import aiohttp.client_exceptions -import aiomonitor import aiorwlock import async_timeout import orjson @@ -3473,25 +3472,24 @@ async def async_main(): await network_allocator.reserve() worker = Worker(httpx.client_session()) - with aiomonitor.start_monitor(asyncio.get_event_loop(), locals=locals()): - try: - async with AsyncExitStack() as cleanup: - cleanup.push_async_callback(docker.close) - cleanup.push_async_callback(network_allocator_task_manager.shutdown_and_wait) - cleanup.push_async_callback(CLOUD_WORKER_API.close) - cleanup.push_async_callback(worker.shutdown) - await worker.run() - finally: - asyncio.get_event_loop().set_debug(True) - other_tasks = [t for t in asyncio.all_tasks() if t != asyncio.current_task()] - if other_tasks: - log.warning('Tasks immediately after docker close') - dump_all_stacktraces() - _, pending = await asyncio.wait(other_tasks, timeout=10 * 60, return_when=asyncio.ALL_COMPLETED) - for t in pending: - log.warning('Dangling task:') - t.print_stack() - t.cancel() + try: + async with AsyncExitStack() as cleanup: + cleanup.push_async_callback(docker.close) + cleanup.push_async_callback(network_allocator_task_manager.shutdown_and_wait) + cleanup.push_async_callback(CLOUD_WORKER_API.close) + cleanup.push_async_callback(worker.shutdown) + await worker.run() + finally: + asyncio.get_event_loop().set_debug(True) + other_tasks = [t for t in asyncio.all_tasks() if t != asyncio.current_task()] + if other_tasks: + log.warning('Tasks immediately after docker close') + dump_all_stacktraces() + _, pending = await asyncio.wait(other_tasks, timeout=10 * 60, return_when=asyncio.ALL_COMPLETED) + for t in pending: + log.warning('Dangling task:') + t.print_stack() + t.cancel() loop = asyncio.get_event_loop() diff --git a/batch/pinned-requirements.txt b/batch/pinned-requirements.txt index 35dadef8203..68d136e5a1e 100644 --- a/batch/pinned-requirements.txt +++ b/batch/pinned-requirements.txt @@ -6,7 +6,7 @@ # aiodocker==0.21.0 # via -r hail/batch/requirements.txt -aiohttp==3.8.6 +aiohttp==3.9.1 # via # -c hail/batch/../gear/pinned-requirements.txt # -c hail/batch/../hail/python/dev/pinned-requirements.txt @@ -37,13 +37,6 @@ attrs==23.1.0 # -c hail/batch/../hail/python/pinned-requirements.txt # -c hail/batch/../web_common/pinned-requirements.txt # aiohttp -charset-normalizer==3.3.2 - # via - # -c hail/batch/../gear/pinned-requirements.txt - # -c hail/batch/../hail/python/dev/pinned-requirements.txt - # -c hail/batch/../hail/python/pinned-requirements.txt - # -c hail/batch/../web_common/pinned-requirements.txt - # aiohttp dictdiffer==0.9.0 # via -r hail/batch/requirements.txt frozenlist==1.4.0 @@ -54,7 +47,7 @@ frozenlist==1.4.0 # -c hail/batch/../web_common/pinned-requirements.txt # aiohttp # aiosignal 
-idna==3.4 +idna==3.6 # via # -c hail/batch/../gear/pinned-requirements.txt # -c hail/batch/../hail/python/dev/pinned-requirements.txt @@ -69,7 +62,7 @@ multidict==6.0.4 # -c hail/batch/../web_common/pinned-requirements.txt # aiohttp # yarl -numpy==1.26.1 +numpy==1.26.2 # via # -c hail/batch/../hail/python/pinned-requirements.txt # pandas @@ -78,7 +71,7 @@ packaging==23.2 # -c hail/batch/../hail/python/dev/pinned-requirements.txt # -c hail/batch/../hail/python/pinned-requirements.txt # plotly -pandas==2.1.2 +pandas==2.1.3 # via # -c hail/batch/../hail/python/pinned-requirements.txt # -r hail/batch/requirements.txt @@ -108,7 +101,6 @@ tenacity==8.2.3 # plotly typing-extensions==4.8.0 # via - # -c hail/batch/../gear/pinned-requirements.txt # -c hail/batch/../hail/python/dev/pinned-requirements.txt # -c hail/batch/../hail/python/pinned-requirements.txt # aiodocker @@ -116,7 +108,7 @@ tzdata==2023.3 # via # -c hail/batch/../hail/python/pinned-requirements.txt # pandas -yarl==1.9.2 +yarl==1.9.3 # via # -c hail/batch/../gear/pinned-requirements.txt # -c hail/batch/../hail/python/dev/pinned-requirements.txt diff --git a/benchmark/python/pinned-requirements.txt b/benchmark/python/pinned-requirements.txt index 9fc6c11ba50..20502345bd1 100644 --- a/benchmark/python/pinned-requirements.txt +++ b/benchmark/python/pinned-requirements.txt @@ -10,15 +10,15 @@ contourpy==1.2.0 # matplotlib cycler==0.12.1 # via matplotlib -fonttools==4.44.0 +fonttools==4.45.1 # via matplotlib importlib-resources==6.1.1 # via matplotlib kiwisolver==1.4.5 # via matplotlib -matplotlib==3.8.1 +matplotlib==3.8.2 # via -r hail/benchmark/python/requirements.txt -numpy==1.26.1 +numpy==1.26.2 # via # -c hail/benchmark/python/../../hail/python/pinned-requirements.txt # contourpy diff --git a/ci/ci/build.py b/ci/ci/build.py index 7a6a75d98ef..3232ea73dd5 100644 --- a/ci/ci/build.py +++ b/ci/ci/build.py @@ -3,7 +3,7 @@ import logging from collections import Counter, defaultdict from shlex import quote as shq -from typing import Dict, List, Optional, Sequence, TypedDict +from typing import Dict, List, Optional, Sequence, Set, TypedDict import jinja2 import yaml @@ -217,8 +217,8 @@ def deps_parents(self): return flatten([d.wrapped_job() for d in self.deps]) def all_deps(self): - visited = set([self]) - frontier = [self] + visited: Set[Step] = set([self]) + frontier: List[Step] = [self] while frontier: current = frontier.pop() diff --git a/ci/pinned-requirements.txt b/ci/pinned-requirements.txt index 7acf28dbfd2..6ada8c4918f 100644 --- a/ci/pinned-requirements.txt +++ b/ci/pinned-requirements.txt @@ -4,7 +4,7 @@ # # pip-compile --output-file=hail/ci/pinned-requirements.txt hail/ci/requirements.txt # -certifi==2023.7.22 +certifi==2023.11.17 # via # -c hail/ci/../gear/pinned-requirements.txt # -c hail/ci/../hail/python/dev/pinned-requirements.txt @@ -20,15 +20,13 @@ charset-normalizer==3.3.2 # -c hail/ci/../gear/pinned-requirements.txt # -c hail/ci/../hail/python/dev/pinned-requirements.txt # -c hail/ci/../hail/python/pinned-requirements.txt - # -c hail/ci/../web_common/pinned-requirements.txt # requests click==8.1.7 # via - # -c hail/ci/../gear/pinned-requirements.txt # -c hail/ci/../hail/python/dev/pinned-requirements.txt # -c hail/ci/../hail/python/pinned-requirements.txt # zulip -cryptography==41.0.5 +cryptography==41.0.7 # via # -c hail/ci/../hail/python/pinned-requirements.txt # pyjwt @@ -36,15 +34,13 @@ distro==1.8.0 # via zulip gidgethub==5.3.0 # via -r hail/ci/requirements.txt -idna==3.4 +idna==3.6 # via # -c 
hail/ci/../gear/pinned-requirements.txt # -c hail/ci/../hail/python/dev/pinned-requirements.txt # -c hail/ci/../hail/python/pinned-requirements.txt # -c hail/ci/../web_common/pinned-requirements.txt # requests -matrix-client==0.4.0 - # via zulip pycparser==2.21 # via # -c hail/ci/../hail/python/dev/pinned-requirements.txt @@ -59,11 +55,9 @@ requests[security]==2.31.0 # -c hail/ci/../gear/pinned-requirements.txt # -c hail/ci/../hail/python/dev/pinned-requirements.txt # -c hail/ci/../hail/python/pinned-requirements.txt - # matrix-client # zulip typing-extensions==4.8.0 # via - # -c hail/ci/../gear/pinned-requirements.txt # -c hail/ci/../hail/python/dev/pinned-requirements.txt # -c hail/ci/../hail/python/pinned-requirements.txt # zulip @@ -76,7 +70,6 @@ urllib3==1.26.18 # -c hail/ci/../gear/pinned-requirements.txt # -c hail/ci/../hail/python/dev/pinned-requirements.txt # -c hail/ci/../hail/python/pinned-requirements.txt - # matrix-client # requests -zulip==0.8.2 +zulip==0.9.0 # via -r hail/ci/requirements.txt diff --git a/gear/pinned-requirements.txt b/gear/pinned-requirements.txt index 11492abab7e..535766454e9 100644 --- a/gear/pinned-requirements.txt +++ b/gear/pinned-requirements.txt @@ -4,20 +4,15 @@ # # pip-compile --output-file=hail/gear/pinned-requirements.txt hail/gear/requirements.txt # -aioconsole==0.6.2 - # via aiomonitor -aiohttp==3.8.6 +aiohttp==3.9.1 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt # aiohttp-session - # aiomonitor # kubernetes-asyncio aiohttp-session==2.12.0 # via -r hail/gear/requirements.txt -aiomonitor==0.6.0 - # via -r hail/gear/requirements.txt aiomysql==0.2.0 # via -r hail/gear/requirements.txt aiosignal==1.3.1 @@ -38,15 +33,12 @@ attrs==23.1.0 # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt # aiohttp - # aiomonitor -backports-strenum==1.2.8 - # via aiomonitor cachetools==5.3.2 # via # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt # google-auth -certifi==2023.7.22 +certifi==2023.11.17 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt @@ -58,14 +50,7 @@ charset-normalizer==3.3.2 # -c hail/gear/../hail/python/dev/pinned-requirements.txt # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt - # aiohttp # requests -click==8.1.7 - # via - # -c hail/gear/../hail/python/dev/pinned-requirements.txt - # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt - # -c hail/gear/../hail/python/pinned-requirements.txt - # aiomonitor frozenlist==1.4.0 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt @@ -73,9 +58,9 @@ frozenlist==1.4.0 # -c hail/gear/../hail/python/pinned-requirements.txt # aiohttp # aiosignal -google-api-core==2.13.0 +google-api-core==2.14.0 # via google-api-python-client -google-api-python-client==2.107.0 +google-api-python-client==2.108.0 # via google-cloud-profiler google-auth==2.23.4 # via @@ -97,30 +82,15 @@ httplib2==0.22.0 # via # google-api-python-client # google-auth-httplib2 -idna==3.4 +idna==3.6 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt # requests # yarl -janus==1.0.0 - # via - # -c 
hail/gear/../hail/python/hailtop/pinned-requirements.txt - # -c hail/gear/../hail/python/pinned-requirements.txt - # aiomonitor -jinja2==3.1.2 - # via - # -c hail/gear/../hail/python/dev/pinned-requirements.txt - # -c hail/gear/../hail/python/pinned-requirements.txt - # aiomonitor kubernetes-asyncio==19.15.1 # via -r hail/gear/requirements.txt -markupsafe==2.1.3 - # via - # -c hail/gear/../hail/python/dev/pinned-requirements.txt - # -c hail/gear/../hail/python/pinned-requirements.txt - # jinja2 multidict==6.0.4 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt @@ -135,22 +105,18 @@ orjson==3.9.10 # -r hail/gear/requirements.txt prometheus-async==19.2.0 # via -r hail/gear/requirements.txt -prometheus-client==0.18.0 +prometheus-client==0.19.0 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt # -r hail/gear/requirements.txt # prometheus-async -prompt-toolkit==3.0.39 - # via - # -c hail/gear/../hail/python/dev/pinned-requirements.txt - # aiomonitor protobuf==3.20.2 # via # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt # google-api-core # google-cloud-profiler -pyasn1==0.5.0 +pyasn1==0.5.1 # via # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt @@ -203,17 +169,6 @@ sortedcontainers==2.4.0 # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt # -r hail/gear/requirements.txt -terminaltables==3.1.10 - # via aiomonitor -trafaret==2.1.1 - # via aiomonitor -typing-extensions==4.8.0 - # via - # -c hail/gear/../hail/python/dev/pinned-requirements.txt - # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt - # -c hail/gear/../hail/python/pinned-requirements.txt - # aiomonitor - # janus uritemplate==4.1.1 # via google-api-python-client urllib3==1.26.18 @@ -223,16 +178,12 @@ urllib3==1.26.18 # -c hail/gear/../hail/python/pinned-requirements.txt # kubernetes-asyncio # requests -wcwidth==0.2.9 - # via - # -c hail/gear/../hail/python/dev/pinned-requirements.txt - # prompt-toolkit -wrapt==1.15.0 +wrapt==1.16.0 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt # -c hail/gear/../hail/python/pinned-requirements.txt # prometheus-async -yarl==1.9.2 +yarl==1.9.3 # via # -c hail/gear/../hail/python/dev/pinned-requirements.txt # -c hail/gear/../hail/python/hailtop/pinned-requirements.txt diff --git a/gear/requirements.txt b/gear/requirements.txt index 91f2f5d581b..a75dd5cb217 100644 --- a/gear/requirements.txt +++ b/gear/requirements.txt @@ -7,7 +7,6 @@ -c ../hail/python/dev/pinned-requirements.txt aiohttp_session>=2.7,<2.13 -aiomonitor>=0.4.5,<1 aiomysql>=0.0.20,<1 google-cloud-profiler<4.0.0 kubernetes-asyncio>=19.15.1,<20 diff --git a/hail/python/dev/pinned-requirements.txt b/hail/python/dev/pinned-requirements.txt index 759e4be5ad9..2fff21408a2 100644 --- a/hail/python/dev/pinned-requirements.txt +++ b/hail/python/dev/pinned-requirements.txt @@ -4,11 +4,11 @@ # # pip-compile --output-file=hail/hail/python/dev/pinned-requirements.txt hail/hail/python/dev/requirements.txt # -aiohttp==3.8.6 +aiohttp==3.9.1 # via # -c hail/hail/python/dev/../pinned-requirements.txt # aiohttp-devtools -aiohttp-devtools==1.1 +aiohttp-devtools==1.1.1 # via -r hail/hail/python/dev/requirements.txt aiosignal==1.3.1 # via @@ -16,7 +16,7 @@ aiosignal==1.3.1 # aiohttp alabaster==0.7.13 # via sphinx -anyio==4.0.0 +anyio==4.1.0 # via # jupyter-server # watchfiles @@ -55,7 +55,7 @@ black==22.12.0 # via 
-r hail/hail/python/dev/requirements.txt bleach==6.1.0 # via nbconvert -certifi==2023.7.22 +certifi==2023.11.17 # via # -c hail/hail/python/dev/../pinned-requirements.txt # requests @@ -68,7 +68,6 @@ cfgv==3.4.0 charset-normalizer==3.3.2 # via # -c hail/hail/python/dev/../pinned-requirements.txt - # aiohttp # requests click==8.1.7 # via @@ -104,7 +103,7 @@ docutils==0.18.1 # nbsphinx # sphinx # sphinx-rtd-theme -exceptiongroup==1.1.3 +exceptiongroup==1.2.0 # via # anyio # ipython @@ -115,7 +114,7 @@ executing==2.0.1 # via # devtools # stack-data -fastjsonschema==2.18.1 +fastjsonschema==2.19.0 # via nbformat filelock==3.13.1 # via virtualenv @@ -128,9 +127,9 @@ frozenlist==1.4.0 # aiosignal fswatch==0.1.1 # via -r hail/hail/python/dev/requirements.txt -identify==2.5.31 +identify==2.5.32 # via pre-commit -idna==3.4 +idna==3.6 # via # -c hail/hail/python/dev/../pinned-requirements.txt # anyio @@ -149,13 +148,13 @@ importlib-metadata==6.8.0 # sphinx iniconfig==2.0.0 # via pytest -ipykernel==6.26.0 +ipykernel==6.27.1 # via # jupyter # jupyter-console # jupyterlab # qtconsole -ipython==8.17.2 +ipython==8.18.1 # via # ipykernel # ipywidgets @@ -181,12 +180,12 @@ json5==0.9.14 # via jupyterlab-server jsonpointer==2.4 # via jsonschema -jsonschema[format-nongpl]==4.19.2 +jsonschema[format-nongpl]==4.20.0 # via # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.7.1 +jsonschema-specifications==2023.11.1 # via jsonschema jupyter==1.0.0 # via -r hail/hail/python/dev/requirements.txt @@ -212,9 +211,9 @@ jupyter-core==5.5.0 # qtconsole jupyter-events==0.9.0 # via jupyter-server -jupyter-lsp==2.2.0 +jupyter-lsp==2.2.1 # via jupyterlab -jupyter-server==2.10.0 +jupyter-server==2.11.1 # via # jupyter-lsp # jupyterlab @@ -223,11 +222,11 @@ jupyter-server==2.10.0 # notebook-shim jupyter-server-terminals==0.4.4 # via jupyter-server -jupyterlab==4.0.8 +jupyterlab==4.0.9 # via notebook -jupyterlab-pygments==0.2.2 +jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.25.0 +jupyterlab-server==2.25.2 # via # jupyterlab # notebook @@ -308,13 +307,13 @@ pathspec==0.11.2 # via # black # curlylint -pexpect==4.8.0 +pexpect==4.9.0 # via ipython pillow==10.1.0 # via # -c hail/hail/python/dev/../pinned-requirements.txt # -r hail/hail/python/dev/requirements.txt -platformdirs==3.11.0 +platformdirs==4.0.0 # via # black # jupyter-core @@ -324,9 +323,9 @@ pluggy==1.3.0 # via pytest pre-commit==3.5.0 # via -r hail/hail/python/dev/requirements.txt -prometheus-client==0.18.0 +prometheus-client==0.19.0 # via jupyter-server -prompt-toolkit==3.0.39 +prompt-toolkit==3.0.41 # via # ipython # jupyter-console @@ -344,7 +343,7 @@ pycparser==2.21 # via # -c hail/hail/python/dev/../pinned-requirements.txt # cffi -pygments==2.16.1 +pygments==2.17.2 # via # -c hail/hail/python/dev/../pinned-requirements.txt # aiohttp-devtools @@ -356,7 +355,7 @@ pygments==2.16.1 # sphinx pylint==2.17.7 # via -r hail/hail/python/dev/requirements.txt -pyright==1.1.334 +pyright==1.1.337 # via -r hail/hail/python/dev/requirements.txt pytest==7.4.3 # via @@ -405,11 +404,11 @@ pyzmq==25.1.1 # jupyter-console # jupyter-server # qtconsole -qtconsole==5.5.0 +qtconsole==5.5.1 # via jupyter qtpy==2.4.1 # via qtconsole -referencing==0.30.2 +referencing==0.31.0 # via # jsonschema # jsonschema-specifications @@ -427,11 +426,11 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rpds-py==0.12.0 +rpds-py==0.13.1 # via # jsonschema # referencing -ruff==0.1.4 +ruff==0.1.6 # via -r hail/hail/python/dev/requirements.txt 
send2trash==1.8.2 # via jupyter-server @@ -483,7 +482,7 @@ sphinxcontrib-serializinghtml==1.1.9 # via sphinx stack-data==0.6.3 # via ipython -terminado==0.17.1 +terminado==0.18.0 # via # jupyter-server # jupyter-server-terminals @@ -497,7 +496,7 @@ tomli==2.0.1 # jupyterlab # pylint # pytest -tomlkit==0.12.2 +tomlkit==0.12.3 # via pylint tornado==6.3.3 # via @@ -508,7 +507,7 @@ tornado==6.3.3 # jupyterlab # notebook # terminado -traitlets==5.13.0 +traitlets==5.14.0 # via # comm # ipykernel @@ -542,7 +541,7 @@ types-pyyaml==6.0.12.12 # via -r hail/hail/python/dev/requirements.txt types-requests==2.31.0.6 # via -r hail/hail/python/dev/requirements.txt -types-setuptools==68.2.0.0 +types-setuptools==68.2.0.2 # via -r hail/hail/python/dev/requirements.txt types-six==1.16.21.9 # via -r hail/hail/python/dev/requirements.txt @@ -566,11 +565,11 @@ urllib3==1.26.18 # via # -c hail/hail/python/dev/../pinned-requirements.txt # requests -virtualenv==20.24.6 +virtualenv==20.24.7 # via pre-commit watchfiles==0.21.0 # via aiohttp-devtools -wcwidth==0.2.9 +wcwidth==0.2.12 # via prompt-toolkit webcolors==1.13 # via jsonschema @@ -584,11 +583,11 @@ wheel==0.41.3 # via -r hail/hail/python/dev/requirements.txt widgetsnbextension==4.0.9 # via ipywidgets -wrapt==1.15.0 +wrapt==1.16.0 # via # -c hail/hail/python/dev/../pinned-requirements.txt # astroid -yarl==1.9.2 +yarl==1.9.3 # via # -c hail/hail/python/dev/../pinned-requirements.txt # aiohttp diff --git a/hail/python/hailtop/pinned-requirements.txt b/hail/python/hailtop/pinned-requirements.txt index 220a3440e19..70adf3f3f6e 100644 --- a/hail/python/hailtop/pinned-requirements.txt +++ b/hail/python/hailtop/pinned-requirements.txt @@ -6,7 +6,7 @@ # aiodns==2.0.0 # via -r hail/hail/python/hailtop/requirements.txt -aiohttp==3.8.6 +aiohttp==3.9.1 # via -r hail/hail/python/hailtop/requirements.txt aiosignal==1.3.1 # via aiohttp @@ -28,18 +28,18 @@ azure-mgmt-core==1.4.0 # via azure-mgmt-storage azure-mgmt-storage==20.1.0 # via -r hail/hail/python/hailtop/requirements.txt -azure-storage-blob==12.18.3 +azure-storage-blob==12.19.0 # via -r hail/hail/python/hailtop/requirements.txt -boto3==1.28.80 +boto3==1.33.1 # via -r hail/hail/python/hailtop/requirements.txt -botocore==1.31.80 +botocore==1.33.1 # via # -r hail/hail/python/hailtop/requirements.txt # boto3 # s3transfer cachetools==5.3.2 # via google-auth -certifi==2023.7.22 +certifi==2023.11.17 # via # msrest # requests @@ -48,14 +48,12 @@ cffi==1.16.0 # cryptography # pycares charset-normalizer==3.3.2 - # via - # aiohttp - # requests + # via requests click==8.1.7 # via typer commonmark==0.9.1 # via rich -cryptography==41.0.5 +cryptography==41.0.7 # via # azure-identity # azure-storage-blob @@ -76,7 +74,7 @@ google-auth-oauthlib==0.8.0 # via -r hail/hail/python/hailtop/requirements.txt humanize==1.1.0 # via -r hail/hail/python/hailtop/requirements.txt -idna==3.4 +idna==3.6 # via # requests # yarl @@ -114,7 +112,7 @@ portalocker==2.8.2 # via msal-extensions protobuf==3.20.2 # via -r hail/hail/python/hailtop/requirements.txt -pyasn1==0.5.0 +pyasn1==0.5.1 # via # pyasn1-modules # rsa @@ -124,7 +122,7 @@ pycares==4.4.0 # via aiodns pycparser==2.21 # via cffi -pygments==2.16.1 +pygments==2.17.2 # via rich pyjwt[crypto]==2.8.0 # via msal @@ -148,7 +146,7 @@ rich==12.6.0 # via -r hail/hail/python/hailtop/requirements.txt rsa==4.9 # via google-auth -s3transfer==0.7.0 +s3transfer==0.8.0 # via boto3 six==1.16.0 # via @@ -174,5 +172,5 @@ urllib3==1.26.18 # requests uvloop==0.19.0 ; sys_platform != "win32" # via -r 
hail/hail/python/hailtop/requirements.txt -yarl==1.9.2 +yarl==1.9.3 # via aiohttp diff --git a/hail/python/hailtop/requirements.txt b/hail/python/hailtop/requirements.txt index a62016b2f52..a7d7f2f1899 100644 --- a/hail/python/hailtop/requirements.txt +++ b/hail/python/hailtop/requirements.txt @@ -1,5 +1,5 @@ aiodns>=2.0.0,<3 -aiohttp>=3.8.1,<4 +aiohttp>=3.9,<4 azure-identity>=1.6.0,<2 azure-mgmt-storage==20.1.0 azure-storage-blob>=12.11.0,<13 diff --git a/hail/python/pinned-requirements.txt b/hail/python/pinned-requirements.txt index db965402254..d7af149ca74 100644 --- a/hail/python/pinned-requirements.txt +++ b/hail/python/pinned-requirements.txt @@ -8,7 +8,7 @@ aiodns==2.0.0 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt -aiohttp==3.8.6 +aiohttp==3.9.1 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt @@ -49,17 +49,17 @@ azure-mgmt-storage==20.1.0 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt -azure-storage-blob==12.18.3 +azure-storage-blob==12.19.0 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt -bokeh==3.3.0 +bokeh==3.3.1 # via -r hail/hail/python/requirements.txt -boto3==1.28.80 +boto3==1.33.1 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt -botocore==1.31.80 +botocore==1.33.1 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt @@ -69,7 +69,7 @@ cachetools==5.3.2 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # google-auth -certifi==2023.7.22 +certifi==2023.11.17 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # msrest @@ -82,7 +82,6 @@ cffi==1.16.0 charset-normalizer==3.3.2 # via # -c hail/hail/python/hailtop/pinned-requirements.txt - # aiohttp # requests click==8.1.7 # via @@ -94,7 +93,7 @@ commonmark==0.9.1 # rich contourpy==1.2.0 # via bokeh -cryptography==41.0.5 +cryptography==41.0.7 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # azure-identity @@ -128,7 +127,7 @@ humanize==1.1.0 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt -idna==3.4 +idna==3.6 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # requests @@ -177,7 +176,7 @@ nest-asyncio==1.5.8 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt -numpy==1.26.1 +numpy==1.26.2 # via # -r hail/hail/python/requirements.txt # bokeh @@ -196,7 +195,7 @@ packaging==23.2 # via # bokeh # plotly -pandas==2.1.2 +pandas==2.1.3 # via # -r hail/hail/python/requirements.txt # bokeh @@ -217,7 +216,7 @@ protobuf==3.20.2 # -r hail/hail/python/requirements.txt py4j==0.10.9.5 # via pyspark -pyasn1==0.5.0 +pyasn1==0.5.1 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # pyasn1-modules @@ -234,7 +233,7 @@ pycparser==2.21 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # cffi -pygments==2.16.1 +pygments==2.17.2 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # rich @@ -283,11 +282,11 @@ rsa==4.9 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # google-auth -s3transfer==0.7.0 +s3transfer==0.8.0 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # boto3 -scipy==1.11.3 +scipy==1.11.4 # via -r hail/hail/python/requirements.txt six==1.16.0 # via @@ -330,11 +329,11 @@ uvloop==0.19.0 ; 
sys_platform != "win32" # via # -c hail/hail/python/hailtop/pinned-requirements.txt # -r hail/hail/python/hailtop/requirements.txt -wrapt==1.15.0 +wrapt==1.16.0 # via deprecated xyzservices==2023.10.1 # via bokeh -yarl==1.9.2 +yarl==1.9.3 # via # -c hail/hail/python/hailtop/pinned-requirements.txt # aiohttp diff --git a/web_common/pinned-requirements.txt b/web_common/pinned-requirements.txt index 7fa1d0ff664..d72a068dba2 100644 --- a/web_common/pinned-requirements.txt +++ b/web_common/pinned-requirements.txt @@ -4,13 +4,13 @@ # # pip-compile --output-file=hail/web_common/pinned-requirements.txt hail/web_common/requirements.txt # -aiohttp==3.8.6 +aiohttp==3.9.1 # via # -c hail/web_common/../gear/pinned-requirements.txt # -c hail/web_common/../hail/python/dev/pinned-requirements.txt # -c hail/web_common/../hail/python/pinned-requirements.txt # aiohttp-jinja2 -aiohttp-jinja2==1.5.1 +aiohttp-jinja2==1.6 # via -r hail/web_common/requirements.txt aiosignal==1.3.1 # via @@ -30,12 +30,6 @@ attrs==23.1.0 # -c hail/web_common/../hail/python/dev/pinned-requirements.txt # -c hail/web_common/../hail/python/pinned-requirements.txt # aiohttp -charset-normalizer==3.3.2 - # via - # -c hail/web_common/../gear/pinned-requirements.txt - # -c hail/web_common/../hail/python/dev/pinned-requirements.txt - # -c hail/web_common/../hail/python/pinned-requirements.txt - # aiohttp frozenlist==1.4.0 # via # -c hail/web_common/../gear/pinned-requirements.txt @@ -43,7 +37,7 @@ frozenlist==1.4.0 # -c hail/web_common/../hail/python/pinned-requirements.txt # aiohttp # aiosignal -idna==3.4 +idna==3.6 # via # -c hail/web_common/../gear/pinned-requirements.txt # -c hail/web_common/../hail/python/dev/pinned-requirements.txt @@ -51,7 +45,6 @@ idna==3.4 # yarl jinja2==3.1.2 # via - # -c hail/web_common/../gear/pinned-requirements.txt # -c hail/web_common/../hail/python/dev/pinned-requirements.txt # -c hail/web_common/../hail/python/pinned-requirements.txt # -r hail/web_common/requirements.txt @@ -60,7 +53,6 @@ libsass==0.22.0 # via -r hail/web_common/requirements.txt markupsafe==2.1.3 # via - # -c hail/web_common/../gear/pinned-requirements.txt # -c hail/web_common/../hail/python/dev/pinned-requirements.txt # -c hail/web_common/../hail/python/pinned-requirements.txt # jinja2 @@ -71,7 +63,7 @@ multidict==6.0.4 # -c hail/web_common/../hail/python/pinned-requirements.txt # aiohttp # yarl -yarl==1.9.2 +yarl==1.9.3 # via # -c hail/web_common/../gear/pinned-requirements.txt # -c hail/web_common/../hail/python/dev/pinned-requirements.txt From b08768fc32f50133ef4e3b7e82c81f831f2dd262 Mon Sep 17 00:00:00 2001 From: Daniel Goldstein Date: Tue, 28 Nov 2023 23:02:23 -0800 Subject: [PATCH 18/48] [k8s] Make devbin function for manually scaling dev namespaces (#14025) Resolves #14020. I'll manually delete the scale up cron jobs from the dev namespaces once this goes in. 
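For reference, a usage sketch of the new `kscale` helper added in `devbin/functions.sh` below; `my-namespace` is a placeholder for a dev namespace (the function refuses to run against `default`):

```bash
# scale every deployment and statefulset in the dev namespace back up to one replica
kscale my-namespace up

# scale them back down to zero replicas when finished
kscale my-namespace down
```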
--- build.yaml | 27 +---------------- dev-docs/services/services-development-faq.md | 13 +++++++++ devbin/functions.sh | 29 +++++++++++++++++++ 3 files changed, 43 insertions(+), 26 deletions(-) diff --git a/build.yaml b/build.yaml index f2188666d27..ae51f3cee05 100644 --- a/build.yaml +++ b/build.yaml @@ -3820,7 +3820,7 @@ steps: - test_hail_python_service_backend_azure - cancel_all_running_test_batches - kind: runImage - name: setup_dev_namespace_autoscalers + name: setup_dev_namespace_autoscaledown resources: memory: standard cpu: '0.25' @@ -3857,31 +3857,6 @@ steps: - -c - set -ex ; kubectl scale deployments --all -n {{ user["username"] }} --replicas=0 && kubectl scale statefulsets --all -n {{ user["username"] }} --replicas=0 restartPolicy: OnFailure - --- - apiVersion: batch/v1 - kind: CronJob - metadata: - name: dev-namespace-scaleup-{{ user["username"] }} - namespace: {{ user["username"] }} - spec: - schedule: "0 13 * * 1,2,3,4,5" # Weekdays at 1pm UTC - concurrencyPolicy: Forbid - successfulJobsHistoryLimit: 0 - failedJobsHistoryLimit: 1 - jobTemplate: - spec: - template: - spec: - serviceAccountName: admin - containers: - - name: dev-namespace-daytime-autoscaler - image: bitnami/kubectl:latest - command: - - /bin/sh - - -c - - set -ex ; kubectl scale deployments --all -n {{ user["username"] }} --replicas=1 && kubectl scale statefulsets --all -n {{ user["username"] }} --replicas=1 - restartPolicy: OnFailure - --- EOF kubectl apply -f the.yaml diff --git a/dev-docs/services/services-development-faq.md b/dev-docs/services/services-development-faq.md index e18a73013e2..58ab09f6fa0 100644 --- a/dev-docs/services/services-development-faq.md +++ b/dev-docs/services/services-development-faq.md @@ -29,3 +29,16 @@ The next dev deploy will set up a new database: ```bash hailctl dev deploy -b /hail: -s deploy_batch,add_developers ``` + +#### My namespace scaled down overnight. How do I get it back? + +There is a Kubernetes `CronJob` that runs in the evenings that scales down +development namespaces. To scale back up, you can use `kubectl scale` directly, +or use the devbin function `kscale`, for example: + +```bash +kscale <my-namespace> up +``` + +If you want to manually scale down your namespace when not using it, run +`kscale <my-namespace> down`.
diff --git a/devbin/functions.sh b/devbin/functions.sh index c90171506d5..44118f3a960 100644 --- a/devbin/functions.sh +++ b/devbin/functions.sh @@ -135,6 +135,35 @@ knodes() { done } +kscale() { + local usage="Use like kscale <namespace> up or kscale <namespace> down" + if [ -z "$1" ]; then + echo $usage + return + fi + local namespace=$1 + if [[ "$namespace" == "default" ]]; then + echo "ERROR: kscale should only be used for dev namespaces" + return + fi + + case "$2" in + up) + local replicas=1 + ;; + down) + local replicas=0 + ;; + *) + echo $usage + return + ;; + esac + + kubectl -n $namespace scale deployments --all --replicas=$replicas + kubectl -n $namespace scale statefulsets --all --replicas=$replicas +} + download-secret() { # download-secret secret-name namespace # From f1f7e3c1c6ed793b96c88b6b01e67ff74592643e Mon Sep 17 00:00:00 2001 From: jigold Date: Wed, 29 Nov 2023 10:31:30 -0800 Subject: [PATCH 19/48] [batch] turn off all unintended gcp cloud ops logs and metrics (#14050) --- batch/batch/cloud/gcp/driver/create_instance.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/batch/batch/cloud/gcp/driver/create_instance.py b/batch/batch/cloud/gcp/driver/create_instance.py index 5bddfe9d814..8fa6f349bc7 100644 --- a/batch/batch/cloud/gcp/driver/create_instance.py +++ b/batch/batch/cloud/gcp/driver/create_instance.py @@ -240,6 +240,7 @@ def scheduling() -> dict: labels.instance_id: static_value: $INSTANCE_ID service: + log_level: error pipelines: default_pipeline: processors: [labels] @@ -250,7 +251,9 @@ def scheduling() -> dict: metrics_filter: type: exclude_metrics metrics_pattern: - - agent.googleapis.com/processes/* + - agent.googleapis.com/*/* + service: + log_level: error EOF sudo systemctl restart google-cloud-ops-agent From 4fac65b20dd5840dd41ad7a02d0fff0edf4be55f Mon Sep 17 00:00:00 2001 From: Patrick Schultz Date: Wed, 29 Nov 2023 17:01:07 -0500 Subject: [PATCH 20/48] [query] relax tolerance in local whitening test (#14053) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It’s a random test, and it seems the current tolerance still allows rare sporadic failures. --- hail/python/test/hail/experimental/test_local_whitening.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hail/python/test/hail/experimental/test_local_whitening.py b/hail/python/test/hail/experimental/test_local_whitening.py index 097d12e5b95..f2e616ff9b8 100644 --- a/hail/python/test/hail/experimental/test_local_whitening.py +++ b/hail/python/test/hail/experimental/test_local_whitening.py @@ -24,7 +24,7 @@ def run_local_whitening_test(vec_size, num_rows, chunk_size, window_size, partit ht = tsm.block_table whitened_hail = np.vstack(ht.aggregate(hl.agg.collect(tsm.block_expr))) whitened_naive = naive_whiten(data.T, window_size) - np.testing.assert_allclose(whitened_hail, whitened_naive, rtol=5e-05) + np.testing.assert_allclose(whitened_hail, whitened_naive, rtol=1e-04) @test_timeout(local=5 * 60, batch=12 * 60) def test_local_whitening(): From 1dedf3c63f9aabf1b6ce538165360056f82f76e4 Mon Sep 17 00:00:00 2001 From: jigold Date: Thu, 30 Nov 2023 11:19:47 -0500 Subject: [PATCH 21/48] [batch] Rename batches tables to job groups (#13810) Stacked on #13475. This PR renames the following tables to have job groups in the name instead of batches. Note that this PR needs to shut down the batch deployment (offline migration).
I'm not 100% sure this is necessary, but I want to avoid a case where MJC of the database migration job cannot happen thus deadlocking the system. ```sql RENAME TABLE batch_attributes TO job_group_attributes, batches_cancelled TO job_groups_cancelled, batches_inst_coll_staging TO job_groups_inst_coll_staging, batch_inst_coll_cancellable_resources TO job_group_inst_coll_cancellable_resources, aggregated_batch_resources_v2 TO aggregated_job_group_resources_v2, aggregated_batch_resources_v3 TO aggregated_job_group_resources_v3, batches_n_jobs_in_complete_states TO job_groups_n_jobs_in_complete_states; ``` --- batch/batch/driver/canceller.py | 14 +- .../driver/instance_collection/job_private.py | 6 +- .../batch/driver/instance_collection/pool.py | 10 +- batch/batch/driver/job.py | 22 +- batch/batch/driver/main.py | 14 +- batch/batch/front_end/front_end.py | 56 +- batch/batch/front_end/query/query.py | 8 +- batch/batch/front_end/query/query_v1.py | 28 +- batch/batch/front_end/query/query_v2.py | 18 +- batch/sql/estimated-current.sql | 112 +-- batch/sql/rename-job-groups-tables.sql | 924 ++++++++++++++++++ build.yaml | 3 + 12 files changed, 1071 insertions(+), 144 deletions(-) create mode 100644 batch/sql/rename-job-groups-tables.sql diff --git a/batch/batch/driver/canceller.py b/batch/batch/driver/canceller.py index 09a4efa86ae..4ee7f0e51c1 100644 --- a/batch/batch/driver/canceller.py +++ b/batch/batch/driver/canceller.py @@ -96,10 +96,10 @@ async def cancel_cancelled_ready_jobs_loop_body(self): async def user_cancelled_ready_jobs(user, remaining) -> AsyncIterator[Dict[str, Any]]: async for batch in self.db.select_and_fetchall( ''' -SELECT batches.id, batches_cancelled.id IS NOT NULL AS cancelled +SELECT batches.id, job_groups_cancelled.id IS NOT NULL AS cancelled FROM batches -LEFT JOIN batches_cancelled - ON batches.id = batches_cancelled.id +LEFT JOIN job_groups_cancelled + ON batches.id = job_groups_cancelled.id WHERE user = %s AND `state` = 'running'; ''', (user,), @@ -186,8 +186,8 @@ async def user_cancelled_creating_jobs(user, remaining) -> AsyncIterator[Dict[st ''' SELECT batches.id FROM batches -INNER JOIN batches_cancelled - ON batches.id = batches_cancelled.id +INNER JOIN job_groups_cancelled + ON batches.id = job_groups_cancelled.id WHERE user = %s AND `state` = 'running'; ''', (user,), @@ -283,8 +283,8 @@ async def user_cancelled_running_jobs(user, remaining) -> AsyncIterator[Dict[str ''' SELECT batches.id FROM batches -INNER JOIN batches_cancelled - ON batches.id = batches_cancelled.id +INNER JOIN job_groups_cancelled + ON batches.id = job_groups_cancelled.id WHERE user = %s AND `state` = 'running'; ''', (user,), diff --git a/batch/batch/driver/instance_collection/job_private.py b/batch/batch/driver/instance_collection/job_private.py index 44a021fa08a..d4800402cbc 100644 --- a/batch/batch/driver/instance_collection/job_private.py +++ b/batch/batch/driver/instance_collection/job_private.py @@ -351,10 +351,10 @@ async def create_instances_loop_body(self): async def user_runnable_jobs(user, remaining) -> AsyncIterator[Dict[str, Any]]: async for batch in self.db.select_and_fetchall( ''' -SELECT batches.id, batches_cancelled.id IS NOT NULL AS cancelled, userdata, user, format_version +SELECT batches.id, job_groups_cancelled.id IS NOT NULL AS cancelled, userdata, user, format_version FROM batches -LEFT JOIN batches_cancelled - ON batches.id = batches_cancelled.id +LEFT JOIN job_groups_cancelled + ON batches.id = job_groups_cancelled.id WHERE user = %s AND `state` = 'running'; ''', 
(user,), diff --git a/batch/batch/driver/instance_collection/pool.py b/batch/batch/driver/instance_collection/pool.py index b241822d446..f6f254cc60f 100644 --- a/batch/batch/driver/instance_collection/pool.py +++ b/batch/batch/driver/instance_collection/pool.py @@ -340,8 +340,8 @@ async def regions_to_ready_cores_mcpu_from_estimated_job_queue(self) -> List[Tup SELECT jobs.batch_id, jobs.job_id, cores_mcpu, always_run, n_regions, regions_bits_rep FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled) LEFT JOIN batches ON jobs.batch_id = batches.id - LEFT JOIN batches_cancelled ON batches.id = batches_cancelled.id - WHERE user = %s AND batches.`state` = 'running' AND jobs.state = 'Ready' AND NOT always_run AND batches_cancelled.id IS NULL AND inst_coll = %s + LEFT JOIN job_groups_cancelled ON batches.id = job_groups_cancelled.id + WHERE user = %s AND batches.`state` = 'running' AND jobs.state = 'Ready' AND NOT always_run AND job_groups_cancelled.id IS NULL AND inst_coll = %s ORDER BY jobs.batch_id ASC, jobs.job_id ASC LIMIT {share * self.job_queue_scheduling_window_secs} ) @@ -607,10 +607,10 @@ async def schedule_loop_body(self): async def user_runnable_jobs(user): async for batch in self.db.select_and_fetchall( ''' -SELECT batches.id, batches_cancelled.id IS NOT NULL AS cancelled, userdata, user, format_version +SELECT batches.id, job_groups_cancelled.id IS NOT NULL AS cancelled, userdata, user, format_version FROM batches -LEFT JOIN batches_cancelled - ON batches.id = batches_cancelled.id +LEFT JOIN job_groups_cancelled + ON batches.id = job_groups_cancelled.id WHERE user = %s AND `state` = 'running'; ''', (user,), diff --git a/batch/batch/driver/job.py b/batch/batch/driver/job.py index 4a7f44021ba..a4b54705e3e 100644 --- a/batch/batch/driver/job.py +++ b/batch/batch/driver/job.py @@ -34,27 +34,27 @@ async def notify_batch_job_complete(db: Database, client_session: httpx.ClientSe SELECT batches.*, cost_t.cost, cost_t.cost_breakdown, - batches_cancelled.id IS NOT NULL AS cancelled, - batches_n_jobs_in_complete_states.n_completed, - batches_n_jobs_in_complete_states.n_succeeded, - batches_n_jobs_in_complete_states.n_failed, - batches_n_jobs_in_complete_states.n_cancelled + job_groups_cancelled.id IS NOT NULL AS cancelled, + job_groups_n_jobs_in_complete_states.n_completed, + job_groups_n_jobs_in_complete_states.n_succeeded, + job_groups_n_jobs_in_complete_states.n_failed, + job_groups_n_jobs_in_complete_states.n_cancelled FROM batches -LEFT JOIN batches_n_jobs_in_complete_states - ON batches.id = batches_n_jobs_in_complete_states.id +LEFT JOIN job_groups_n_jobs_in_complete_states + ON batches.id = job_groups_n_jobs_in_complete_states.id LEFT JOIN LATERAL ( SELECT COALESCE(SUM(`usage` * rate), 0) AS cost, JSON_OBJECTAGG(resources.resource, COALESCE(`usage` * rate, 0)) AS cost_breakdown FROM ( SELECT batch_id, resource_id, CAST(COALESCE(SUM(`usage`), 0) AS SIGNED) AS `usage` - FROM aggregated_batch_resources_v3 - WHERE batches.id = aggregated_batch_resources_v3.batch_id + FROM aggregated_job_group_resources_v3 + WHERE batches.id = aggregated_job_group_resources_v3.batch_id GROUP BY batch_id, resource_id ) AS usage_t LEFT JOIN resources ON usage_t.resource_id = resources.resource_id GROUP BY batch_id ) AS cost_t ON TRUE -LEFT JOIN batches_cancelled - ON batches.id = batches_cancelled.id +LEFT JOIN job_groups_cancelled + ON batches.id = job_groups_cancelled.id WHERE batches.id = %s AND NOT deleted AND callback IS NOT NULL AND batches.`state` = 'complete'; ''', diff --git 
a/batch/batch/driver/main.py b/batch/batch/driver/main.py index bce80a89131..264aad5076d 100644 --- a/batch/batch/driver/main.py +++ b/batch/batch/driver/main.py @@ -1025,11 +1025,11 @@ async def check(tx): FROM ( SELECT batches.user, jobs.state, jobs.cores_mcpu, jobs.inst_coll, - (jobs.always_run OR NOT (jobs.cancelled OR batches_cancelled.id IS NOT NULL)) AS runnable, - (NOT jobs.always_run AND (jobs.cancelled OR batches_cancelled.id IS NOT NULL)) AS cancelled + (jobs.always_run OR NOT (jobs.cancelled OR job_groups_cancelled.id IS NOT NULL)) AS runnable, + (NOT jobs.always_run AND (jobs.cancelled OR job_groups_cancelled.id IS NOT NULL)) AS cancelled FROM batches INNER JOIN jobs ON batches.id = jobs.batch_id - LEFT JOIN batches_cancelled ON batches.id = batches_cancelled.id + LEFT JOIN job_groups_cancelled ON batches.id = job_groups_cancelled.id WHERE batches.`state` = 'running' ) as v GROUP BY user, inst_coll @@ -1144,7 +1144,7 @@ async def check(tx): SELECT batch_id, billing_project, JSON_OBJECTAGG(resource, `usage`) as resources FROM ( SELECT batch_id, resource_id, CAST(COALESCE(SUM(`usage`), 0) AS SIGNED) AS `usage` - FROM aggregated_batch_resources_v3 + FROM aggregated_job_group_resources_v3 GROUP BY batch_id, resource_id) AS t LEFT JOIN resources ON t.resource_id = resources.resource_id JOIN batches ON batches.id = t.batch_id @@ -1251,10 +1251,10 @@ async def cancel_fast_failing_batches(app): records = db.select_and_fetchall( ''' -SELECT batches.id, batches_n_jobs_in_complete_states.n_failed +SELECT batches.id, job_groups_n_jobs_in_complete_states.n_failed FROM batches -LEFT JOIN batches_n_jobs_in_complete_states - ON batches.id = batches_n_jobs_in_complete_states.id +LEFT JOIN job_groups_n_jobs_in_complete_states + ON batches.id = job_groups_n_jobs_in_complete_states.id WHERE state = 'running' AND cancel_after_n_failures IS NOT NULL AND n_failed >= cancel_after_n_failures ''' ) diff --git a/batch/batch/front_end/front_end.py b/batch/batch/front_end/front_end.py index 960de5d193c..2d29be2901e 100644 --- a/batch/batch/front_end/front_end.py +++ b/batch/batch/front_end/front_end.py @@ -1099,7 +1099,7 @@ async def insert_jobs_into_db(tx): query_name='insert_jobs_telemetry', ) - batches_inst_coll_staging_args = [ + job_groups_inst_coll_staging_args = [ ( batch_id, update_id, @@ -1114,18 +1114,18 @@ async def insert_jobs_into_db(tx): ] await tx.execute_many( ''' -INSERT INTO batches_inst_coll_staging (batch_id, update_id, job_group_id, inst_coll, token, n_jobs, n_ready_jobs, ready_cores_mcpu) +INSERT INTO job_groups_inst_coll_staging (batch_id, update_id, job_group_id, inst_coll, token, n_jobs, n_ready_jobs, ready_cores_mcpu) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE n_jobs = n_jobs + VALUES(n_jobs), n_ready_jobs = n_ready_jobs + VALUES(n_ready_jobs), ready_cores_mcpu = ready_cores_mcpu + VALUES(ready_cores_mcpu); ''', - batches_inst_coll_staging_args, - query_name='insert_batches_inst_coll_staging', + job_groups_inst_coll_staging_args, + query_name='insert_job_groups_inst_coll_staging', ) - batch_inst_coll_cancellable_resources_args = [ + job_group_inst_coll_cancellable_resources_args = [ ( batch_id, update_id, @@ -1139,13 +1139,13 @@ async def insert_jobs_into_db(tx): ] await tx.execute_many( ''' -INSERT INTO batch_inst_coll_cancellable_resources (batch_id, update_id, job_group_id, inst_coll, token, n_ready_cancellable_jobs, ready_cancellable_cores_mcpu) +INSERT INTO job_group_inst_coll_cancellable_resources (batch_id, update_id, job_group_id, inst_coll, token, 
n_ready_cancellable_jobs, ready_cancellable_cores_mcpu) VALUES (%s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE n_ready_cancellable_jobs = n_ready_cancellable_jobs + VALUES(n_ready_cancellable_jobs), ready_cancellable_cores_mcpu = ready_cancellable_cores_mcpu + VALUES(ready_cancellable_cores_mcpu); ''', - batch_inst_coll_cancellable_resources_args, + job_group_inst_coll_cancellable_resources_args, query_name='insert_inst_coll_cancellable_resources', ) @@ -1354,20 +1354,20 @@ async def insert(tx): await tx.execute_insertone( ''' -INSERT INTO batches_n_jobs_in_complete_states (id, job_group_id) VALUES (%s, %s); +INSERT INTO job_groups_n_jobs_in_complete_states (id, job_group_id) VALUES (%s, %s); ''', (id, ROOT_JOB_GROUP_ID), - query_name='insert_batches_n_jobs_in_complete_states', + query_name='insert_job_groups_n_jobs_in_complete_states', ) if attributes: await tx.execute_many( ''' -INSERT INTO `batch_attributes` (batch_id, job_group_id, `key`, `value`) +INSERT INTO `job_group_attributes` (batch_id, job_group_id, `key`, `value`) VALUES (%s, %s, %s, %s) ''', [(id, ROOT_JOB_GROUP_ID, k, v) for k, v in attributes.items()], - query_name='insert_batch_attributes', + query_name='insert_job_group_attributes', ) return id @@ -1454,9 +1454,9 @@ async def update(tx: Transaction): # but do allow updates to batches with jobs that have been cancelled. record = await tx.execute_and_fetchone( ''' -SELECT batches_cancelled.id IS NOT NULL AS cancelled +SELECT job_groups_cancelled.id IS NOT NULL AS cancelled FROM batches -LEFT JOIN batches_cancelled ON batches.id = batches_cancelled.id +LEFT JOIN job_groups_cancelled ON batches.id = job_groups_cancelled.id WHERE batches.id = %s AND user = %s AND NOT deleted FOR UPDATE; ''', @@ -1506,23 +1506,23 @@ async def _get_batch(app, batch_id): record = await db.select_and_fetchone( ''' SELECT batches.*, - batches_cancelled.id IS NOT NULL AS cancelled, - batches_n_jobs_in_complete_states.n_completed, - batches_n_jobs_in_complete_states.n_succeeded, - batches_n_jobs_in_complete_states.n_failed, - batches_n_jobs_in_complete_states.n_cancelled, + job_groups_cancelled.id IS NOT NULL AS cancelled, + job_groups_n_jobs_in_complete_states.n_completed, + job_groups_n_jobs_in_complete_states.n_succeeded, + job_groups_n_jobs_in_complete_states.n_failed, + job_groups_n_jobs_in_complete_states.n_cancelled, cost_t.* FROM batches -LEFT JOIN batches_n_jobs_in_complete_states - ON batches.id = batches_n_jobs_in_complete_states.id -LEFT JOIN batches_cancelled - ON batches.id = batches_cancelled.id +LEFT JOIN job_groups_n_jobs_in_complete_states + ON batches.id = job_groups_n_jobs_in_complete_states.id +LEFT JOIN job_groups_cancelled + ON batches.id = job_groups_cancelled.id LEFT JOIN LATERAL ( SELECT COALESCE(SUM(`usage` * rate), 0) AS cost, JSON_OBJECTAGG(resources.resource, COALESCE(`usage` * rate, 0)) AS cost_breakdown FROM ( SELECT batch_id, resource_id, CAST(COALESCE(SUM(`usage`), 0) AS SIGNED) AS `usage` - FROM aggregated_batch_resources_v3 - WHERE batches.id = aggregated_batch_resources_v3.batch_id + FROM aggregated_job_group_resources_v3 + WHERE batches.id = aggregated_job_group_resources_v3.batch_id GROUP BY batch_id, resource_id ) AS usage_t LEFT JOIN resources ON usage_t.resource_id = resources.resource_id @@ -1592,9 +1592,9 @@ async def close_batch(request, userdata): record = await db.select_and_fetchone( ''' -SELECT batches_cancelled.id IS NOT NULL AS cancelled +SELECT job_groups_cancelled.id IS NOT NULL AS cancelled FROM batches -LEFT JOIN batches_cancelled ON 
batches.id = batches_cancelled.id +LEFT JOIN job_groups_cancelled ON batches.id = job_groups_cancelled.id WHERE user = %s AND batches.id = %s AND NOT deleted; ''', (user, batch_id), @@ -1629,10 +1629,10 @@ async def commit_update(request: web.Request, userdata): record = await db.select_and_fetchone( ''' -SELECT start_job_id, batches_cancelled.id IS NOT NULL AS cancelled +SELECT start_job_id, job_groups_cancelled.id IS NOT NULL AS cancelled FROM batches LEFT JOIN batch_updates ON batches.id = batch_updates.batch_id -LEFT JOIN batches_cancelled ON batches.id = batches_cancelled.id +LEFT JOIN job_groups_cancelled ON batches.id = job_groups_cancelled.id WHERE user = %s AND batches.id = %s AND batch_updates.update_id = %s AND NOT deleted; ''', (user, batch_id, update_id), diff --git a/batch/batch/front_end/query/query.py b/batch/batch/front_end/query/query.py index feaaf38e347..c28d804e9f0 100644 --- a/batch/batch/front_end/query/query.py +++ b/batch/batch/front_end/query/query.py @@ -373,7 +373,7 @@ def query(self) -> Tuple[str, List[Any]]: condition = "(`state` = 'running')" args = [] elif self.state == BatchState.CANCELLED: - condition = '(batches_cancelled.id IS NOT NULL)' + condition = '(job_groups_cancelled.id IS NOT NULL)' args = [] elif self.state == BatchState.FAILURE: condition = '(n_failed > 0)' @@ -457,7 +457,7 @@ def __init__(self, term: str): def query(self) -> Tuple[str, List[str]]: sql = ''' ((batches.id) IN - (SELECT batch_id FROM batch_attributes + (SELECT batch_id FROM job_group_attributes WHERE `key` = %s OR `value` = %s)) ''' return (sql, [self.term, self.term]) @@ -480,7 +480,7 @@ def __init__(self, term: str): def query(self) -> Tuple[str, List[str]]: sql = ''' ((batches.id) IN - (SELECT batch_id FROM batch_attributes + (SELECT batch_id FROM job_group_attributes WHERE `key` LIKE %s OR `value` LIKE %s)) ''' escaped_term = f'%{self.term}%' @@ -507,7 +507,7 @@ def query(self) -> Tuple[str, List[str]]: value = f'%{value}%' sql = f''' ((batches.id) IN - (SELECT batch_id FROM batch_attributes + (SELECT batch_id FROM job_group_attributes WHERE `key` = %s AND `value` {op} %s)) ''' return (sql, [self.key, value]) diff --git a/batch/batch/front_end/query/query_v1.py b/batch/batch/front_end/query/query_v1.py index 675287fd72b..a52b1cf2c25 100644 --- a/batch/batch/front_end/query/query_v1.py +++ b/batch/batch/front_end/query/query_v1.py @@ -28,7 +28,7 @@ def parse_list_batches_query_v1(user: str, q: str, last_batch_id: Optional[int]) k, v = t.split('=', 1) condition = ''' ((batches.id) IN - (SELECT batch_id FROM batch_attributes + (SELECT batch_id FROM job_group_attributes WHERE `key` = %s AND `value` = %s)) ''' args = [k, v] @@ -36,7 +36,7 @@ def parse_list_batches_query_v1(user: str, q: str, last_batch_id: Optional[int]) k = t[4:] condition = ''' ((batches.id) IN - (SELECT batch_id FROM batch_attributes + (SELECT batch_id FROM job_group_attributes WHERE `key` = %s)) ''' args = [k] @@ -65,7 +65,7 @@ def parse_list_batches_query_v1(user: str, q: str, last_batch_id: Optional[int]) condition = "(`state` = 'running')" args = [] elif t == 'cancelled': - condition = '(batches_cancelled.id IS NOT NULL)' + condition = '(job_groups_cancelled.id IS NOT NULL)' args = [] elif t == 'failure': condition = '(n_failed > 0)' @@ -86,17 +86,17 @@ def parse_list_batches_query_v1(user: str, q: str, last_batch_id: Optional[int]) sql = f''' WITH base_t AS ( SELECT batches.*, - batches_cancelled.id IS NOT NULL AS cancelled, - batches_n_jobs_in_complete_states.n_completed, - 
batches_n_jobs_in_complete_states.n_succeeded, - batches_n_jobs_in_complete_states.n_failed, - batches_n_jobs_in_complete_states.n_cancelled + job_groups_cancelled.id IS NOT NULL AS cancelled, + job_groups_n_jobs_in_complete_states.n_completed, + job_groups_n_jobs_in_complete_states.n_succeeded, + job_groups_n_jobs_in_complete_states.n_failed, + job_groups_n_jobs_in_complete_states.n_cancelled FROM batches LEFT JOIN billing_projects ON batches.billing_project = billing_projects.name - LEFT JOIN batches_n_jobs_in_complete_states - ON batches.id = batches_n_jobs_in_complete_states.id - LEFT JOIN batches_cancelled - ON batches.id = batches_cancelled.id + LEFT JOIN job_groups_n_jobs_in_complete_states + ON batches.id = job_groups_n_jobs_in_complete_states.id + LEFT JOIN job_groups_cancelled + ON batches.id = job_groups_cancelled.id STRAIGHT_JOIN billing_project_users ON batches.billing_project = billing_project_users.billing_project WHERE {' AND '.join(where_conditions)} ORDER BY id DESC @@ -108,8 +108,8 @@ def parse_list_batches_query_v1(user: str, q: str, last_batch_id: Optional[int]) SELECT COALESCE(SUM(`usage` * rate), 0) AS cost, JSON_OBJECTAGG(resources.resource, COALESCE(`usage` * rate, 0)) AS cost_breakdown FROM ( SELECT batch_id, resource_id, CAST(COALESCE(SUM(`usage`), 0) AS SIGNED) AS `usage` - FROM aggregated_batch_resources_v3 - WHERE base_t.id = aggregated_batch_resources_v3.batch_id + FROM aggregated_job_group_resources_v3 + WHERE base_t.id = aggregated_job_group_resources_v3.batch_id GROUP BY batch_id, resource_id ) AS usage_t LEFT JOIN resources ON usage_t.resource_id = resources.resource_id diff --git a/batch/batch/front_end/query/query_v2.py b/batch/batch/front_end/query/query_v2.py index ad7ba171a4c..ad2df661ff8 100644 --- a/batch/batch/front_end/query/query_v2.py +++ b/batch/batch/front_end/query/query_v2.py @@ -128,23 +128,23 @@ def parse_list_batches_query_v2(user: str, q: str, last_batch_id: Optional[int]) sql = f''' SELECT batches.*, - batches_cancelled.id IS NOT NULL AS cancelled, - batches_n_jobs_in_complete_states.n_completed, - batches_n_jobs_in_complete_states.n_succeeded, - batches_n_jobs_in_complete_states.n_failed, - batches_n_jobs_in_complete_states.n_cancelled, + job_groups_cancelled.id IS NOT NULL AS cancelled, + job_groups_n_jobs_in_complete_states.n_completed, + job_groups_n_jobs_in_complete_states.n_succeeded, + job_groups_n_jobs_in_complete_states.n_failed, + job_groups_n_jobs_in_complete_states.n_cancelled, cost_t.cost, cost_t.cost_breakdown FROM batches LEFT JOIN billing_projects ON batches.billing_project = billing_projects.name -LEFT JOIN batches_n_jobs_in_complete_states ON batches.id = batches_n_jobs_in_complete_states.id -LEFT JOIN batches_cancelled ON batches.id = batches_cancelled.id +LEFT JOIN job_groups_n_jobs_in_complete_states ON batches.id = job_groups_n_jobs_in_complete_states.id +LEFT JOIN job_groups_cancelled ON batches.id = job_groups_cancelled.id STRAIGHT_JOIN billing_project_users ON batches.billing_project = billing_project_users.billing_project LEFT JOIN LATERAL ( SELECT COALESCE(SUM(`usage` * rate), 0) AS cost, JSON_OBJECTAGG(resources.resource, COALESCE(`usage` * rate, 0)) AS cost_breakdown FROM ( SELECT batch_id, resource_id, CAST(COALESCE(SUM(`usage`), 0) AS SIGNED) AS `usage` - FROM aggregated_batch_resources_v3 - WHERE batches.id = aggregated_batch_resources_v3.batch_id + FROM aggregated_job_group_resources_v3 + WHERE batches.id = aggregated_job_group_resources_v3.batch_id GROUP BY batch_id, resource_id ) AS usage_t LEFT JOIN 
resources ON usage_t.resource_id = resources.resource_id diff --git a/batch/sql/estimated-current.sql b/batch/sql/estimated-current.sql index 6012fb309c1..74aa7ea114c 100644 --- a/batch/sql/estimated-current.sql +++ b/batch/sql/estimated-current.sql @@ -239,7 +239,7 @@ CREATE TABLE IF NOT EXISTS `batch_updates` ( CREATE INDEX `batch_updates_committed` ON `batch_updates` (`batch_id`, `committed`); CREATE INDEX `batch_updates_start_job_id` ON `batch_updates` (`batch_id`, `start_job_id`); -CREATE TABLE IF NOT EXISTS `batches_n_jobs_in_complete_states` ( +CREATE TABLE IF NOT EXISTS `job_groups_n_jobs_in_complete_states` ( `id` BIGINT NOT NULL, `job_group_id` INT NOT NULL DEFAULT 0, `n_completed` INT NOT NULL DEFAULT 0, @@ -250,14 +250,14 @@ CREATE TABLE IF NOT EXISTS `batches_n_jobs_in_complete_states` ( FOREIGN KEY (`id`) REFERENCES batches(id) ON DELETE CASCADE ) ENGINE = InnoDB; -CREATE TABLE IF NOT EXISTS `batches_cancelled` ( +CREATE TABLE IF NOT EXISTS `job_groups_cancelled` ( `id` BIGINT NOT NULL, `job_group_id` INT NOT NULL DEFAULT 0, PRIMARY KEY (`id`), FOREIGN KEY (`id`) REFERENCES batches(id) ON DELETE CASCADE ) ENGINE = InnoDB; -CREATE TABLE IF NOT EXISTS `batches_inst_coll_staging` ( +CREATE TABLE IF NOT EXISTS `job_groups_inst_coll_staging` ( `batch_id` BIGINT NOT NULL, `update_id` INT NOT NULL, `job_group_id` INT NOT NULL DEFAULT 0, @@ -271,10 +271,10 @@ CREATE TABLE IF NOT EXISTS `batches_inst_coll_staging` ( FOREIGN KEY (`batch_id`, `update_id`) REFERENCES batch_updates (`batch_id`, `update_id`) ON DELETE CASCADE, FOREIGN KEY (`inst_coll`) REFERENCES inst_colls(name) ON DELETE CASCADE ) ENGINE = InnoDB; -CREATE INDEX `batches_inst_coll_staging_inst_coll` ON `batches_inst_coll_staging` (`inst_coll`); -CREATE INDEX batches_inst_coll_staging_batch_id_jg_id ON batches_inst_coll_staging (`batch_id`, `job_group_id`); +CREATE INDEX job_groups_inst_coll_staging_inst_coll ON job_groups_inst_coll_staging (`inst_coll`); +CREATE INDEX job_groups_inst_coll_staging_batch_id_jg_id ON job_groups_inst_coll_staging (`batch_id`, `job_group_id`); -CREATE TABLE `batch_inst_coll_cancellable_resources` ( +CREATE TABLE `job_group_inst_coll_cancellable_resources` ( `batch_id` BIGINT NOT NULL, `update_id` INT NOT NULL, `job_group_id` INT NOT NULL DEFAULT 0, @@ -291,8 +291,8 @@ CREATE TABLE `batch_inst_coll_cancellable_resources` ( FOREIGN KEY (`batch_id`, `update_id`) REFERENCES batch_updates (`batch_id`, `update_id`) ON DELETE CASCADE, FOREIGN KEY (`inst_coll`) REFERENCES inst_colls(name) ON DELETE CASCADE ) ENGINE = InnoDB; -CREATE INDEX `batch_inst_coll_cancellable_resources_inst_coll` ON `batch_inst_coll_cancellable_resources` (`inst_coll`); -CREATE INDEX batch_inst_coll_cancellable_resources_jg_id ON `batch_inst_coll_cancellable_resources` (`batch_id`, `job_group_id`); +CREATE INDEX `job_group_inst_coll_cancellable_resources_inst_coll` ON `job_group_inst_coll_cancellable_resources` (`inst_coll`); +CREATE INDEX job_group_inst_coll_cancellable_resources_jg_id ON `job_group_inst_coll_cancellable_resources` (`batch_id`, `job_group_id`); CREATE TABLE IF NOT EXISTS `jobs` ( `batch_id` BIGINT NOT NULL, @@ -393,7 +393,7 @@ CREATE TABLE IF NOT EXISTS `regions` ( UNIQUE(region) ) ENGINE = InnoDB; -CREATE TABLE IF NOT EXISTS `batch_attributes` ( +CREATE TABLE IF NOT EXISTS `job_group_attributes` ( `batch_id` BIGINT NOT NULL, `job_group_id` INT NOT NULL DEFAULT 0, `key` VARCHAR(100) NOT NULL, @@ -401,10 +401,10 @@ CREATE TABLE IF NOT EXISTS `batch_attributes` ( PRIMARY KEY (`batch_id`, `key`), FOREIGN KEY 
(`batch_id`) REFERENCES batches(id) ON DELETE CASCADE ) ENGINE = InnoDB; -CREATE INDEX batch_attributes_key_value ON `batch_attributes` (`key`, `value`(256)); -CREATE INDEX batch_attributes_value ON `batch_attributes` (`value`(256)); -CREATE INDEX batch_attributes_batch_id_key_value ON `batch_attributes` (`batch_id`, `job_group_id`, `key`, `value`(256)); -CREATE INDEX batch_attributes_batch_id_value ON `batch_attributes` (`batch_id`, `job_group_id`, `value`(256)); +CREATE INDEX job_group_attributes_key_value ON `job_group_attributes` (`key`, `value`(256)); +CREATE INDEX job_group_attributes_value ON `job_group_attributes` (`value`(256)); +CREATE INDEX job_group_attributes_batch_id_key_value ON `job_group_attributes` (`batch_id`, `job_group_id`, `key`, `value`(256)); +CREATE INDEX job_group_attributes_batch_id_value ON `job_group_attributes` (`batch_id`, `job_group_id`, `value`(256)); DROP TABLE IF EXISTS `aggregated_billing_project_user_resources_v2`; CREATE TABLE IF NOT EXISTS `aggregated_billing_project_user_resources_v2` ( @@ -435,8 +435,8 @@ CREATE TABLE IF NOT EXISTS `aggregated_billing_project_user_resources_by_date_v2 ) ENGINE = InnoDB; CREATE INDEX aggregated_billing_project_user_resources_by_date_v2_user ON `aggregated_billing_project_user_resources_by_date_v2` (`billing_date`, `user`); -DROP TABLE IF EXISTS `aggregated_batch_resources_v2`; -CREATE TABLE IF NOT EXISTS `aggregated_batch_resources_v2` ( +DROP TABLE IF EXISTS `aggregated_job_group_resources_v2`; +CREATE TABLE IF NOT EXISTS `aggregated_job_group_resources_v2` ( `batch_id` BIGINT NOT NULL, `job_group_id` INT NOT NULL DEFAULT 0, `resource_id` INT NOT NULL, @@ -488,7 +488,7 @@ CREATE TABLE IF NOT EXISTS `aggregated_billing_project_user_resources_by_date_v3 CREATE INDEX aggregated_billing_project_user_resources_by_date_v3_user ON `aggregated_billing_project_user_resources_by_date_v3` (`billing_date`, `user`); CREATE INDEX aggregated_billing_project_user_resources_by_date_v3_token ON `aggregated_billing_project_user_resources_by_date_v3` (`token`); -CREATE TABLE IF NOT EXISTS `aggregated_batch_resources_v3` ( +CREATE TABLE IF NOT EXISTS `aggregated_job_group_resources_v3` ( `batch_id` BIGINT NOT NULL, `job_group_id` INT NOT NULL DEFAULT 0, `resource_id` INT NOT NULL, @@ -620,7 +620,7 @@ BEGIN WHERE attempt_resources.batch_id = NEW.batch_id AND attempt_resources.job_id = NEW.job_id AND attempt_id = NEW.attempt_id AND migrated = 1 ON DUPLICATE KEY UPDATE `usage` = aggregated_billing_project_user_resources_v3.`usage` + msec_diff_rollup * quantity; - INSERT INTO aggregated_batch_resources_v2 (batch_id, resource_id, token, `usage`) + INSERT INTO aggregated_job_group_resources_v2 (batch_id, resource_id, token, `usage`) SELECT batch_id, resource_id, rand_token, @@ -629,18 +629,18 @@ BEGIN WHERE batch_id = NEW.batch_id AND job_id = NEW.job_id AND attempt_id = NEW.attempt_id ON DUPLICATE KEY UPDATE `usage` = `usage` + msec_diff_rollup * quantity; - INSERT INTO aggregated_batch_resources_v3 (batch_id, resource_id, token, `usage`) + INSERT INTO aggregated_job_group_resources_v3 (batch_id, resource_id, token, `usage`) SELECT attempt_resources.batch_id, attempt_resources.deduped_resource_id, rand_token, msec_diff_rollup * quantity FROM attempt_resources - JOIN aggregated_batch_resources_v2 ON - aggregated_batch_resources_v2.batch_id = attempt_resources.batch_id AND - aggregated_batch_resources_v2.resource_id = attempt_resources.resource_id AND - aggregated_batch_resources_v2.token = rand_token + JOIN aggregated_job_group_resources_v2 
ON + aggregated_job_group_resources_v2.batch_id = attempt_resources.batch_id AND + aggregated_job_group_resources_v2.resource_id = attempt_resources.resource_id AND + aggregated_job_group_resources_v2.token = rand_token WHERE attempt_resources.batch_id = NEW.batch_id AND attempt_resources.job_id = NEW.job_id AND attempt_id = NEW.attempt_id AND migrated = 1 - ON DUPLICATE KEY UPDATE `usage` = aggregated_batch_resources_v3.`usage` + msec_diff_rollup * quantity; + ON DUPLICATE KEY UPDATE `usage` = aggregated_job_group_resources_v3.`usage` + msec_diff_rollup * quantity; INSERT INTO aggregated_job_resources_v2 (batch_id, job_id, resource_id, `usage`) SELECT batch_id, job_id, @@ -759,7 +759,7 @@ BEGIN SELECT user INTO cur_user FROM batches WHERE id = NEW.batch_id; SET cur_batch_cancelled = EXISTS (SELECT TRUE - FROM batches_cancelled + FROM job_groups_cancelled WHERE id = NEW.batch_id LOCK IN SHARE MODE); @@ -804,7 +804,7 @@ BEGIN SET delta_running_cancellable_cores_mcpu = delta_n_running_cancellable_jobs * cores_mcpu; SET delta_running_cores_mcpu = delta_n_running_jobs * cores_mcpu; - INSERT INTO batch_inst_coll_cancellable_resources (batch_id, update_id, inst_coll, token, + INSERT INTO job_group_inst_coll_cancellable_resources (batch_id, update_id, inst_coll, token, n_ready_cancellable_jobs, ready_cancellable_cores_mcpu, n_creating_cancellable_jobs, @@ -904,18 +904,18 @@ BEGIN `usage` = `usage` + NEW.quantity * msec_diff_rollup; END IF; - INSERT INTO aggregated_batch_resources_v2 (batch_id, resource_id, token, `usage`) + INSERT INTO aggregated_job_group_resources_v2 (batch_id, resource_id, token, `usage`) VALUES (NEW.batch_id, NEW.resource_id, rand_token, NEW.quantity * msec_diff_rollup) ON DUPLICATE KEY UPDATE `usage` = `usage` + NEW.quantity * msec_diff_rollup; SELECT migrated INTO batch_resources_migrated - FROM aggregated_batch_resources_v2 + FROM aggregated_job_group_resources_v2 WHERE batch_id = NEW.batch_id AND resource_id = NEW.resource_id AND token = rand_token FOR UPDATE; IF batch_resources_migrated THEN - INSERT INTO aggregated_batch_resources_v3 (batch_id, resource_id, token, `usage`) + INSERT INTO aggregated_job_group_resources_v3 (batch_id, resource_id, token, `usage`) VALUES (NEW.batch_id, NEW.deduped_resource_id, rand_token, NEW.quantity * msec_diff_rollup) ON DUPLICATE KEY UPDATE `usage` = `usage` + NEW.quantity * msec_diff_rollup; @@ -972,8 +972,8 @@ BEGIN SET NEW.migrated = 1; END $$ -DROP TRIGGER IF EXISTS aggregated_batch_resources_v2_before_insert $$ -CREATE TRIGGER aggregated_batch_resources_v2_before_insert BEFORE INSERT on aggregated_batch_resources_v2 +DROP TRIGGER IF EXISTS aggregated_job_group_resources_v2_before_insert $$ +CREATE TRIGGER aggregated_job_group_resources_v2_before_insert BEFORE INSERT on aggregated_job_group_resources_v2 FOR EACH ROW BEGIN SET NEW.migrated = 1; @@ -1018,8 +1018,8 @@ BEGIN END IF; END $$ -DROP TRIGGER IF EXISTS aggregated_batch_resources_v2_after_update $$ -CREATE TRIGGER aggregated_batch_resources_v2_after_update AFTER UPDATE ON aggregated_batch_resources_v2 +DROP TRIGGER IF EXISTS aggregated_job_group_resources_v2_after_update $$ +CREATE TRIGGER aggregated_job_group_resources_v2_after_update AFTER UPDATE ON aggregated_job_group_resources_v2 FOR EACH ROW BEGIN DECLARE new_deduped_resource_id INT; @@ -1027,7 +1027,7 @@ BEGIN IF OLD.migrated = 0 AND NEW.migrated = 1 THEN SELECT deduped_resource_id INTO new_deduped_resource_id FROM resources WHERE resource_id = OLD.resource_id; - INSERT INTO aggregated_batch_resources_v3 (batch_id, 
resource_id, token, `usage`) + INSERT INTO aggregated_job_group_resources_v3 (batch_id, resource_id, token, `usage`) VALUES (NEW.batch_id, new_deduped_resource_id, NEW.token, NEW.usage) ON DUPLICATE KEY UPDATE `usage` = `usage` + NEW.usage; @@ -1165,7 +1165,7 @@ BEGIN SELECT 0 as rc; ELSE SELECT COALESCE(SUM(n_jobs), 0) INTO staging_n_jobs - FROM batches_inst_coll_staging + FROM job_groups_inst_coll_staging WHERE batch_id = in_batch_id AND update_id = in_update_id AND job_group_id = 0 FOR UPDATE; @@ -1184,7 +1184,7 @@ BEGIN UPDATE job_groups INNER JOIN ( SELECT batch_id, job_group_id, CAST(COALESCE(SUM(n_jobs), 0) AS SIGNED) AS staged_n_jobs - FROM batches_inst_coll_staging + FROM job_groups_inst_coll_staging WHERE batch_id = in_batch_id AND update_id = in_update_id GROUP BY batch_id, job_group_id ) AS t ON job_groups.batch_id = t.batch_id AND job_groups.job_group_id = t.job_group_id @@ -1193,15 +1193,15 @@ BEGIN # compute global number of new ready jobs from root job group INSERT INTO user_inst_coll_resources (user, inst_coll, token, n_ready_jobs, ready_cores_mcpu) SELECT user, inst_coll, 0, @n_ready_jobs := COALESCE(SUM(n_ready_jobs), 0), @ready_cores_mcpu := COALESCE(SUM(ready_cores_mcpu), 0) - FROM batches_inst_coll_staging - JOIN batches ON batches.id = batches_inst_coll_staging.batch_id + FROM job_groups_inst_coll_staging + JOIN batches ON batches.id = job_groups_inst_coll_staging.batch_id WHERE batch_id = in_batch_id AND update_id = in_update_id AND job_group_id = 0 GROUP BY `user`, inst_coll ON DUPLICATE KEY UPDATE n_ready_jobs = n_ready_jobs + @n_ready_jobs, ready_cores_mcpu = ready_cores_mcpu + @ready_cores_mcpu; - DELETE FROM batches_inst_coll_staging WHERE batch_id = in_batch_id AND update_id = in_update_id; + DELETE FROM job_groups_inst_coll_staging WHERE batch_id = in_batch_id AND update_id = in_update_id; IF in_update_id != 1 THEN SELECT start_job_id INTO cur_update_start_job_id FROM batch_updates WHERE batch_id = in_batch_id AND update_id = in_update_id; @@ -1261,7 +1261,7 @@ BEGIN FOR UPDATE; SET cur_cancelled = EXISTS (SELECT TRUE - FROM batches_cancelled + FROM job_groups_cancelled WHERE id = in_batch_id FOR UPDATE); @@ -1280,11 +1280,11 @@ BEGIN COALESCE(SUM(n_ready_cancellable_jobs), 0), COALESCE(SUM(n_running_cancellable_jobs), 0), COALESCE(SUM(n_creating_cancellable_jobs), 0) - FROM batch_inst_coll_cancellable_resources - JOIN batches ON batches.id = batch_inst_coll_cancellable_resources.batch_id - INNER JOIN batch_updates ON batch_inst_coll_cancellable_resources.batch_id = batch_updates.batch_id AND - batch_inst_coll_cancellable_resources.update_id = batch_updates.update_id - WHERE batch_inst_coll_cancellable_resources.batch_id = in_batch_id AND batch_updates.committed + FROM job_group_inst_coll_cancellable_resources + JOIN batches ON batches.id = job_group_inst_coll_cancellable_resources.batch_id + INNER JOIN batch_updates ON job_group_inst_coll_cancellable_resources.batch_id = batch_updates.batch_id AND + job_group_inst_coll_cancellable_resources.update_id = batch_updates.update_id + WHERE job_group_inst_coll_cancellable_resources.batch_id = in_batch_id AND batch_updates.committed GROUP BY user, inst_coll ON DUPLICATE KEY UPDATE n_ready_jobs = n_ready_jobs - @n_ready_cancellable_jobs, @@ -1297,10 +1297,10 @@ BEGIN n_cancelled_creating_jobs = n_cancelled_creating_jobs + @n_creating_cancellable_jobs; # there are no cancellable jobs left, they have been cancelled - DELETE FROM batch_inst_coll_cancellable_resources WHERE batch_id = in_batch_id; + DELETE FROM 
job_group_inst_coll_cancellable_resources WHERE batch_id = in_batch_id; # cancel root job group only - INSERT INTO batches_cancelled (id, job_group_id) VALUES (in_batch_id, 0); + INSERT INTO job_groups_cancelled (id, job_group_id) VALUES (in_batch_id, 0); END IF; COMMIT; @@ -1371,10 +1371,10 @@ BEGIN WHERE batch_id = in_batch_id AND job_id = in_job_id FOR UPDATE; - SELECT (jobs.cancelled OR batches_cancelled.id IS NOT NULL) AND NOT jobs.always_run + SELECT (jobs.cancelled OR job_groups_cancelled.id IS NOT NULL) AND NOT jobs.always_run INTO cur_job_cancel FROM jobs - LEFT JOIN batches_cancelled ON batches_cancelled.id = jobs.batch_id + LEFT JOIN job_groups_cancelled ON job_groups_cancelled.id = jobs.batch_id WHERE batch_id = in_batch_id AND job_id = in_job_id LOCK IN SHARE MODE; @@ -1491,10 +1491,10 @@ BEGIN WHERE batch_id = in_batch_id AND job_id = in_job_id FOR UPDATE; - SELECT (jobs.cancelled OR batches_cancelled.id IS NOT NULL) AND NOT jobs.always_run + SELECT (jobs.cancelled OR job_groups_cancelled.id IS NOT NULL) AND NOT jobs.always_run INTO cur_job_cancel FROM jobs - LEFT JOIN batches_cancelled ON batches_cancelled.id = jobs.batch_id + LEFT JOIN job_groups_cancelled ON job_groups_cancelled.id = jobs.batch_id WHERE batch_id = in_batch_id AND job_id = in_job_id LOCK IN SHARE MODE; @@ -1536,10 +1536,10 @@ BEGIN WHERE batch_id = in_batch_id AND job_id = in_job_id FOR UPDATE; - SELECT (jobs.cancelled OR batches_cancelled.id IS NOT NULL) AND NOT jobs.always_run + SELECT (jobs.cancelled OR job_groups_cancelled.id IS NOT NULL) AND NOT jobs.always_run INTO cur_job_cancel FROM jobs - LEFT JOIN batches_cancelled ON batches_cancelled.id = jobs.batch_id + LEFT JOIN job_groups_cancelled ON job_groups_cancelled.id = jobs.batch_id WHERE batch_id = in_batch_id AND job_id = in_job_id LOCK IN SHARE MODE; @@ -1594,7 +1594,7 @@ BEGIN LOCK IN SHARE MODE; SELECT n_completed INTO cur_n_completed - FROM batches_n_jobs_in_complete_states + FROM job_groups_n_jobs_in_complete_states WHERE id = in_batch_id AND job_group_id = cursor_job_group_id LOCK IN SHARE MODE; @@ -1679,7 +1679,7 @@ BEGIN # update only the record for the root job group # backwards compatibility for job groups that do not exist - UPDATE batches_n_jobs_in_complete_states + UPDATE job_groups_n_jobs_in_complete_states SET n_completed = (@new_n_completed := n_completed + 1), n_cancelled = n_cancelled + (new_state = 'Cancelled'), n_failed = n_failed + (new_state = 'Error' OR new_state = 'Failed'), @@ -1697,13 +1697,13 @@ BEGIN # update the rest of the non-root job groups if they exist # necessary for backwards compatibility - UPDATE batches_n_jobs_in_complete_states + UPDATE job_groups_n_jobs_in_complete_states INNER JOIN ( SELECT batch_id, ancestor_id FROM job_group_self_and_ancestors WHERE batch_id = in_batch_id AND job_group_id = cur_job_group_id AND job_group_id != 0 ORDER BY job_group_id ASC - ) AS t ON batches_n_jobs_in_complete_states.id = t.batch_id AND batches_n_jobs_in_complete_states.job_group_id = t.ancestor_id + ) AS t ON job_groups_n_jobs_in_complete_states.id = t.batch_id AND job_groups_n_jobs_in_complete_states.job_group_id = t.ancestor_id SET n_completed = n_completed + 1, n_cancelled = n_cancelled + (new_state = 'Cancelled'), n_failed = n_failed + (new_state = 'Error' OR new_state = 'Failed'), diff --git a/batch/sql/rename-job-groups-tables.sql b/batch/sql/rename-job-groups-tables.sql new file mode 100644 index 00000000000..f13602c2878 --- /dev/null +++ b/batch/sql/rename-job-groups-tables.sql @@ -0,0 +1,924 @@ +RENAME TABLE 
batch_attributes TO job_group_attributes, + batches_cancelled TO job_groups_cancelled, + batches_inst_coll_staging TO job_groups_inst_coll_staging, + batch_inst_coll_cancellable_resources TO job_group_inst_coll_cancellable_resources, + aggregated_batch_resources_v2 TO aggregated_job_group_resources_v2, + aggregated_batch_resources_v3 TO aggregated_job_group_resources_v3, + batches_n_jobs_in_complete_states TO job_groups_n_jobs_in_complete_states; + +ALTER TABLE job_group_attributes RENAME INDEX batch_attributes_key_value TO job_group_attributes_key_value; +ALTER TABLE job_group_attributes RENAME INDEX batch_attributes_value TO job_group_attributes_value; +ALTER TABLE job_group_attributes RENAME INDEX batch_attributes_batch_id_key_value TO job_group_attributes_batch_id_key_value; +ALTER TABLE job_group_attributes RENAME INDEX batch_attributes_batch_id_value TO job_group_attributes_batch_id_value; + +ALTER TABLE job_groups_inst_coll_staging RENAME INDEX batches_inst_coll_staging_inst_coll TO job_groups_inst_coll_staging_inst_coll; +ALTER TABLE job_groups_inst_coll_staging RENAME INDEX batches_inst_coll_staging_batch_id_jg_id TO job_groups_inst_coll_staging_batch_id_jg_id; + +ALTER TABLE job_group_inst_coll_cancellable_resources RENAME INDEX batch_inst_coll_cancellable_resources_inst_coll TO job_group_inst_coll_cancellable_resources_inst_coll; +ALTER TABLE job_group_inst_coll_cancellable_resources RENAME INDEX batch_inst_coll_cancellable_resources_jg_id TO job_group_inst_coll_cancellable_resources_jg_id; + +DELIMITER $$ + +DROP TRIGGER IF EXISTS attempts_after_update $$ +CREATE TRIGGER attempts_after_update AFTER UPDATE ON attempts +FOR EACH ROW +BEGIN + DECLARE job_cores_mcpu INT; + DECLARE cur_billing_project VARCHAR(100); + DECLARE msec_diff_rollup BIGINT; + DECLARE cur_n_tokens INT; + DECLARE rand_token INT; + DECLARE cur_billing_date DATE; + + SELECT n_tokens INTO cur_n_tokens FROM globals LOCK IN SHARE MODE; + SET rand_token = FLOOR(RAND() * cur_n_tokens); + + SELECT cores_mcpu INTO job_cores_mcpu FROM jobs + WHERE batch_id = NEW.batch_id AND job_id = NEW.job_id; + + SELECT billing_project INTO cur_billing_project FROM batches WHERE id = NEW.batch_id; + + SET msec_diff_rollup = (GREATEST(COALESCE(NEW.rollup_time - NEW.start_time, 0), 0) - + GREATEST(COALESCE(OLD.rollup_time - OLD.start_time, 0), 0)); + + SET cur_billing_date = CAST(UTC_DATE() AS DATE); + + IF msec_diff_rollup != 0 THEN + INSERT INTO aggregated_billing_project_user_resources_v2 (billing_project, user, resource_id, token, `usage`) + SELECT billing_project, `user`, + resource_id, + rand_token, + msec_diff_rollup * quantity + FROM attempt_resources + JOIN batches ON batches.id = attempt_resources.batch_id + WHERE batch_id = NEW.batch_id AND job_id = NEW.job_id AND attempt_id = NEW.attempt_id + ON DUPLICATE KEY UPDATE `usage` = `usage` + msec_diff_rollup * quantity; + + INSERT INTO aggregated_billing_project_user_resources_v3 (billing_project, user, resource_id, token, `usage`) + SELECT batches.billing_project, batches.`user`, + attempt_resources.deduped_resource_id, + rand_token, + msec_diff_rollup * quantity + FROM attempt_resources + JOIN batches ON batches.id = attempt_resources.batch_id + INNER JOIN aggregated_billing_project_user_resources_v2 ON + aggregated_billing_project_user_resources_v2.billing_project = batches.billing_project AND + aggregated_billing_project_user_resources_v2.user = batches.user AND + aggregated_billing_project_user_resources_v2.resource_id = attempt_resources.resource_id AND + 
aggregated_billing_project_user_resources_v2.token = rand_token + WHERE attempt_resources.batch_id = NEW.batch_id AND attempt_resources.job_id = NEW.job_id AND attempt_id = NEW.attempt_id AND migrated = 1 + ON DUPLICATE KEY UPDATE `usage` = aggregated_billing_project_user_resources_v3.`usage` + msec_diff_rollup * quantity; + + INSERT INTO aggregated_job_group_resources_v2 (batch_id, resource_id, token, `usage`) + SELECT batch_id, + resource_id, + rand_token, + msec_diff_rollup * quantity + FROM attempt_resources + WHERE batch_id = NEW.batch_id AND job_id = NEW.job_id AND attempt_id = NEW.attempt_id + ON DUPLICATE KEY UPDATE `usage` = `usage` + msec_diff_rollup * quantity; + + INSERT INTO aggregated_job_group_resources_v3 (batch_id, resource_id, token, `usage`) + SELECT attempt_resources.batch_id, + attempt_resources.deduped_resource_id, + rand_token, + msec_diff_rollup * quantity + FROM attempt_resources + JOIN aggregated_job_group_resources_v2 ON + aggregated_job_group_resources_v2.batch_id = attempt_resources.batch_id AND + aggregated_job_group_resources_v2.resource_id = attempt_resources.resource_id AND + aggregated_job_group_resources_v2.token = rand_token + WHERE attempt_resources.batch_id = NEW.batch_id AND attempt_resources.job_id = NEW.job_id AND attempt_id = NEW.attempt_id AND migrated = 1 + ON DUPLICATE KEY UPDATE `usage` = aggregated_job_group_resources_v3.`usage` + msec_diff_rollup * quantity; + + INSERT INTO aggregated_job_resources_v2 (batch_id, job_id, resource_id, `usage`) + SELECT batch_id, job_id, + resource_id, + msec_diff_rollup * quantity + FROM attempt_resources + WHERE batch_id = NEW.batch_id AND job_id = NEW.job_id AND attempt_id = NEW.attempt_id + ON DUPLICATE KEY UPDATE `usage` = `usage` + msec_diff_rollup * quantity; + + INSERT INTO aggregated_job_resources_v3 (batch_id, job_id, resource_id, `usage`) + SELECT attempt_resources.batch_id, attempt_resources.job_id, + attempt_resources.deduped_resource_id, + msec_diff_rollup * quantity + FROM attempt_resources + JOIN aggregated_job_resources_v2 ON + aggregated_job_resources_v2.batch_id = attempt_resources.batch_id AND + aggregated_job_resources_v2.job_id = attempt_resources.job_id AND + aggregated_job_resources_v2.resource_id = attempt_resources.resource_id + WHERE attempt_resources.batch_id = NEW.batch_id AND attempt_resources.job_id = NEW.job_id AND attempt_id = NEW.attempt_id AND migrated = 1 + ON DUPLICATE KEY UPDATE `usage` = aggregated_job_resources_v3.`usage` + msec_diff_rollup * quantity; + + INSERT INTO aggregated_billing_project_user_resources_by_date_v2 (billing_date, billing_project, user, resource_id, token, `usage`) + SELECT cur_billing_date, + billing_project, + `user`, + resource_id, + rand_token, + msec_diff_rollup * quantity + FROM attempt_resources + JOIN batches ON batches.id = attempt_resources.batch_id + WHERE batch_id = NEW.batch_id AND job_id = NEW.job_id AND attempt_id = NEW.attempt_id + ON DUPLICATE KEY UPDATE `usage` = `usage` + msec_diff_rollup * quantity; + + INSERT INTO aggregated_billing_project_user_resources_by_date_v3 (billing_date, billing_project, user, resource_id, token, `usage`) + SELECT cur_billing_date, + batches.billing_project, + batches.`user`, + attempt_resources.deduped_resource_id, + rand_token, + msec_diff_rollup * quantity + FROM attempt_resources + JOIN batches ON batches.id = attempt_resources.batch_id + JOIN aggregated_billing_project_user_resources_by_date_v2 ON + aggregated_billing_project_user_resources_by_date_v2.billing_date = cur_billing_date AND + 
aggregated_billing_project_user_resources_by_date_v2.billing_project = batches.billing_project AND + aggregated_billing_project_user_resources_by_date_v2.user = batches.user AND + aggregated_billing_project_user_resources_by_date_v2.resource_id = attempt_resources.resource_id AND + aggregated_billing_project_user_resources_by_date_v2.token = rand_token + WHERE attempt_resources.batch_id = NEW.batch_id AND attempt_resources.job_id = NEW.job_id AND attempt_id = NEW.attempt_id AND migrated = 1 + ON DUPLICATE KEY UPDATE `usage` = aggregated_billing_project_user_resources_by_date_v3.`usage` + msec_diff_rollup * quantity; + END IF; +END $$ + +DROP TRIGGER IF EXISTS jobs_after_update $$ +CREATE TRIGGER jobs_after_update AFTER UPDATE ON jobs +FOR EACH ROW +BEGIN + DECLARE cur_user VARCHAR(100); + DECLARE cur_batch_cancelled BOOLEAN; + DECLARE cur_n_tokens INT; + DECLARE rand_token INT; + + DECLARE always_run boolean; + DECLARE cores_mcpu bigint; + + DECLARE was_marked_cancelled boolean; + DECLARE was_cancelled boolean; + DECLARE was_cancellable boolean; + + DECLARE now_marked_cancelled boolean; + DECLARE now_cancelled boolean; + DECLARE now_cancellable boolean; + + DECLARE was_ready boolean; + DECLARE now_ready boolean; + + DECLARE was_running boolean; + DECLARE now_running boolean; + + DECLARE was_creating boolean; + DECLARE now_creating boolean; + + DECLARE delta_n_ready_cancellable_jobs int; + DECLARE delta_ready_cancellable_cores_mcpu bigint; + DECLARE delta_n_ready_jobs int; + DECLARE delta_ready_cores_mcpu bigint; + DECLARE delta_n_cancelled_ready_jobs int; + + DECLARE delta_n_running_cancellable_jobs int; + DECLARE delta_running_cancellable_cores_mcpu bigint; + DECLARE delta_n_running_jobs int; + DECLARE delta_running_cores_mcpu bigint; + DECLARE delta_n_cancelled_running_jobs int; + + DECLARE delta_n_creating_cancellable_jobs int; + DECLARE delta_n_creating_jobs int; + DECLARE delta_n_cancelled_creating_jobs int; + + SELECT user INTO cur_user FROM batches WHERE id = NEW.batch_id; + + SET cur_batch_cancelled = EXISTS (SELECT TRUE + FROM job_groups_cancelled + WHERE id = NEW.batch_id + LOCK IN SHARE MODE); + + SELECT n_tokens INTO cur_n_tokens FROM globals LOCK IN SHARE MODE; + SET rand_token = FLOOR(RAND() * cur_n_tokens); + + SET always_run = old.always_run; # always_run is immutable + SET cores_mcpu = old.cores_mcpu; # cores_mcpu is immutable + + SET was_marked_cancelled = old.cancelled OR cur_batch_cancelled; + SET was_cancelled = NOT always_run AND was_marked_cancelled; + SET was_cancellable = NOT always_run AND NOT was_marked_cancelled; + + SET now_marked_cancelled = new.cancelled or cur_batch_cancelled; + SET now_cancelled = NOT always_run AND now_marked_cancelled; + SET now_cancellable = NOT always_run AND NOT now_marked_cancelled; + + # NB: was_cancelled => now_cancelled b/c you cannot be uncancelled + + SET was_ready = old.state = 'Ready'; + SET now_ready = new.state = 'Ready'; + SET was_running = old.state = 'Running'; + SET now_running = new.state = 'Running'; + SET was_creating = old.state = 'Creating'; + SET now_creating = new.state = 'Creating'; + + SET delta_n_ready_cancellable_jobs = (-1 * was_ready * was_cancellable ) + (now_ready * now_cancellable ) ; + SET delta_n_ready_jobs = (-1 * was_ready * (NOT was_cancelled)) + (now_ready * (NOT now_cancelled)); + SET delta_n_cancelled_ready_jobs = (-1 * was_ready * was_cancelled ) + (now_ready * now_cancelled ) ; + + SET delta_n_running_cancellable_jobs = (-1 * was_running * was_cancellable ) + (now_running * now_cancellable ) ; + 
SET delta_n_running_jobs = (-1 * was_running * (NOT was_cancelled)) + (now_running * (NOT now_cancelled)); + SET delta_n_cancelled_running_jobs = (-1 * was_running * was_cancelled ) + (now_running * now_cancelled ) ; + + SET delta_n_creating_cancellable_jobs = (-1 * was_creating * was_cancellable ) + (now_creating * now_cancellable ) ; + SET delta_n_creating_jobs = (-1 * was_creating * (NOT was_cancelled)) + (now_creating * (NOT now_cancelled)); + SET delta_n_cancelled_creating_jobs = (-1 * was_creating * was_cancelled ) + (now_creating * now_cancelled ) ; + + SET delta_ready_cancellable_cores_mcpu = delta_n_ready_cancellable_jobs * cores_mcpu; + SET delta_ready_cores_mcpu = delta_n_ready_jobs * cores_mcpu; + + SET delta_running_cancellable_cores_mcpu = delta_n_running_cancellable_jobs * cores_mcpu; + SET delta_running_cores_mcpu = delta_n_running_jobs * cores_mcpu; + + INSERT INTO job_group_inst_coll_cancellable_resources (batch_id, update_id, inst_coll, token, + n_ready_cancellable_jobs, + ready_cancellable_cores_mcpu, + n_creating_cancellable_jobs, + n_running_cancellable_jobs, + running_cancellable_cores_mcpu) + VALUES (NEW.batch_id, NEW.update_id, NEW.inst_coll, rand_token, + delta_n_ready_cancellable_jobs, + delta_ready_cancellable_cores_mcpu, + delta_n_creating_cancellable_jobs, + delta_n_running_cancellable_jobs, + delta_running_cancellable_cores_mcpu) + ON DUPLICATE KEY UPDATE + n_ready_cancellable_jobs = n_ready_cancellable_jobs + delta_n_ready_cancellable_jobs, + ready_cancellable_cores_mcpu = ready_cancellable_cores_mcpu + delta_ready_cancellable_cores_mcpu, + n_creating_cancellable_jobs = n_creating_cancellable_jobs + delta_n_creating_cancellable_jobs, + n_running_cancellable_jobs = n_running_cancellable_jobs + delta_n_running_cancellable_jobs, + running_cancellable_cores_mcpu = running_cancellable_cores_mcpu + delta_running_cancellable_cores_mcpu; + + INSERT INTO user_inst_coll_resources (user, inst_coll, token, + n_ready_jobs, + n_running_jobs, + n_creating_jobs, + ready_cores_mcpu, + running_cores_mcpu, + n_cancelled_ready_jobs, + n_cancelled_running_jobs, + n_cancelled_creating_jobs + ) + VALUES (cur_user, NEW.inst_coll, rand_token, + delta_n_ready_jobs, + delta_n_running_jobs, + delta_n_creating_jobs, + delta_ready_cores_mcpu, + delta_running_cores_mcpu, + delta_n_cancelled_ready_jobs, + delta_n_cancelled_running_jobs, + delta_n_cancelled_creating_jobs + ) + ON DUPLICATE KEY UPDATE + n_ready_jobs = n_ready_jobs + delta_n_ready_jobs, + n_running_jobs = n_running_jobs + delta_n_running_jobs, + n_creating_jobs = n_creating_jobs + delta_n_creating_jobs, + ready_cores_mcpu = ready_cores_mcpu + delta_ready_cores_mcpu, + running_cores_mcpu = running_cores_mcpu + delta_running_cores_mcpu, + n_cancelled_ready_jobs = n_cancelled_ready_jobs + delta_n_cancelled_ready_jobs, + n_cancelled_running_jobs = n_cancelled_running_jobs + delta_n_cancelled_running_jobs, + n_cancelled_creating_jobs = n_cancelled_creating_jobs + delta_n_cancelled_creating_jobs; +END $$ + +DROP TRIGGER IF EXISTS attempt_resources_after_insert $$ +CREATE TRIGGER attempt_resources_after_insert AFTER INSERT ON attempt_resources +FOR EACH ROW +BEGIN + DECLARE cur_start_time BIGINT; + DECLARE cur_rollup_time BIGINT; + DECLARE cur_billing_project VARCHAR(100); + DECLARE cur_user VARCHAR(100); + DECLARE msec_diff_rollup BIGINT; + DECLARE cur_n_tokens INT; + DECLARE rand_token INT; + DECLARE cur_billing_date DATE; + DECLARE bp_user_resources_migrated BOOLEAN DEFAULT FALSE; + DECLARE bp_user_resources_by_date_migrated 
BOOLEAN DEFAULT FALSE; + DECLARE batch_resources_migrated BOOLEAN DEFAULT FALSE; + DECLARE job_resources_migrated BOOLEAN DEFAULT FALSE; + + SELECT billing_project, user INTO cur_billing_project, cur_user + FROM batches WHERE id = NEW.batch_id; + + SELECT n_tokens INTO cur_n_tokens FROM globals LOCK IN SHARE MODE; + SET rand_token = FLOOR(RAND() * cur_n_tokens); + + SELECT start_time, rollup_time INTO cur_start_time, cur_rollup_time + FROM attempts + WHERE batch_id = NEW.batch_id AND job_id = NEW.job_id AND attempt_id = NEW.attempt_id + LOCK IN SHARE MODE; + + SET msec_diff_rollup = GREATEST(COALESCE(cur_rollup_time - cur_start_time, 0), 0); + + SET cur_billing_date = CAST(UTC_DATE() AS DATE); + + IF msec_diff_rollup != 0 THEN + INSERT INTO aggregated_billing_project_user_resources_v2 (billing_project, user, resource_id, token, `usage`) + VALUES (cur_billing_project, cur_user, NEW.resource_id, rand_token, NEW.quantity * msec_diff_rollup) + ON DUPLICATE KEY UPDATE + `usage` = `usage` + NEW.quantity * msec_diff_rollup; + + SELECT migrated INTO bp_user_resources_migrated + FROM aggregated_billing_project_user_resources_v2 + WHERE billing_project = cur_billing_project AND user = cur_user AND resource_id = NEW.resource_id AND token = rand_token + FOR UPDATE; + + IF bp_user_resources_migrated THEN + INSERT INTO aggregated_billing_project_user_resources_v3 (billing_project, user, resource_id, token, `usage`) + VALUES (cur_billing_project, cur_user, NEW.deduped_resource_id, rand_token, NEW.quantity * msec_diff_rollup) + ON DUPLICATE KEY UPDATE + `usage` = `usage` + NEW.quantity * msec_diff_rollup; + END IF; + + INSERT INTO aggregated_job_group_resources_v2 (batch_id, resource_id, token, `usage`) + VALUES (NEW.batch_id, NEW.resource_id, rand_token, NEW.quantity * msec_diff_rollup) + ON DUPLICATE KEY UPDATE + `usage` = `usage` + NEW.quantity * msec_diff_rollup; + + SELECT migrated INTO batch_resources_migrated + FROM aggregated_job_group_resources_v2 + WHERE batch_id = NEW.batch_id AND resource_id = NEW.resource_id AND token = rand_token + FOR UPDATE; + + IF batch_resources_migrated THEN + INSERT INTO aggregated_job_group_resources_v3 (batch_id, resource_id, token, `usage`) + VALUES (NEW.batch_id, NEW.deduped_resource_id, rand_token, NEW.quantity * msec_diff_rollup) + ON DUPLICATE KEY UPDATE + `usage` = `usage` + NEW.quantity * msec_diff_rollup; + END IF; + + INSERT INTO aggregated_job_resources_v2 (batch_id, job_id, resource_id, `usage`) + VALUES (NEW.batch_id, NEW.job_id, NEW.resource_id, NEW.quantity * msec_diff_rollup) + ON DUPLICATE KEY UPDATE + `usage` = `usage` + NEW.quantity * msec_diff_rollup; + + SELECT migrated INTO job_resources_migrated + FROM aggregated_job_resources_v2 + WHERE batch_id = NEW.batch_id AND job_id = NEW.job_id AND resource_id = NEW.resource_id + FOR UPDATE; + + IF job_resources_migrated THEN + INSERT INTO aggregated_job_resources_v3 (batch_id, job_id, resource_id, `usage`) + VALUES (NEW.batch_id, NEW.job_id, NEW.deduped_resource_id, NEW.quantity * msec_diff_rollup) + ON DUPLICATE KEY UPDATE + `usage` = `usage` + NEW.quantity * msec_diff_rollup; + END IF; + + INSERT INTO aggregated_billing_project_user_resources_by_date_v2 (billing_date, billing_project, user, resource_id, token, `usage`) + VALUES (cur_billing_date, cur_billing_project, cur_user, NEW.resource_id, rand_token, NEW.quantity * msec_diff_rollup) + ON DUPLICATE KEY UPDATE + `usage` = `usage` + NEW.quantity * msec_diff_rollup; + + SELECT migrated INTO bp_user_resources_by_date_migrated + FROM 
aggregated_billing_project_user_resources_by_date_v2 + WHERE billing_date = cur_billing_date AND billing_project = cur_billing_project AND user = cur_user + AND resource_id = NEW.resource_id AND token = rand_token + FOR UPDATE; + + IF bp_user_resources_by_date_migrated THEN + INSERT INTO aggregated_billing_project_user_resources_by_date_v3 (billing_date, billing_project, user, resource_id, token, `usage`) + VALUES (cur_billing_date, cur_billing_project, cur_user, NEW.deduped_resource_id, rand_token, NEW.quantity * msec_diff_rollup) + ON DUPLICATE KEY UPDATE + `usage` = `usage` + NEW.quantity * msec_diff_rollup; + END IF; + END IF; +END $$ + +DROP TRIGGER IF EXISTS aggregated_batch_resources_v2_before_insert $$ +DROP TRIGGER IF EXISTS aggregated_job_group_resources_v2_before_insert $$ +CREATE TRIGGER aggregated_job_group_resources_v2_before_insert BEFORE INSERT on aggregated_job_group_resources_v2 +FOR EACH ROW +BEGIN + SET NEW.migrated = 1; +END $$ + +DROP TRIGGER IF EXISTS aggregated_batch_resources_v2_after_update $$ +DROP TRIGGER IF EXISTS aggregated_job_group_resources_v2_after_update $$ +CREATE TRIGGER aggregated_job_group_resources_v2_after_update AFTER UPDATE ON aggregated_job_group_resources_v2 +FOR EACH ROW +BEGIN + DECLARE new_deduped_resource_id INT; + + IF OLD.migrated = 0 AND NEW.migrated = 1 THEN + SELECT deduped_resource_id INTO new_deduped_resource_id FROM resources WHERE resource_id = OLD.resource_id; + + INSERT INTO aggregated_job_group_resources_v3 (batch_id, resource_id, token, `usage`) + VALUES (NEW.batch_id, new_deduped_resource_id, NEW.token, NEW.usage) + ON DUPLICATE KEY UPDATE + `usage` = `usage` + NEW.usage; + END IF; +END $$ + +DROP PROCEDURE IF EXISTS commit_batch_update $$ +CREATE PROCEDURE commit_batch_update( + IN in_batch_id BIGINT, + IN in_update_id INT, + IN in_timestamp BIGINT +) +BEGIN + DECLARE cur_update_committed BOOLEAN; + DECLARE expected_n_jobs INT; + DECLARE staging_n_jobs INT; + DECLARE cur_update_start_job_id INT; + + START TRANSACTION; + + SELECT committed, n_jobs INTO cur_update_committed, expected_n_jobs + FROM batch_updates + WHERE batch_id = in_batch_id AND update_id = in_update_id + FOR UPDATE; + + IF cur_update_committed THEN + COMMIT; + SELECT 0 as rc; + ELSE + SELECT COALESCE(SUM(n_jobs), 0) INTO staging_n_jobs + FROM job_groups_inst_coll_staging + WHERE batch_id = in_batch_id AND update_id = in_update_id AND job_group_id = 0 + FOR UPDATE; + + # we can only check staged equals expected for the root job group + IF staging_n_jobs = expected_n_jobs THEN + UPDATE batch_updates + SET committed = 1, time_committed = in_timestamp + WHERE batch_id = in_batch_id AND update_id = in_update_id; + + UPDATE batches SET + `state` = 'running', + time_completed = NULL, + n_jobs = n_jobs + expected_n_jobs + WHERE id = in_batch_id; + + UPDATE job_groups + INNER JOIN ( + SELECT batch_id, job_group_id, CAST(COALESCE(SUM(n_jobs), 0) AS SIGNED) AS staged_n_jobs + FROM job_groups_inst_coll_staging + WHERE batch_id = in_batch_id AND update_id = in_update_id + GROUP BY batch_id, job_group_id + ) AS t ON job_groups.batch_id = t.batch_id AND job_groups.job_group_id = t.job_group_id + SET `state` = 'running', time_completed = NULL, n_jobs = n_jobs + t.staged_n_jobs; + + # compute global number of new ready jobs from root job group + INSERT INTO user_inst_coll_resources (user, inst_coll, token, n_ready_jobs, ready_cores_mcpu) + SELECT user, inst_coll, 0, @n_ready_jobs := COALESCE(SUM(n_ready_jobs), 0), @ready_cores_mcpu := COALESCE(SUM(ready_cores_mcpu), 0) + FROM 
job_groups_inst_coll_staging + JOIN batches ON batches.id = job_groups_inst_coll_staging.batch_id + WHERE batch_id = in_batch_id AND update_id = in_update_id AND job_group_id = 0 + GROUP BY `user`, inst_coll + ON DUPLICATE KEY UPDATE + n_ready_jobs = n_ready_jobs + @n_ready_jobs, + ready_cores_mcpu = ready_cores_mcpu + @ready_cores_mcpu; + + DELETE FROM job_groups_inst_coll_staging WHERE batch_id = in_batch_id AND update_id = in_update_id; + + IF in_update_id != 1 THEN + SELECT start_job_id INTO cur_update_start_job_id FROM batch_updates WHERE batch_id = in_batch_id AND update_id = in_update_id; + + UPDATE jobs + LEFT JOIN `jobs_telemetry` ON `jobs_telemetry`.batch_id = jobs.batch_id AND `jobs_telemetry`.job_id = jobs.job_id + LEFT JOIN ( + SELECT `job_parents`.batch_id, `job_parents`.job_id, + COALESCE(SUM(1), 0) AS n_parents, + COALESCE(SUM(state IN ('Pending', 'Ready', 'Creating', 'Running')), 0) AS n_pending_parents, + COALESCE(SUM(state = 'Success'), 0) AS n_succeeded + FROM `job_parents` + LEFT JOIN `jobs` ON jobs.batch_id = `job_parents`.batch_id AND jobs.job_id = `job_parents`.parent_id + WHERE job_parents.batch_id = in_batch_id AND + `job_parents`.job_id >= cur_update_start_job_id AND + `job_parents`.job_id < cur_update_start_job_id + staging_n_jobs + GROUP BY `job_parents`.batch_id, `job_parents`.job_id + FOR UPDATE + ) AS t + ON jobs.batch_id = t.batch_id AND + jobs.job_id = t.job_id + SET jobs.state = IF(COALESCE(t.n_pending_parents, 0) = 0, 'Ready', 'Pending'), + jobs.n_pending_parents = COALESCE(t.n_pending_parents, 0), + jobs.cancelled = IF(COALESCE(t.n_succeeded, 0) = COALESCE(t.n_parents - t.n_pending_parents, 0), jobs.cancelled, 1), + jobs_telemetry.time_ready = IF(COALESCE(t.n_pending_parents, 0) = 0 AND jobs_telemetry.time_ready IS NULL, in_timestamp, jobs_telemetry.time_ready) + WHERE jobs.batch_id = in_batch_id AND jobs.job_id >= cur_update_start_job_id AND + jobs.job_id < cur_update_start_job_id + staging_n_jobs; + END IF; + + COMMIT; + SELECT 0 as rc; + ELSE + ROLLBACK; + SELECT 1 as rc, expected_n_jobs, staging_n_jobs as actual_n_jobs, 'wrong number of jobs' as message; + END IF; + END IF; +END $$ + +DROP PROCEDURE IF EXISTS cancel_batch $$ +CREATE PROCEDURE cancel_batch( + IN in_batch_id VARCHAR(100) +) +BEGIN + DECLARE cur_user VARCHAR(100); + DECLARE cur_batch_state VARCHAR(40); + DECLARE cur_cancelled BOOLEAN; + DECLARE cur_n_cancelled_ready_jobs INT; + DECLARE cur_cancelled_ready_cores_mcpu BIGINT; + DECLARE cur_n_cancelled_running_jobs INT; + DECLARE cur_cancelled_running_cores_mcpu BIGINT; + DECLARE cur_n_n_cancelled_creating_jobs INT; + + START TRANSACTION; + + SELECT user, `state` INTO cur_user, cur_batch_state FROM batches + WHERE id = in_batch_id + FOR UPDATE; + + SET cur_cancelled = EXISTS (SELECT TRUE + FROM job_groups_cancelled + WHERE id = in_batch_id + FOR UPDATE); + + IF cur_batch_state = 'running' AND NOT cur_cancelled THEN + INSERT INTO user_inst_coll_resources (user, inst_coll, token, + n_ready_jobs, ready_cores_mcpu, + n_running_jobs, running_cores_mcpu, + n_creating_jobs, + n_cancelled_ready_jobs, n_cancelled_running_jobs, n_cancelled_creating_jobs) + SELECT user, inst_coll, 0, + -1 * (@n_ready_cancellable_jobs := COALESCE(SUM(n_ready_cancellable_jobs), 0)), + -1 * (@ready_cancellable_cores_mcpu := COALESCE(SUM(ready_cancellable_cores_mcpu), 0)), + -1 * (@n_running_cancellable_jobs := COALESCE(SUM(n_running_cancellable_jobs), 0)), + -1 * (@running_cancellable_cores_mcpu := COALESCE(SUM(running_cancellable_cores_mcpu), 0)), + -1 * 
(@n_creating_cancellable_jobs := COALESCE(SUM(n_creating_cancellable_jobs), 0)), + COALESCE(SUM(n_ready_cancellable_jobs), 0), + COALESCE(SUM(n_running_cancellable_jobs), 0), + COALESCE(SUM(n_creating_cancellable_jobs), 0) + FROM job_group_inst_coll_cancellable_resources + JOIN batches ON batches.id = job_group_inst_coll_cancellable_resources.batch_id + INNER JOIN batch_updates ON job_group_inst_coll_cancellable_resources.batch_id = batch_updates.batch_id AND + job_group_inst_coll_cancellable_resources.update_id = batch_updates.update_id + WHERE job_group_inst_coll_cancellable_resources.batch_id = in_batch_id AND batch_updates.committed + GROUP BY user, inst_coll + ON DUPLICATE KEY UPDATE + n_ready_jobs = n_ready_jobs - @n_ready_cancellable_jobs, + ready_cores_mcpu = ready_cores_mcpu - @ready_cancellable_cores_mcpu, + n_running_jobs = n_running_jobs - @n_running_cancellable_jobs, + running_cores_mcpu = running_cores_mcpu - @running_cancellable_cores_mcpu, + n_creating_jobs = n_creating_jobs - @n_creating_cancellable_jobs, + n_cancelled_ready_jobs = n_cancelled_ready_jobs + @n_ready_cancellable_jobs, + n_cancelled_running_jobs = n_cancelled_running_jobs + @n_running_cancellable_jobs, + n_cancelled_creating_jobs = n_cancelled_creating_jobs + @n_creating_cancellable_jobs; + + # there are no cancellable jobs left, they have been cancelled + DELETE FROM job_group_inst_coll_cancellable_resources WHERE batch_id = in_batch_id; + + # cancel root job group only + INSERT INTO job_groups_cancelled (id, job_group_id) VALUES (in_batch_id, 0); + END IF; + + COMMIT; +END $$ + +DROP PROCEDURE IF EXISTS schedule_job $$ +CREATE PROCEDURE schedule_job( + IN in_batch_id BIGINT, + IN in_job_id INT, + IN in_attempt_id VARCHAR(40), + IN in_instance_name VARCHAR(100) +) +BEGIN + DECLARE cur_job_state VARCHAR(40); + DECLARE cur_cores_mcpu INT; + DECLARE cur_job_cancel BOOLEAN; + DECLARE cur_instance_state VARCHAR(40); + DECLARE cur_attempt_id VARCHAR(40); + DECLARE delta_cores_mcpu INT; + DECLARE cur_instance_is_pool BOOLEAN; + + START TRANSACTION; + + SELECT state, cores_mcpu, attempt_id + INTO cur_job_state, cur_cores_mcpu, cur_attempt_id + FROM jobs + WHERE batch_id = in_batch_id AND job_id = in_job_id + FOR UPDATE; + + SELECT (jobs.cancelled OR job_groups_cancelled.id IS NOT NULL) AND NOT jobs.always_run + INTO cur_job_cancel + FROM jobs + LEFT JOIN job_groups_cancelled ON job_groups_cancelled.id = jobs.batch_id + WHERE batch_id = in_batch_id AND job_id = in_job_id + LOCK IN SHARE MODE; + + SELECT is_pool + INTO cur_instance_is_pool + FROM instances + LEFT JOIN inst_colls ON instances.inst_coll = inst_colls.name + WHERE instances.name = in_instance_name; + + CALL add_attempt(in_batch_id, in_job_id, in_attempt_id, in_instance_name, cur_cores_mcpu, delta_cores_mcpu); + + IF cur_instance_is_pool THEN + IF delta_cores_mcpu = 0 THEN + SET delta_cores_mcpu = cur_cores_mcpu; + ELSE + SET delta_cores_mcpu = 0; + END IF; + END IF; + + SELECT state INTO cur_instance_state FROM instances WHERE name = in_instance_name LOCK IN SHARE MODE; + + IF (cur_job_state = 'Ready' OR cur_job_state = 'Creating') AND NOT cur_job_cancel AND cur_instance_state = 'active' THEN + UPDATE jobs SET state = 'Running', attempt_id = in_attempt_id WHERE batch_id = in_batch_id AND job_id = in_job_id; + COMMIT; + SELECT 0 as rc, in_instance_name, delta_cores_mcpu; + ELSE + COMMIT; + SELECT 1 as rc, + cur_job_state, + cur_job_cancel, + cur_instance_state, + in_instance_name, + cur_attempt_id, + delta_cores_mcpu, + 'job not Ready or cancelled or 
instance not active, but attempt already exists' as message; + END IF; +END $$ + +DROP PROCEDURE IF EXISTS mark_job_creating $$ +CREATE PROCEDURE mark_job_creating( + IN in_batch_id BIGINT, + IN in_job_id INT, + IN in_attempt_id VARCHAR(40), + IN in_instance_name VARCHAR(100), + IN new_start_time BIGINT +) +BEGIN + DECLARE cur_job_state VARCHAR(40); + DECLARE cur_job_cancel BOOLEAN; + DECLARE cur_cores_mcpu INT; + DECLARE cur_instance_state VARCHAR(40); + DECLARE delta_cores_mcpu INT; + + START TRANSACTION; + + SELECT state, cores_mcpu + INTO cur_job_state, cur_cores_mcpu + FROM jobs + WHERE batch_id = in_batch_id AND job_id = in_job_id + FOR UPDATE; + + SELECT (jobs.cancelled OR job_groups_cancelled.id IS NOT NULL) AND NOT jobs.always_run + INTO cur_job_cancel + FROM jobs + LEFT JOIN job_groups_cancelled ON job_groups_cancelled.id = jobs.batch_id + WHERE batch_id = in_batch_id AND job_id = in_job_id + LOCK IN SHARE MODE; + + CALL add_attempt(in_batch_id, in_job_id, in_attempt_id, in_instance_name, cur_cores_mcpu, delta_cores_mcpu); + + UPDATE attempts SET start_time = new_start_time, rollup_time = new_start_time + WHERE batch_id = in_batch_id AND job_id = in_job_id AND attempt_id = in_attempt_id; + + SELECT state INTO cur_instance_state FROM instances WHERE name = in_instance_name LOCK IN SHARE MODE; + + IF cur_job_state = 'Ready' AND NOT cur_job_cancel AND cur_instance_state = 'pending' THEN + UPDATE jobs SET state = 'Creating', attempt_id = in_attempt_id WHERE batch_id = in_batch_id AND job_id = in_job_id; + END IF; + + COMMIT; + SELECT 0 as rc, delta_cores_mcpu; +END $$ + +DROP PROCEDURE IF EXISTS mark_job_started $$ +CREATE PROCEDURE mark_job_started( + IN in_batch_id BIGINT, + IN in_job_id INT, + IN in_attempt_id VARCHAR(40), + IN in_instance_name VARCHAR(100), + IN new_start_time BIGINT +) +BEGIN + DECLARE cur_job_state VARCHAR(40); + DECLARE cur_job_cancel BOOLEAN; + DECLARE cur_cores_mcpu INT; + DECLARE cur_instance_state VARCHAR(40); + DECLARE delta_cores_mcpu INT; + + START TRANSACTION; + + SELECT state, cores_mcpu + INTO cur_job_state, cur_cores_mcpu + FROM jobs + WHERE batch_id = in_batch_id AND job_id = in_job_id + FOR UPDATE; + + SELECT (jobs.cancelled OR job_groups_cancelled.id IS NOT NULL) AND NOT jobs.always_run + INTO cur_job_cancel + FROM jobs + LEFT JOIN job_groups_cancelled ON job_groups_cancelled.id = jobs.batch_id + WHERE batch_id = in_batch_id AND job_id = in_job_id + LOCK IN SHARE MODE; + + CALL add_attempt(in_batch_id, in_job_id, in_attempt_id, in_instance_name, cur_cores_mcpu, delta_cores_mcpu); + + UPDATE attempts SET start_time = new_start_time, rollup_time = new_start_time + WHERE batch_id = in_batch_id AND job_id = in_job_id AND attempt_id = in_attempt_id; + + SELECT state INTO cur_instance_state FROM instances WHERE name = in_instance_name LOCK IN SHARE MODE; + + IF cur_job_state = 'Ready' AND NOT cur_job_cancel AND cur_instance_state = 'active' THEN + UPDATE jobs SET state = 'Running', attempt_id = in_attempt_id WHERE batch_id = in_batch_id AND job_id = in_job_id; + END IF; + + COMMIT; + SELECT 0 as rc, delta_cores_mcpu; +END $$ + +# https://dev.mysql.com/doc/refman/8.0/en/cursors.html +# https://stackoverflow.com/questions/5817395/how-can-i-loop-through-all-rows-of-a-table-mysql/16350693#16350693 +DROP PROCEDURE IF EXISTS mark_job_group_complete $$ +CREATE PROCEDURE mark_job_group_complete( + IN in_batch_id BIGINT, + IN in_job_group_id INT, + IN new_timestamp BIGINT +) +BEGIN + DECLARE cursor_job_group_id INT; + DECLARE done BOOLEAN DEFAULT FALSE; + 
DECLARE total_jobs_in_job_group INT; + DECLARE cur_n_completed INT; + + DECLARE job_group_cursor CURSOR FOR + SELECT ancestor_id + FROM job_group_self_and_ancestors + WHERE batch_id = in_batch_id AND job_group_id = in_job_group_id + ORDER BY job_group_id ASC; + + DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE; + + OPEN job_group_cursor; + update_job_group_loop: LOOP + FETCH job_group_cursor INTO cursor_job_group_id; + + IF done THEN + LEAVE update_job_group_loop; + END IF; + + SELECT n_jobs INTO total_jobs_in_job_group + FROM job_groups + WHERE batch_id = in_batch_id AND job_group_id = cursor_job_group_id + LOCK IN SHARE MODE; + + SELECT n_completed INTO cur_n_completed + FROM job_groups_n_jobs_in_complete_states + WHERE id = in_batch_id AND job_group_id = cursor_job_group_id + LOCK IN SHARE MODE; + + # Grabbing an exclusive lock on job groups here could deadlock, + # but this IF should only execute for the last job + IF cur_n_completed = total_jobs_in_job_group THEN + UPDATE job_groups + SET time_completed = new_timestamp, + `state` = 'complete' + WHERE batch_id = in_batch_id AND job_group_id = cursor_job_group_id; + END IF; + END LOOP; + CLOSE job_group_cursor; +END $$ + +DROP PROCEDURE IF EXISTS mark_job_complete $$ +CREATE PROCEDURE mark_job_complete( + IN in_batch_id BIGINT, + IN in_job_id INT, + IN in_attempt_id VARCHAR(40), + IN in_instance_name VARCHAR(100), + IN new_state VARCHAR(40), + IN new_status TEXT, + IN new_start_time BIGINT, + IN new_end_time BIGINT, + IN new_reason VARCHAR(40), + IN new_timestamp BIGINT +) +BEGIN + DECLARE cur_job_group_id INT; + DECLARE cur_job_state VARCHAR(40); + DECLARE cur_instance_state VARCHAR(40); + DECLARE cur_cores_mcpu INT; + DECLARE cur_end_time BIGINT; + DECLARE delta_cores_mcpu INT DEFAULT 0; + DECLARE total_jobs_in_batch INT; + DECLARE expected_attempt_id VARCHAR(40); + + START TRANSACTION; + + SELECT n_jobs INTO total_jobs_in_batch FROM batches WHERE id = in_batch_id; + + SELECT state, cores_mcpu, job_group_id + INTO cur_job_state, cur_cores_mcpu, cur_job_group_id + FROM jobs + WHERE batch_id = in_batch_id AND job_id = in_job_id + FOR UPDATE; + + CALL add_attempt(in_batch_id, in_job_id, in_attempt_id, in_instance_name, cur_cores_mcpu, delta_cores_mcpu); + + SELECT end_time INTO cur_end_time FROM attempts + WHERE batch_id = in_batch_id AND job_id = in_job_id AND attempt_id = in_attempt_id + FOR UPDATE; + + UPDATE attempts + SET start_time = new_start_time, rollup_time = new_end_time, end_time = new_end_time, reason = new_reason + WHERE batch_id = in_batch_id AND job_id = in_job_id AND attempt_id = in_attempt_id; + + SELECT state INTO cur_instance_state FROM instances WHERE name = in_instance_name LOCK IN SHARE MODE; + IF cur_instance_state = 'active' AND cur_end_time IS NULL THEN + UPDATE instances_free_cores_mcpu + SET free_cores_mcpu = free_cores_mcpu + cur_cores_mcpu + WHERE instances_free_cores_mcpu.name = in_instance_name; + + SET delta_cores_mcpu = delta_cores_mcpu + cur_cores_mcpu; + END IF; + + SELECT attempt_id INTO expected_attempt_id FROM jobs + WHERE batch_id = in_batch_id AND job_id = in_job_id + FOR UPDATE; + + IF expected_attempt_id IS NOT NULL AND expected_attempt_id != in_attempt_id THEN + COMMIT; + SELECT 2 as rc, + expected_attempt_id, + delta_cores_mcpu, + 'input attempt id does not match expected attempt id' as message; + ELSEIF cur_job_state = 'Ready' OR cur_job_state = 'Creating' OR cur_job_state = 'Running' THEN + UPDATE jobs + SET state = new_state, status = new_status, attempt_id = in_attempt_id + WHERE 
batch_id = in_batch_id AND job_id = in_job_id; + + # update only the record for the root job group + # backwards compatibility for job groups that do not exist + UPDATE job_groups_n_jobs_in_complete_states + SET n_completed = (@new_n_completed := n_completed + 1), + n_cancelled = n_cancelled + (new_state = 'Cancelled'), + n_failed = n_failed + (new_state = 'Error' OR new_state = 'Failed'), + n_succeeded = n_succeeded + (new_state != 'Cancelled' AND new_state != 'Error' AND new_state != 'Failed') + WHERE id = in_batch_id AND job_group_id = 0; + + # Grabbing an exclusive lock on batches here could deadlock, + # but this IF should only execute for the last job + IF @new_n_completed = total_jobs_in_batch THEN + UPDATE batches + SET time_completed = new_timestamp, + `state` = 'complete' + WHERE id = in_batch_id; + END IF; + + # update the rest of the non-root job groups if they exist + # necessary for backwards compatibility + UPDATE job_groups_n_jobs_in_complete_states + INNER JOIN ( + SELECT batch_id, ancestor_id + FROM job_group_self_and_ancestors + WHERE batch_id = in_batch_id AND job_group_id = cur_job_group_id AND job_group_id != 0 + ORDER BY job_group_id ASC + ) AS t ON job_groups_n_jobs_in_complete_states.id = t.batch_id AND job_groups_n_jobs_in_complete_states.job_group_id = t.ancestor_id + SET n_completed = n_completed + 1, + n_cancelled = n_cancelled + (new_state = 'Cancelled'), + n_failed = n_failed + (new_state = 'Error' OR new_state = 'Failed'), + n_succeeded = n_succeeded + (new_state != 'Cancelled' AND new_state != 'Error' AND new_state != 'Failed'); + + CALL mark_job_group_complete(in_batch_id, cur_job_group_id, new_timestamp); + + UPDATE jobs + LEFT JOIN `jobs_telemetry` ON `jobs_telemetry`.batch_id = jobs.batch_id AND `jobs_telemetry`.job_id = jobs.job_id + INNER JOIN `job_parents` + ON jobs.batch_id = `job_parents`.batch_id AND + jobs.job_id = `job_parents`.job_id + SET jobs.state = IF(jobs.n_pending_parents = 1, 'Ready', 'Pending'), + jobs.n_pending_parents = jobs.n_pending_parents - 1, + jobs.cancelled = IF(new_state = 'Success', jobs.cancelled, 1), + jobs_telemetry.time_ready = IF(jobs.n_pending_parents = 1, new_timestamp, jobs_telemetry.time_ready) + WHERE jobs.batch_id = in_batch_id AND + `job_parents`.batch_id = in_batch_id AND + `job_parents`.parent_id = in_job_id; + + COMMIT; + SELECT 0 as rc, + cur_job_state as old_state, + delta_cores_mcpu; + ELSEIF cur_job_state = 'Cancelled' OR cur_job_state = 'Error' OR + cur_job_state = 'Failed' OR cur_job_state = 'Success' THEN + COMMIT; + SELECT 0 as rc, + cur_job_state as old_state, + delta_cores_mcpu; + ELSE + COMMIT; + SELECT 1 as rc, + cur_job_state, + delta_cores_mcpu, + 'job state not Ready, Creating, Running or complete' as message; + END IF; +END $$ + +DELIMITER ; diff --git a/build.yaml b/build.yaml index ae51f3cee05..f0d9ba4bf6d 100644 --- a/build.yaml +++ b/build.yaml @@ -2347,6 +2347,9 @@ steps: - name: populate-job-groups script: /io/sql/populate_job_groups.py online: true + - name: rename-job-groups-tables + script: /io/sql/rename-job-groups-tables.sql + online: false # this must be offline inputs: - from: /repo/batch/sql to: /io/sql From 1f11d2dfe453b23f7bfb21b40f4b9bb3e0db116f Mon Sep 17 00:00:00 2001 From: Dan King Date: Thu, 30 Nov 2023 17:13:46 -0500 Subject: [PATCH 22/48] [query] fix #13998 (#14057) CHANGELOG: Fix #13998 which appeared in 0.2.58 and prevented reading from a networked filesystem mounted within the filesystem of the worker node for certain pipelines (those that did not trigger "lowering"). 
We use the IndexReader in `PartitionNativeIntervalReader`, `PartitionNativeReaderIndexed`, and `PartitionZippedIndexedNativeReader`.

1. `PartitionNativeIntervalReader` is only used by `query_table`.
2. `PartitionNativeReaderIndexed` is only used by `IndexedRVDSpec2.readTableStage`, which is used by `TableNativeReader` when there is a new partitioner.
3. `PartitionZippedIndexedNativeReader` is only used by `AbstractRVDSpec.readZippedLowered` when there is a new partitioner.

Case two is for tables; case three is for matrix tables. In `readZippedLowered` we explicitly [drop the file protocol](https://github.com/hail-is/hail/blob/1dedf3c63f9aabf1b6ce538165360056f82f76e4/hail/src/main/scala/is/hail/rvd/AbstractRVDSpec.scala#L154-L155):

```
val absPathLeft = removeFileProtocol(pathLeft)
val absPathRight = removeFileProtocol(pathRight)
```

We have done this, by various names, since this lowered code path was added. I added `removeFileProtocol` because stripping the protocol in Query-on-Batch prevented the reading and writing of gs:// URIs, the only URIs I could read in QoB. `uriPath` (the function whose use I replaced with `removeFileProtocol`) was added by Cotton [a very long time ago](https://github.com/hail-is/hail/commit/92a9936e11d2f56b88390bee5dc4de489e188f02). It seems he added it so that he could use HDFS to generate a temporary file path on the local filesystem but pass the file path to binary tools that know nothing of HDFS and file:// URIs.

#9522 added the lowered code path and thus introduced this bug. It attempted to mirror the extant code in [`readIndexedPartitions`](https://github.com/hail-is/hail/blob/2b0aded9206849252b453dd80710cea8d2156793/hail/src/main/scala/is/hail/HailContext.scala#L421-L440), which *does not* strip any protocols from the path. This has gone undetected because we never try to read data through the OS's filesystem. We always use gs://, Azure, or s3:// because we do not test in environments that have a networked filesystem mounted in the OS's filesystem. To replicate this bug (and add a test for it), we would need a cluster with a Lustre filesystem (or another networked filesystem). This would be a fairly large lift.

The fix is trivial: just never intentionally strip the protocol!
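To make "strip the protocol" concrete, here is a minimal Python sketch of the semantics of the removed helper (the Scala original, `removeFileProtocol`, appears in the diff below); the example paths are invented for illustration:

```python
from urllib.parse import urlparse

def remove_file_protocol(uri: str) -> str:
    # Mirrors the removed Scala helper: drop a leading file:// scheme,
    # pass every other URI (gs://, s3://, https://, ...) through unchanged.
    parsed = urlparse(uri)
    return parsed.path if parsed.scheme == 'file' else uri

# A networked filesystem mounted on the worker node, addressed explicitly:
assert remove_file_protocol('file:///lustre/dataset.mt/index') == '/lustre/dataset.mt/index'
# Cloud URIs are untouched:
assert remove_file_protocol('gs://my-bucket/dataset.mt/index') == 'gs://my-bucket/dataset.mt/index'
```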
--- hail/src/main/scala/is/hail/rvd/AbstractRVDSpec.scala | 6 +++--- hail/src/main/scala/is/hail/utils/package.scala | 9 --------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/hail/src/main/scala/is/hail/rvd/AbstractRVDSpec.scala b/hail/src/main/scala/is/hail/rvd/AbstractRVDSpec.scala index b95670552c3..384784a8785 100644 --- a/hail/src/main/scala/is/hail/rvd/AbstractRVDSpec.scala +++ b/hail/src/main/scala/is/hail/rvd/AbstractRVDSpec.scala @@ -151,8 +151,8 @@ object AbstractRVDSpec { indexSpecLeft, indexSpecRight, specLeft.key, uidFieldName) - val absPathLeft = removeFileProtocol(pathLeft) - val absPathRight = removeFileProtocol(pathRight) + val absPathLeft = pathLeft + val absPathRight = pathRight val partsAndIntervals: IndexedSeq[(String, Interval)] = if (specLeft.key.isEmpty) { specLeft.partFiles.map { p => (p, null) } } else { @@ -438,7 +438,7 @@ case class IndexedRVDSpec2( val rSpec = typedCodecSpec val reader = ir.PartitionNativeReaderIndexed(rSpec, indexSpec, part.kType.fieldNames, uidFieldName) - val absPath = removeFileProtocol(path) + val absPath = path val partPaths = tmpPartitioner.rangeBounds.map { b => partFiles(part.lowerBoundInterval(b)) } diff --git a/hail/src/main/scala/is/hail/utils/package.scala b/hail/src/main/scala/is/hail/utils/package.scala index b1d4c61518a..ade36e71226 100644 --- a/hail/src/main/scala/is/hail/utils/package.scala +++ b/hail/src/main/scala/is/hail/utils/package.scala @@ -351,15 +351,6 @@ package object utils extends Logging def uriPath(uri: String): String = new URI(uri).getPath - def removeFileProtocol(uriString: String): String = { - val uri = new URI(uriString) - if (uri.getScheme == "file") { - uri.getPath - } else { - uri.toString - } - } - // NB: can't use Nothing here because it is not a super type of Null private object flattenOrNullInstance extends FlattenOrNull[Array] From 7ec0430932b4457422062ebf97672967aa091787 Mon Sep 17 00:00:00 2001 From: Patrick Schultz Date: Thu, 30 Nov 2023 18:03:13 -0500 Subject: [PATCH 23/48] [query] increase tolerance in approx_cdf tests (#14058) We observed some sporadic failures, but the error was always less than .011. This raises the tolerance to .015, which should be a comfortable margin. --- hail/python/test/hail/expr/test_expr.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hail/python/test/hail/expr/test_expr.py b/hail/python/test/hail/expr/test_expr.py index 18b9185c310..f17549fbe63 100644 --- a/hail/python/test/hail/expr/test_expr.py +++ b/hail/python/test/hail/expr/test_expr.py @@ -3932,7 +3932,7 @@ def test_approx_cdf_accuracy(cdf_test_data): t = cdf_test_data cdf = t.aggregate(hl.agg.approx_cdf(t.idx, 200)) error = cdf_max_observed_error(cdf) - assert(error < 0.01) + assert(error < 0.015) def test_approx_cdf_all_missing(): @@ -3972,7 +3972,7 @@ def test_cdf_combine(cdf_test_data): cdf = _cdf_combine(200, cdf1, cdf2) cdf = hl.eval(_result_from_raw_cdf(cdf)) error = cdf_max_observed_error(cdf) - assert(error < 0.01) + assert(error < 0.015) def test_approx_cdf_array_agg(): From 5b718e1d3e1d72fad15cc57801601a6e35df41dd Mon Sep 17 00:00:00 2001 From: Dan King Date: Thu, 30 Nov 2023 18:37:08 -0500 Subject: [PATCH 24/48] [query] restore Spark logs to the Hail log file (#14055) Spark depends on a very old verison of SLF4J. We cannot upgrade. We added this dependency ages ago to fix some undocumented issue with logging and SLF4J. It seems reasonable to me that we should just accept whatever version of SLF4J that Spark provides. 
This removes this message: ``` SLF4J: No SLF4J providers were found. SLF4J: Defaulting to no-operation (NOP) logger implementation SLF4J: See https://www.slf4j.org/codes.html#noProviders for further details. SLF4J: Class path contains SLF4J bindings targeting slf4j-api versions 1.7.x or earlier. SLF4J: Ignoring binding found at [jar:file:/usr/lib/spark/jars/log4j-slf4j-impl-2.17.2.jar!/org/slf4j/impl/StaticLoggerBinder.class] SLF4J: See https://www.slf4j.org/codes.html#ignoredBindings for an explanation. ``` Which, IMO, really should be a stop-the-world error. --- hail/build.gradle | 2 -- 1 file changed, 2 deletions(-) diff --git a/hail/build.gradle b/hail/build.gradle index 1312dd1ed6e..d3111dc7eaa 100644 --- a/hail/build.gradle +++ b/hail/build.gradle @@ -147,8 +147,6 @@ dependencies { transitive = false } - implementation group: 'org.slf4j', name: 'slf4j-api', version: '2.0.7' - def elasticMajorVersion = System.getProperty("elasticsearch.major-version", "7") if (elasticMajorVersion != "7" && elasticMajorVersion != "8") { throw new UnsupportedOperationException("elasticsearch.major-version must be 7 or 8") From 4c0dc9a1765db15227feda666e204abb24ed4873 Mon Sep 17 00:00:00 2001 From: iris <84595986+iris-garden@users.noreply.github.com> Date: Fri, 1 Dec 2023 16:08:45 -0500 Subject: [PATCH 25/48] [tooling] adds non-hailtop subdirs to pylint ignore (#14060) We only have `make` commands for running `pylint` on subdirectories that have been kept up to date with its rules, but the `pylintrc` doesn't actually contain any indication of which subdirectories should be ignored when running `pylint`. This makes the use of language servers that run `pylint` on the file that's open frustrating, as files in the ignored subdirectories will often be full of `pylint` suggestions. This change adds the relevant subdirectories to the `pylintrc` file. Note that this does not necessarily enable us to run `pylint` directly on those subdirectories with the equivalent `make` commands to the ones that already exist, because there is no way that I've found to make `pylint` ignore the `__init__.py` file of whatever module it's being run on, so running it on `hail/python/hail`, for example, produces many errors. --- pylintrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pylintrc b/pylintrc index e93bb0ac0b0..3b113665a96 100644 --- a/pylintrc +++ b/pylintrc @@ -2,7 +2,7 @@ # I also tried extension-pkg-allow-list, but it had no effect. https://stackoverflow.com/a/35259944/6823256 generated-members=orjson -ignore=sql +ignore-paths=^.*.sql$,^.*hail/python/cluster-tests/.*$,^.*hail/python/dev/.*$,^.*hail/python/hail/.*$,^.*hail/python/hail.egg-info/.*$,^.*hail/python/test/.*$ [MESSAGES CONTROL] # C0111 Missing docstring From c49d8fe6945a76d43fa813c51b865419b2075715 Mon Sep 17 00:00:00 2001 From: Daniel Goldstein Date: Fri, 1 Dec 2023 18:17:08 -0500 Subject: [PATCH 26/48] [letsencrypt] Update make targets to be compatible with new docker-build.sh (#14064) This wasn't updated when I separated out building and pushing so the `build` target wasn't pushing the image. When I fixed that the cert generation failed because `memory` is no longer available. These changes got everything working again. 
--- Makefile | 6 +++++- letsencrypt/Makefile | 8 +++----- letsencrypt/subdomains.txt | 1 - 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index d810b90aa6f..cce0ecc5922 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ SERVICES_IMAGES := $(patsubst %, %-image, $(SERVICES_PLUS_ADMIN_POD)) SERVICES_DATABASES := $(patsubst %, %-db, $(SERVICES)) SERVICES_MODULES := $(SERVICES) gear web_common CHECK_SERVICES_MODULES := $(patsubst %, check-%, $(SERVICES_MODULES)) -SPECIAL_IMAGES := hail-ubuntu batch-worker +SPECIAL_IMAGES := hail-ubuntu batch-worker letsencrypt HAILGENETICS_IMAGES = $(foreach img,hail vep-grch37-85 vep-grch38-95,hailgenetics-$(img)) CI_IMAGES = ci-utils ci-buildkit base hail-run @@ -217,6 +217,10 @@ hailgenetics-vep-grch38-95-image: hail-ubuntu-image --build-arg BASE_IMAGE=$(shell cat hail-ubuntu-image) echo $(IMAGE_NAME) > $@ +letsencrypt-image: + ./docker-build.sh letsencrypt Dockerfile $(IMAGE_NAME) + echo $(IMAGE_NAME) > $@ + $(PRIVATE_REGISTRY_IMAGES): pushed-private-%-image: %-image ! [ -z $(NAMESPACE) ] # call this like: make ... NAMESPACE=default [ $(DOCKER_PREFIX) != docker.io ] # DOCKER_PREFIX should be an internal private registry diff --git a/letsencrypt/Makefile b/letsencrypt/Makefile index 4ff20839bd3..4591bf151ed 100644 --- a/letsencrypt/Makefile +++ b/letsencrypt/Makefile @@ -1,23 +1,21 @@ include ../config.mk -LETSENCRYPT_IMAGE := $(DOCKER_PREFIX)/letsencrypt:$(TOKEN) - .PHONY: build start-service run clean build: - ../docker-build.sh . Dockerfile $(LETSENCRYPT_IMAGE) + $(MAKE) -C .. pushed-private-letsencrypt-image DRY_RUN ?= false run: build echo $(DOMAIN) > domains.txt.out echo internal.$(DOMAIN) >> domains.txt.out sed 's/$$/.$(DOMAIN)/g' subdomains.txt >> domains.txt.out - python3 ../ci/jinja2_render.py '{"letsencrypt_image":{"image":"$(LETSENCRYPT_IMAGE)"},"domain":"$(DOMAIN)","domains":"'$$(paste -s -d, domains.txt.out)'","dry_run":$(DRY_RUN)}' letsencrypt-pod.yaml letsencrypt-pod.yaml.out + python3 ../ci/jinja2_render.py '{"letsencrypt_image":{"image":"$(shell cat ../pushed-private-letsencrypt-image)"},"domain":"$(DOMAIN)","domains":"'$$(paste -s -d, domains.txt.out)'","dry_run":$(DRY_RUN)}' letsencrypt-pod.yaml letsencrypt-pod.yaml.out /bin/bash run-letsencrypt.sh letsencrypt-pod.yaml.out revoke: build ! 
[ -z "$(CERT_IDS_TO_REVOKE)" ] # call this like: make deploy CERT_IDS_TO_REVOKE='abc123 def567' - python3 ../ci/jinja2_render.py '{"letsencrypt_image":{"image":"$(LETSENCRYPT_IMAGE)"},"cert_ids_to_revoke":"$(CERT_IDS_TO_REVOKE)"}' revoke-certs-pod.yaml revoke-certs-pod.yaml.out + python3 ../ci/jinja2_render.py '{"letsencrypt_image":{"image":"$(shell cat ../pushed-private-letsencrypt-image)"},"cert_ids_to_revoke":"$(CERT_IDS_TO_REVOKE)"}' revoke-certs-pod.yaml revoke-certs-pod.yaml.out /bin/bash run-letsencrypt.sh revoke-certs-pod.yaml.out .PHONY: clean diff --git a/letsencrypt/subdomains.txt b/letsencrypt/subdomains.txt index 5e923b04cb8..6423c7e76d1 100644 --- a/letsencrypt/subdomains.txt +++ b/letsencrypt/subdomains.txt @@ -3,7 +3,6 @@ www batch batch-driver blog -memory monitoring auth ukbb-rg From 6f5c4fd33606ac1a8e0948dc88b9f93da88a15fe Mon Sep 17 00:00:00 2001 From: Daniel Goldstein Date: Mon, 4 Dec 2023 12:12:49 -0500 Subject: [PATCH 27/48] [batch] Batch workers listen on the internal IP address (#14063) No need to accept anything addressed to a public IP --- batch/batch/worker/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/batch/batch/worker/worker.py b/batch/batch/worker/worker.py index 1a7a865b5bc..f5870b22caa 100644 --- a/batch/batch/worker/worker.py +++ b/batch/batch/worker/worker.py @@ -3222,7 +3222,7 @@ async def run(self): app_runner = web.AppRunner(app, access_log_class=BatchWorkerAccessLogger) await app_runner.setup() - site = web.TCPSite(app_runner, '0.0.0.0', 5000) + site = web.TCPSite(app_runner, IP_ADDRESS, 5000) await site.start() try: From 42930ec6e33fdb87840fd137e557cad2c02b61c0 Mon Sep 17 00:00:00 2001 From: Dan King Date: Mon, 4 Dec 2023 12:49:46 -0500 Subject: [PATCH 28/48] [query] prevent sudden unceremonious death of driver JVM (#14066) CHANGELOG: Since 0.2.110, `hailctl dataproc` set the heap size of the driver JVM dangerously high. It is now set to an appropriate level. This issue manifests in a variety of inscrutable ways including RemoteDisconnectedError and socket closed. See issue #13960 for details. In Dataproc versions 1.5.74, 2.0.48, and 2.1.0, Dataproc introduced ["memory protection"](https://cloud.google.com/dataproc/docs/support/troubleshoot-oom-errors#memory_protection) which is a euphemism for a newly aggressive OOMKiller. When the OOMKiller kills the JVM driver process, there is no hs_err_pid...log file, no exceptional log statements, and no clean shutdown of any sockets. The process is simply SIGTERM'ed and then SIGKILL'ed. From Hail 0.2.83 through Hail 0.2.109 (released February 2023), Hail was pinned to Dataproc 2.0.44. From Hail 0.2.15 onwards, `hailctl dataproc`, by default, reserves 80% of the advertised memory of the driver node for the use of the Hail Query Driver JVM process. For example, Google advertises that an n1-highmem-8 has 52 GiB of RAM, so Hail sets the `spark:spark.driver.memory` property to 41g (we always round down). Before aggressive memory protection, this setting was sufficient to protect the driver from starving itself of memory. Unfortunately, Hail 0.2.110 upgraded to Dataproc 2.1.2 which enabled "memory protection". Moreover, in the years since Hail 0.2.15, the memory in use by system processes on Dataproc driver nodes appears to have increased. Due to these two circumstances, the driver VM's memory usage can grow high enough to trigger the OOMKiller before the JVM triggers a GC. 
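To make the accounting concrete before looking at the logs, here is a rough sketch of the old and new heap-size arithmetic for an n1-highmem-8, using only figures quoted in this description (52 GiB advertised; roughly 11 GiB set aside for the 1 GiB shortfall plus system daemons). It reproduces the 41g and 36 GiB numbers discussed here and is an illustration, not the shipped code:

```python
ADVERTISED_GIB = 52  # n1-highmem-8, as advertised by GCE

# Old behavior: 80% of the advertised memory, rounded down.
old_heap_gib = int(ADVERTISED_GIB * 0.8)         # 41 -> spark.driver.memory=41g

# New behavior (see the diff below): subtract ~11 GiB for the ~1 GiB that GCE
# never provides plus the memory used by system daemons, then take 90%.
new_heap_gib = int((ADVERTISED_GIB - 11) * 0.9)  # 36 -> spark.driver.memory=36g

print(old_heap_gib, new_heap_gib)  # 41 36
```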
Consider, for example, these slices of the syslog of the n1-highmem-8 driver VM of a Dataproc cluster: ``` Nov 22 14:26:51 vds-cluster-91f3f4c1-b737-m earlyoom[4115]: earlyoom v1.6.2 Nov 22 14:26:51 vds-cluster-91f3f4c1-b737-m earlyoom[4115]: mem total: 52223 MiB, swap total: 0 MiB Nov 22 14:26:51 vds-cluster-91f3f4c1-b737-m earlyoom[4115]: sending SIGTERM when mem <= 0.12% and swap <= 1.00%, Nov 22 14:26:51 vds-cluster-91f3f4c1-b737-m earlyoom[4115]: SIGKILL when mem <= 0.06% and swap <= 0.50% ... Nov 22 14:30:05 vds-cluster-91f3f4c1-b737-m post-hdfs-startup-script[7747]: + echo 'All done' Nov 22 14:30:05 vds-cluster-91f3f4c1-b737-m post-hdfs-startup-script[7747]: All done Nov 22 14:30:06 vds-cluster-91f3f4c1-b737-m earlyoom[4115]: mem avail: 42760 of 52223 MiB (81.88%), swap free: 0 of 0 MiB ( 0.00%) ``` Notice: 1. The total memory available on the machine is less than 52 GiB (= 53,248 MiB); indeed, it is a full 1025 MiB below the advertised amount. 2. Once all the components of the Dataproc cluster have started (but before any Hail Query jobs are submitted), the total memory available is already depleted to 42760 MiB. Recall that Hail allocates 41 GiB (= 41,984 MiB) to its JVM. This leaves the Python process and all other daemons on the system only 776 MiB of excess RAM. For reference, `python3 -c 'import hail'` needs 206 MiB. This PR modifies `hailctl dataproc start` and the meaning of `--master-memory-fraction`. Now, `--master-memory-fraction` is the percentage of the memory available to the master node after accounting for the missing 1 GiB and the system daemons. We also increase the default memory fraction to 90%. For an n1-highmem-8, the driver has 36 GiB instead of 41 GiB. An n1-highmem-16 is unchanged at 83 GiB. --- hail/python/hailtop/hailctl/dataproc/cli.py | 2 +- hail/python/hailtop/hailctl/dataproc/start.py | 12 +++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/hail/python/hailtop/hailctl/dataproc/cli.py b/hail/python/hailtop/hailctl/dataproc/cli.py index 3ee648fa216..3ff55f6a99c 100644 --- a/hail/python/hailtop/hailctl/dataproc/cli.py +++ b/hail/python/hailtop/hailctl/dataproc/cli.py @@ -84,7 +84,7 @@ def start( Opt( help='Fraction of master memory allocated to the JVM. Use a smaller value to reserve more memory for Python.' ), - ] = 0.8, + ] = 0.9, master_boot_disk_size: Ann[int, Opt(help='Disk size of master machine, in GB')] = 100, num_master_local_ssds: Ann[int, Opt(help='Number of local SSDs to attach to the master machine.')] = 0, num_secondary_workers: NumSecondaryWorkersOption = 0, diff --git a/hail/python/hailtop/hailctl/dataproc/start.py b/hail/python/hailtop/hailctl/dataproc/start.py index 1131e049f2f..5d10d958186 100755 --- a/hail/python/hailtop/hailctl/dataproc/start.py +++ b/hail/python/hailtop/hailctl/dataproc/start.py @@ -307,12 +307,18 @@ def disk_size(size): size = max(size, 200) return str(size) + 'GB' + def jvm_heap_size_gib(machine_type: str, memory_fraction: float) -> int: + advertised_memory_gib = MACHINE_MEM[machine_type] + # 1. GCE only provides 51 GiB for an n1-highmem-8 (advertised as 52 GiB) + # 2.
System daemons use ~10 GiB based on syslog "earlyoom" log statements during VM startup + actual_available_memory_gib = advertised_memory_gib - 11 + jvm_heap_size = actual_available_memory_gib * memory_fraction + return int(jvm_heap_size) + conf.extend_flag( 'properties', { - "spark:spark.driver.memory": "{driver_memory}g".format( - driver_memory=str(int(MACHINE_MEM[master_machine_type] * master_memory_fraction)) - ) + "spark:spark.driver.memory": f"{jvm_heap_size_gib(master_machine_type, master_memory_fraction)}g" }, ) conf.flags['master-machine-type'] = master_machine_type From 405248cd53c46faaa867b17027075f201aff5244 Mon Sep 17 00:00:00 2001 From: Daniel Goldstein Date: Tue, 5 Dec 2023 10:07:07 -0500 Subject: [PATCH 29/48] [auth] Use aiohttp AppKeys for typed Application storage (#14065) `aiohttp` 3.9 introduced `AppKey` which allows `Application.__getitem__` to convey a return type based on its input. So we can do things like `db = app[AppKeys.DB]` and `db` is known to be of type `Database`. This should help a great deal with static type checking and in-editor type hints. --- auth/auth/auth.py | 91 ++++++++++++++++++++++++++--------------------- 1 file changed, 50 insertions(+), 41 deletions(-) diff --git a/auth/auth/auth.py b/auth/auth/auth.py index fac37925605..2087ca8e2f3 100644 --- a/auth/auth/auth.py +++ b/auth/auth/auth.py @@ -212,7 +212,7 @@ async def get_index(request: web.Request, userdata: Optional[UserData]) -> web.R @routes.get('/creating') @auth.maybe_authenticated_user async def creating_account(request: web.Request, userdata: Optional[UserData]) -> web.Response: - db = request.app['db'] + db = request.app[AppKeys.DB] session = await aiohttp_session.get_session(request) if 'pending' in session: login_id = session['login_id'] @@ -257,7 +257,7 @@ async def creating_account_wait(request): async def _wait_websocket(request, login_id): app = request.app - db = app['db'] + db = app[AppKeys.DB] user = await user_from_login_id(db, login_id) if not user: @@ -291,7 +291,7 @@ async def _wait_websocket(request, login_id): async def signup(request) -> NoReturn: next_page = request.query.get('next', deploy_config.external_url('auth', '/user')) - flow_data = request.app['flow_client'].initiate_flow(deploy_config.external_url('auth', '/oauth2callback')) + flow_data = request.app[AppKeys.FLOW_CLIENT].initiate_flow(deploy_config.external_url('auth', '/oauth2callback')) session = await aiohttp_session.new_session(request) cleanup_session(session) @@ -306,7 +306,7 @@ async def signup(request) -> NoReturn: async def login(request) -> NoReturn: next_page = request.query.get('next', deploy_config.external_url('auth', '/user')) - flow_data = request.app['flow_client'].initiate_flow(deploy_config.external_url('auth', '/oauth2callback')) + flow_data = request.app[AppKeys.FLOW_CLIENT].initiate_flow(deploy_config.external_url('auth', '/oauth2callback')) session = await aiohttp_session.new_session(request) cleanup_session(session) @@ -333,7 +333,7 @@ async def callback(request) -> web.Response: cleanup_session(session) try: - flow_result = request.app['flow_client'].receive_callback(request, flow_dict) + flow_result = request.app[AppKeys.FLOW_CLIENT].receive_callback(request, flow_dict) login_id = flow_result.login_id except asyncio.CancelledError: raise @@ -341,7 +341,7 @@ async def callback(request) -> web.Response: log.exception('oauth2 callback: could not fetch and verify token') raise web.HTTPUnauthorized() from e - db = request.app['db'] + db = request.app[AppKeys.DB] user = await 
user_from_login_id(db, login_id) @@ -393,7 +393,7 @@ async def callback(request) -> web.Response: @routes.post('/api/v1alpha/users/{user}/create') @auth.authenticated_developers_only() async def create_user(request: web.Request, _) -> web.Response: - db: Database = request.app['db'] + db = request.app[AppKeys.DB] username = request.match_info['user'] body = await json_request(request) @@ -442,7 +442,7 @@ async def create_copy_paste_token(db, session_id, max_age_secs=300): async def get_copy_paste_token(request: web.Request, userdata: UserData) -> web.Response: session = await aiohttp_session.get_session(request) session_id = session['session_id'] - db = request.app['db'] + db = request.app[AppKeys.DB] copy_paste_token = await create_copy_paste_token(db, session_id) page_context = {'copy_paste_token': copy_paste_token} return await render_template('auth', request, userdata, 'copy-paste-token.html', page_context) @@ -452,7 +452,7 @@ async def get_copy_paste_token(request: web.Request, userdata: UserData) -> web. @auth.authenticated_users_only() async def get_copy_paste_token_api(request: web.Request, _) -> web.Response: session_id = await get_session_id(request) - db = request.app['db'] + db = request.app[AppKeys.DB] copy_paste_token = await create_copy_paste_token(db, session_id) return web.Response(body=copy_paste_token) @@ -463,7 +463,7 @@ async def logout(request: web.Request, userdata: Optional[UserData]) -> NoReturn if not userdata: raise web.HTTPFound(deploy_config.external_url('auth', '')) - db = request.app['db'] + db = request.app[AppKeys.DB] session_id = await get_session_id(request) await db.just_execute('DELETE FROM sessions WHERE session_id = %s;', session_id) @@ -477,7 +477,7 @@ async def logout(request: web.Request, userdata: Optional[UserData]) -> NoReturn async def rest_login(request: web.Request) -> web.Response: callback_port = request.query['callback_port'] callback_uri = f'http://127.0.0.1:{callback_port}/oauth2callback' - flow_data = request.app['flow_client'].initiate_flow(callback_uri) + flow_data = request.app[AppKeys.FLOW_CLIENT].initiate_flow(callback_uri) flow_data['callback_uri'] = callback_uri # keeping authorization_url and state for backwards compatibility @@ -489,13 +489,13 @@ async def rest_login(request: web.Request) -> web.Response: @routes.get('/api/v1alpha/oauth2-client') async def hailctl_oauth_client(request): # pylint: disable=unused-argument idp = IdentityProvider.GOOGLE if CLOUD == 'gcp' else IdentityProvider.MICROSOFT - return json_response({'idp': idp.value, 'oauth2_client': request.app['hailctl_client_config']}) + return json_response({'idp': idp.value, 'oauth2_client': request.app[AppKeys.HAILCTL_CLIENT_CONFIG]}) @routes.get('/roles') @auth.authenticated_developers_only() async def get_roles(request: web.Request, userdata: UserData) -> web.Response: - db = request.app['db'] + db = request.app[AppKeys.DB] roles = [x async for x in db.select_and_fetchall('SELECT * FROM roles;')] page_context = {'roles': roles} return await render_template('auth', request, userdata, 'roles.html', page_context) @@ -505,7 +505,7 @@ async def get_roles(request: web.Request, userdata: UserData) -> web.Response: @auth.authenticated_developers_only() async def post_create_role(request: web.Request, _) -> NoReturn: session = await aiohttp_session.get_session(request) - db = request.app['db'] + db = request.app[AppKeys.DB] post = await request.post() name = str(post['name']) @@ -525,7 +525,7 @@ async def post_create_role(request: web.Request, _) -> NoReturn: 
@routes.get('/users') @auth.authenticated_developers_only() async def get_users(request: web.Request, userdata: UserData) -> web.Response: - db = request.app['db'] + db = request.app[AppKeys.DB] users = [x async for x in db.select_and_fetchall('SELECT * FROM users;')] page_context = {'users': users} return await render_template('auth', request, userdata, 'users.html', page_context) @@ -535,7 +535,7 @@ async def get_users(request: web.Request, userdata: UserData) -> web.Response: @auth.authenticated_developers_only() async def post_create_user(request: web.Request, _) -> NoReturn: session = await aiohttp_session.get_session(request) - db = request.app['db'] + db = request.app[AppKeys.DB] post = await request.post() username = str(post['username']) login_id = str(post['login_id']) if 'login_id' in post else None @@ -562,7 +562,7 @@ async def rest_get_users(request: web.Request, userdata: UserData) -> web.Respon if userdata['is_developer'] != 1 and userdata['username'] != 'ci': raise web.HTTPUnauthorized() - db: Database = request.app['db'] + db = request.app[AppKeys.DB] _query = ''' SELECT id, username, login_id, state, is_developer, is_service_account, hail_identity FROM users; @@ -574,7 +574,7 @@ async def rest_get_users(request: web.Request, userdata: UserData) -> web.Respon @routes.get('/api/v1alpha/users/{user}') @auth.authenticated_developers_only() async def rest_get_user(request: web.Request, _) -> web.Response: - db: Database = request.app['db'] + db = request.app[AppKeys.DB] username = request.match_info['user'] user = await db.select_and_fetchone( @@ -614,7 +614,7 @@ async def _delete_user(db: Database, username: str, id: Optional[str]): @auth.authenticated_developers_only() async def delete_user(request: web.Request, _) -> NoReturn: session = await aiohttp_session.get_session(request) - db = request.app['db'] + db = request.app[AppKeys.DB] post = await request.post() id = str(post['id']) username = str(post['username']) @@ -631,7 +631,7 @@ async def delete_user(request: web.Request, _) -> NoReturn: @routes.delete('/api/v1alpha/users/{user}') @auth.authenticated_developers_only() async def rest_delete_user(request: web.Request, _) -> web.Response: - db = request.app['db'] + db = request.app[AppKeys.DB] username = request.match_info['user'] try: @@ -656,14 +656,14 @@ async def rest_callback(request): flow_dict = json.loads(request.query['flow']) try: - flow_result = request.app['flow_client'].receive_callback(request, flow_dict) + flow_result = request.app[AppKeys.FLOW_CLIENT].receive_callback(request, flow_dict) except asyncio.CancelledError: raise except Exception as e: log.exception('fetching and decoding token') raise web.HTTPUnauthorized() from e - db = request.app['db'] + db = request.app[AppKeys.DB] users = [ x async for x in db.select_and_fetchall( @@ -683,7 +683,7 @@ async def rest_callback(request): @routes.post('/api/v1alpha/copy-paste-login') async def rest_copy_paste_login(request): copy_paste_token = request.query['copy_paste_token'] - db = request.app['db'] + db = request.app[AppKeys.DB] @transaction(db) async def maybe_pop_token(tx): @@ -710,21 +710,21 @@ async def maybe_pop_token(tx): @auth.authenticated_users_only() async def rest_logout(request: web.Request, _) -> web.Response: session_id = await get_session_id(request) - db = request.app['db'] + db = request.app[AppKeys.DB] await db.just_execute('DELETE FROM sessions WHERE session_id = %s;', session_id) return web.Response(status=200) async def get_userinfo(request: web.Request, auth_token: str) -> UserData: - 
flow_client: Flow = request.app['flow_client'] - client_session = request.app['client_session'] + flow_client = request.app[AppKeys.FLOW_CLIENT] + client_session = request.app[AppKeys.CLIENT_SESSION] userdata = await get_userinfo_from_hail_session_id(request, auth_token) if userdata: return userdata - hailctl_oauth_client = request.app['hailctl_client_config'] + hailctl_oauth_client = request.app[AppKeys.HAILCTL_CLIENT_CONFIG] uid = await flow_client.get_identity_uid_from_access_token( client_session, auth_token, oauth2_client=hailctl_oauth_client ) @@ -737,7 +737,7 @@ async def get_userinfo(request: web.Request, auth_token: str) -> UserData: async def get_userinfo_from_login_id_or_hail_identity_id( request: web.Request, login_id_or_hail_idenity_uid: str ) -> UserData: - db = request.app['db'] + db = request.app[AppKeys.DB] users = [ x @@ -754,7 +754,7 @@ async def get_userinfo_from_login_id_or_hail_identity_id( if len(users) != 1: log.info('Unknown login id') raise web.HTTPUnauthorized() - return users[0] + return typing.cast(UserData, users[0]) async def get_userinfo_from_hail_session_id(request: web.Request, session_id: str) -> Optional[UserData]: @@ -762,7 +762,7 @@ async def get_userinfo_from_hail_session_id(request: web.Request, session_id: st if len(session_id) != 44: return None - db = request.app['db'] + db = request.app[AppKeys.DB] users = [ x async for x in db.select_and_fetchall( @@ -779,7 +779,7 @@ async def get_userinfo_from_hail_session_id(request: web.Request, session_id: st if len(users) != 1: return None - return users[0] + return typing.cast(UserData, users[0]) @routes.get('/api/v1alpha/userinfo') @@ -804,32 +804,41 @@ async def verify_dev_or_sa_credentials(_, userdata: UserData) -> web.Response: return web.Response(status=200) +class AppKeys: + DB = web.AppKey('db', Database) + CLIENT_SESSION = web.AppKey('client_session', httpx.ClientSession) + FLOW_CLIENT = web.AppKey('flow_client', Flow) + HAILCTL_CLIENT_CONFIG = web.AppKey('hailctl_client_config', dict) + K8S_CLIENT = web.AppKey('k8s_client', kubernetes_asyncio.client.CoreV1Api) + K8S_CACHE = web.AppKey('k8s_cache', K8sCache) + + async def on_startup(app): db = Database() await db.async_init(maxsize=50) - app['db'] = db - app['client_session'] = httpx.client_session() + app[AppKeys.DB] = db + app[AppKeys.CLIENT_SESSION] = httpx.client_session() credentials_file = '/auth-oauth2-client-secret/client_secret.json' if CLOUD == 'gcp': - app['flow_client'] = GoogleFlow(credentials_file) + app[AppKeys.FLOW_CLIENT] = GoogleFlow(credentials_file) else: assert CLOUD == 'azure' - app['flow_client'] = AzureFlow(credentials_file) + app[AppKeys.FLOW_CLIENT] = AzureFlow(credentials_file) with open('/auth-oauth2-client-secret/hailctl_client_secret.json', 'r', encoding='utf-8') as f: - app['hailctl_client_config'] = json.loads(f.read()) + app[AppKeys.HAILCTL_CLIENT_CONFIG] = json.loads(f.read()) kubernetes_asyncio.config.load_incluster_config() - app['k8s_client'] = kubernetes_asyncio.client.CoreV1Api() - app['k8s_cache'] = K8sCache(app['k8s_client']) + app[AppKeys.K8S_CLIENT] = kubernetes_asyncio.client.CoreV1Api() + app[AppKeys.K8S_CACHE] = K8sCache(app[AppKeys.K8S_CLIENT]) async def on_cleanup(app): async with AsyncExitStack() as cleanup: - cleanup.push_async_callback(app['k8s_client'].api_client.rest_client.pool_manager.close) - cleanup.push_async_callback(app['db'].async_close) - cleanup.push_async_callback(app['client_session'].close) + 
cleanup.push_async_callback(app[AppKeys.K8S_CLIENT].api_client.rest_client.pool_manager.close) + cleanup.push_async_callback(app[AppKeys.DB].async_close) + cleanup.push_async_callback(app[AppKeys.CLIENT_SESSION].close) class AuthAccessLogger(AccessLogger): From c24974fda59ecdb799abe9499f606efb85d18001 Mon Sep 17 00:00:00 2001 From: Dan King Date: Tue, 5 Dec 2023 12:17:30 -0500 Subject: [PATCH 30/48] [Snyk] Security upgrade jupyter-server from 1.24.0 to 2.11.2 (#14070) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

This PR was automatically created by Snyk using the credentials of a real user.


Snyk has created this PR to fix one or more vulnerable packages in the `pip` dependencies of this project.

#### Changes included in this PR - Changes to the following files to upgrade the vulnerable dependencies to a fixed version: - hail/python/dev/pinned-requirements.txt
⚠️ Warning ``` jupyter 1.0.0 requires notebook, which is not installed. jupyter 1.0.0 requires qtconsole, which is not installed. beautifulsoup4 4.12.2 requires soupsieve, which is not installed. argon2-cffi-bindings 21.2.0 requires cffi, which is not installed. aiosignal 1.3.1 requires frozenlist, which is not installed. ```
#### Vulnerabilities that will be fixed ##### By pinning: Severity | Priority Score (*) | Issue | Upgrade | Breaking Change | Exploit Maturity :-------------------------:|-------------------------|:-------------------------|:-------------------------|:-------------------------|:------------------------- ![low severity](https://res.cloudinary.com/snyk/image/upload/w_20,h_20/v1561977819/icon/l.png "low severity") | **461/1000**
**Why?** Recently disclosed, Has a fix available, CVSS 3.5 | Generation of Error Message Containing Sensitive Information
[SNYK-PYTHON-JUPYTERSERVER-6099119](https://snyk.io/vuln/SNYK-PYTHON-JUPYTERSERVER-6099119) | `jupyter-server:`
`1.24.0 -> 2.11.2`
| No | No Known Exploit (*) Note that the real score may have changed since the PR was raised. Some vulnerabilities couldn't be fully fixed and so Snyk will still find them when the project is tested again. This may be because the vulnerability existed within more than one direct dependency, but not all of the affected dependencies could be upgraded. Check the changes in this PR to ensure they won't cause issues with your project. ------------ **Note:** *You are seeing this because you or someone else with access to this repository has authorized Snyk to open fix PRs.* For more information: 🧐 [View latest project report](https://app.snyk.io/org/danking/project/20159ae6-a5aa-42fa-845a-c89f5bcbf999?utm_source=github&utm_medium=referral&page=fix-pr) 🛠 [Adjust project settings](https://app.snyk.io/org/danking/project/20159ae6-a5aa-42fa-845a-c89f5bcbf999?utm_source=github&utm_medium=referral&page=fix-pr/settings) 📚 [Read more about Snyk's upgrade and patch logic](https://support.snyk.io/hc/en-us/articles/360003891078-Snyk-patches-to-fix-vulnerabilities) [//]: # (snyk:metadata:{"prId":"718cb82d-c4e7-4e5a-883f-664469fc080a","prPublicId":"718cb82d-c4e7-4e5a-883f-664469fc080a","dependencies":[{"name":"jupyter-server","from":"1.24.0","to":"2.11.2"}],"packageManager":"pip","projectPublicId":"20159ae6-a5aa-42fa-845a-c89f5bcbf999","projectUrl":"https://app.snyk.io/org/danking/project/20159ae6-a5aa-42fa-845a-c89f5bcbf999?utm_source=github&utm_medium=referral&page=fix-pr","type":"auto","patch":[],"vulns":["SNYK-PYTHON-JUPYTERSERVER-6099119"],"upgrade":[],"isBreakingChange":false,"env":"prod","prType":"fix","templateVariants":["updated-fix-title","pr-warning-shown","priorityScore"],"priorityScoreList":[461],"remediationStrategy":"vuln"}) --- **Learn how to fix vulnerabilities with free interactive lessons:** 🦉 [Generation of Error Message Containing Sensitive Information](https://learn.snyk.io/lesson/error-message-with-sensitive-information/?loc=fix-pr) Co-authored-by: snyk-bot --- hail/python/dev/pinned-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hail/python/dev/pinned-requirements.txt b/hail/python/dev/pinned-requirements.txt index 2fff21408a2..7bc18ae7c5a 100644 --- a/hail/python/dev/pinned-requirements.txt +++ b/hail/python/dev/pinned-requirements.txt @@ -213,7 +213,7 @@ jupyter-events==0.9.0 # via jupyter-server jupyter-lsp==2.2.1 # via jupyterlab -jupyter-server==2.11.1 +jupyter-server==2.11.2 # via # jupyter-lsp # jupyterlab From 3e0b2131eafa075e406d674c2d5e847c2f06f8cc Mon Sep 17 00:00:00 2001 From: Daniel Goldstein Date: Tue, 5 Dec 2023 19:04:32 -0500 Subject: [PATCH 31/48] [batch|qob] Dont request hail tokens in the batch client (#14059) Jobs that are configured with `mount_tokens=True` will have their Hail tokens mounted into the main container. However, now that we are using access tokens from cloud identities, the tokens are no longer used. This removes the default behavior of mounting the `tokens.json` files since they aren't used by our codebase anyway. 
--- batch/test/test_batch.py | 2 -- hail/python/hail/backend/service_backend.py | 1 - hail/python/hailtop/batch/backend.py | 1 - 3 files changed, 4 deletions(-) diff --git a/batch/test/test_batch.py b/batch/test/test_batch.py index f2d19af5a43..9193ddcef6a 100644 --- a/batch/test/test_batch.py +++ b/batch/test/test_batch.py @@ -1128,7 +1128,6 @@ def test_submit_batch_in_job(client: BatchClient, remote_tmpdir: str): j = b.create_job( HAIL_GENETICS_HAILTOP_IMAGE, ['/bin/bash', '-c', f'''python3 -c \'{script}\''''], - mount_tokens=True, ) b.submit() status = j.wait() @@ -1158,7 +1157,6 @@ def test_cant_submit_to_default_with_other_ns_creds(client: BatchClient, remote_ python3 -c \'{script}\'''', ], env={'HAIL_DOMAIN': DOMAIN, 'HAIL_DEFAULT_NAMESPACE': 'default', 'HAIL_LOCATION': 'external'}, - mount_tokens=True, ) b.submit() status = j.wait() diff --git a/hail/python/hail/backend/service_backend.py b/hail/python/hail/backend/service_backend.py index dbf2eac4b92..7035f8889cd 100644 --- a/hail/python/hail/backend/service_backend.py +++ b/hail/python/hail/backend/service_backend.py @@ -406,7 +406,6 @@ async def _run_on_batch( iodir + '/in', iodir + '/out', ], - mount_tokens=True, resources=resources, attributes={'name': name + '_driver'}, regions=self.regions, diff --git a/hail/python/hailtop/batch/backend.py b/hail/python/hailtop/batch/backend.py index 8d82004b2c2..3ee237dabe4 100644 --- a/hail/python/hailtop/batch/backend.py +++ b/hail/python/hailtop/batch/backend.py @@ -801,7 +801,6 @@ async def compile_job(job): cloudfuse=job._cloudfuse if len(job._cloudfuse) > 0 else None, env=env, requester_pays_project=batch.requester_pays_project, - mount_tokens=True, user_code=user_code, regions=job._regions, always_copy_output=job._always_copy_output From 98adcce1d07001995b0819fd6afe161bf34ba840 Mon Sep 17 00:00:00 2001 From: Dan King Date: Thu, 7 Dec 2023 11:38:32 -0500 Subject: [PATCH 32/48] [query] update past a broken google cloud storage java library (#14080) CHANGELOG: Fix #13979, affecting Query-on-Batch and manifesting most frequently as "com.github.luben.zstd.ZstdException: Corrupted block detected". This PR upgrades google-cloud-storage from 2.29.1 to 2.30.1. The google-cloud-storage java library has a bug present at least since 2.29.0 in which simply incorrect data was returned. https://github.com/googleapis/java-storage/issues/2301 . The issue seems related to their use of multiple intermediate ByteBuffers. As far as I can tell, this is what could happen: 1. If there's no channel, open a new channel with the current position. 2. Read *some* data from the input ByteChannel into an intermediate ByteBuffer. 3. While attempting to read more data into a subsequent intermediate ByteBuffer, a retryable exception occurs. 4. The exception bubbles to google-cloud-storage's error handling, which frees the channel and loops back to (1). The key bug is that the intermediate buffers have data but the `position` hasn't been updated. When we recreate the channel we will jump to the wrong position and re-read some data. Lucky for us, between Zstd and our assertions, this usually crashes the program instead of silently returning bad data. This is the third bug we have found in Google's cloud storage java library. The previous two: 1. https://github.com/hail-is/hail/issues/13721 2. https://github.com/hail-is/hail/issues/13937 Be forewarned: the next time we see bizarre networking or data corruption issues, check if updating google-cloud-storage fixes the problem.
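To make the four-step sequence above concrete, here is a self-contained toy in Python — invented names and a deliberately simplified channel, not the library's actual Java — showing how retrying from a `position` that was never advanced past already-buffered bytes produces duplicated data:

```
class RetryableError(Exception):
    pass


class FlakyChannel:
    """A toy read channel that starts at `position` and fails once partway through."""

    def __init__(self, data: bytes, position: int, fail_after: int):
        self.data = data
        self.offset = position
        self.fail_after = fail_after
        self.reads = 0

    def read(self, n: int) -> bytes:
        self.reads += 1
        if self.reads > self.fail_after:
            raise RetryableError('transient network error')
        chunk = self.data[self.offset:self.offset + n]
        self.offset += n
        return chunk


def buggy_read(data: bytes, total: int) -> bytes:
    position = 0  # only advanced after a fully successful pass -- this is the bug
    out = bytearray()  # the "intermediate buffer": it keeps bytes across retries
    attempts = 0
    while len(out) < total:
        # (1) reopen at the recorded (stale) position
        channel = FlakyChannel(data, position, fail_after=1 if attempts == 0 else 10**9)
        attempts += 1
        try:
            while len(out) < total:
                out += channel.read(4)  # (2) some bytes land in the buffer, (3) then a retryable error strikes
        except RetryableError:
            continue  # (4) retry without advancing `position` past the bytes already buffered
        position = len(out)
    return bytes(out)


print(buggy_read(b'abcdefghijkl', 8))  # prints b'abcdabcd' -- duplicated data, not b'abcdefgh'
```

In the real library the duplicated bytes feed the Zstd decoder, which is why the symptom is usually a corrupted-block exception rather than silently wrong results.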
--- hail/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hail/build.gradle b/hail/build.gradle index d3111dc7eaa..eaa3332337d 100644 --- a/hail/build.gradle +++ b/hail/build.gradle @@ -168,7 +168,7 @@ dependencies { throw new UnsupportedOperationException("Couldn't pick a valid elasticsearch.") } - implementation(group: 'com.google.cloud', name: 'google-cloud-storage', version: '2.29.1') { + implementation(group: 'com.google.cloud', name: 'google-cloud-storage', version: '2.30.1') { exclude group: 'com.fasterxml.jackson.core' } From 8d566eea571bda2036d83c1916aa5fbd3acfbb5a Mon Sep 17 00:00:00 2001 From: Dan King Date: Thu, 7 Dec 2023 13:32:53 -0500 Subject: [PATCH 33/48] [query] update zstd-jni to 1.5.5-11 (#14081) The Zstandard version is not changing. The zstd-jni library, which wraps Zstandard and provides some interoperation with java.nio, has released 9 times since 1.5.5-2. They do not publish a changelog, but I scanned through their commits. There were some potentially relevant bug fixes: 1. When using array-backed ByteBuffers, zstd-jni reads the wrong data if the arrayOffset is not zero. https://github.com/luben/zstd-jni/commit/355b8511a2967d097a619047a579930cac2ccd9d 2. Perhaps a slightly faster path for array-backed ByteBuffers. https://github.com/luben/zstd-jni/commit/100c434dfcec17a865ca2c2b844afe1046ce1b10 3. Possibly faster buffer pool. https://github.com/luben/zstd-jni/commit/2b6c3b75012dec44f8fd2dd56dd97eea0d62f19c 4. Removed a double free during compression. https://github.com/luben/zstd-jni/commit/b2ad3834439375b12b0fd0c0b80788a2fe94f06b --- hail/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hail/build.gradle b/hail/build.gradle index eaa3332337d..2a9e0189221 100644 --- a/hail/build.gradle +++ b/hail/build.gradle @@ -202,7 +202,7 @@ dependencies { implementation 'com.kohlschutter.junixsocket:junixsocket-core:2.6.1' - implementation 'com.github.luben:zstd-jni:1.5.5-2' + implementation 'com.github.luben:zstd-jni:1.5.5-11' implementation project(path: ':shadedazure', configuration: 'shadow') } From 6b6e8297128c839e82c765bac5f3ae3a6a7d7816 Mon Sep 17 00:00:00 2001 From: Daniel Goldstein Date: Thu, 7 Dec 2023 17:34:25 -0500 Subject: [PATCH 34/48] [guide-analysis] Host guide browser in hail-vdc (#14078) This adds the k8s config necessary to host the [guide browser](https://hub.docker.com/r/gneak123/guide_browser/tags) in our k8s cluster. You can see it running in dev [here](https://internal.hail.is/dgoldste/guide-analysis/). There's not much special here, a deployment with the browser app and an envoy sidecar to handle TLS. Once this merges and the `ssl-config-guide-analysis` is created in `default` I can `make -C guide deploy NAMESPACE=default` and then recreate the certs to pick up the new subdomain, after which it should be live. Resolves #14067 --- guide/Makefile | 9 ++ guide/deployment.yaml | 163 +++++++++++++++++++++++++++++++++++++ letsencrypt/subdomains.txt | 1 + tls/config.yaml | 4 + 4 files changed, 177 insertions(+) create mode 100644 guide/Makefile create mode 100644 guide/deployment.yaml diff --git a/guide/Makefile b/guide/Makefile new file mode 100644 index 00000000000..422abfc3e49 --- /dev/null +++ b/guide/Makefile @@ -0,0 +1,9 @@ +include ../config.mk + +.PHONY: deploy +deploy: + ! 
[ -z $(NAMESPACE) ] # call this like: make deploy NAMESPACE=default + python3 ../ci/jinja2_render.py \ + '{"global":{"docker_prefix":"$(DOCKER_PREFIX)"},"default_ns":{"name":"$(NAMESPACE)"}}' \ + deployment.yaml deployment.yaml.out + kubectl -n $(NAMESPACE) apply -f deployment.yaml.out diff --git a/guide/deployment.yaml b/guide/deployment.yaml new file mode 100644 index 00000000000..880315f23c5 --- /dev/null +++ b/guide/deployment.yaml @@ -0,0 +1,163 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: guide-sidecar-envoy-config +data: + envoy.yaml: | + static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8443 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: AUTO + stat_prefix: ingress_http + upgrade_configs: + - upgrade_type: websocket + route_config: + name: local_route + virtual_hosts: + - name: guide-analysis + domains: ["*"] + routes: +{% if default_ns.name != "default" %} + - match: + prefix: "/{{ default_ns.name }}/guide-analysis/" + route: + prefix_rewrite: "/" + timeout: 0s + cluster: guide-analysis +{% endif %} + - match: + prefix: "/" + route: + timeout: 0s + cluster: guide-analysis + http_filters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: /ssl-config/guide-analysis-cert.pem + private_key: + filename: /ssl-config/guide-analysis-key.pem + clusters: + - name: guide-analysis + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: guide-analysis + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + admin: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: guide-analysis + labels: + name: guide-analysis +spec: + selector: + matchLabels: + app: guide-analysis + replicas: 1 + template: + metadata: + labels: + app: guide-analysis + spec: + nodeSelector: + preemptible: "false" + containers: + - name: guide-analysis + image: gneak123/guide_browser:amd@sha256:d3801eb2ff08ac0b5e9587ee3780dfa491bc087c367bc8bf3d252b2e60fae5b6 + imagePullPolicy: Always + resources: + requests: + cpu: "100m" + memory: "200M" + limits: + cpu: "1" + memory: "1G" + ports: + - containerPort: 8000 + protocol: TCP + - name: envoy + image: "{{ global.docker_prefix }}/envoyproxy/envoy:v1.22.3" + command: + - /usr/local/bin/envoy + - --config-path + - /etc/envoy/envoy.yaml + - --concurrency + - "2" + resources: + requests: + cpu: "20m" + memory: "20M" + limits: + cpu: "1" + memory: "1G" + ports: + - containerPort: 8443 + volumeMounts: + - name: ssl-config-guide-analysis + mountPath: /ssl-config + readOnly: true + - name: guide-sidecar-envoy-config + mountPath: /etc/envoy + readOnly: true + readinessProbe: + httpGet: +{% if default_ns.name == "default" %} + path: / +{% else %} + path: /{{ default_ns.name }}/guide-analysis/ +{% endif %} + port: 8443 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + volumes: + - name: ssl-config-guide-analysis + secret: + optional: false + 
secretName: ssl-config-guide-analysis + - name: guide-sidecar-envoy-config + configMap: + name: guide-sidecar-envoy-config +--- +apiVersion: v1 +kind: Service +metadata: + name: guide-analysis + labels: + app: guide-analysis +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 8443 + selector: + app: guide-analysis diff --git a/letsencrypt/subdomains.txt b/letsencrypt/subdomains.txt index 6423c7e76d1..94633e98b05 100644 --- a/letsencrypt/subdomains.txt +++ b/letsencrypt/subdomains.txt @@ -6,6 +6,7 @@ blog monitoring auth ukbb-rg +guide-analysis grafana prometheus hello diff --git a/tls/config.yaml b/tls/config.yaml index b2cbffca020..bd71e534762 100644 --- a/tls/config.yaml +++ b/tls/config.yaml @@ -62,3 +62,7 @@ principals: domains: - prometheus kind: nginx +- name: guide-analysis + domains: + - guide-analysis + kind: nginx From 16a3c5ae0ed7baf130df1c9c35dd2599d560b5fd Mon Sep 17 00:00:00 2001 From: Dan King Date: Fri, 8 Dec 2023 04:41:38 -0500 Subject: [PATCH 35/48] [query] update requests dependency (#14084) From https://github.com/hail-is/hail/pull/14024. --- hail/python/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hail/python/requirements.txt b/hail/python/requirements.txt index 9f8492a2a07..eaed2c0d116 100644 --- a/hail/python/requirements.txt +++ b/hail/python/requirements.txt @@ -11,5 +11,5 @@ parsimonious<1 plotly>=5.5.0,<6 protobuf==3.20.2 pyspark>=3.3.0,<3.4 -requests>=2.25.1,<3 +requests>=2.31.0,<3 scipy>1.2,<1.12 From c1973cdb3b32f781c1ccbe1d37d71301b8d288bc Mon Sep 17 00:00:00 2001 From: Dan King Date: Fri, 8 Dec 2023 08:39:22 -0500 Subject: [PATCH 36/48] [aiocloud] refresh access tokens after transient errors (#14083) We need to retry 401s until we get a 401 from credentials we know to be invalid. 
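As a minimal sketch of that rule — `do_request` and `get_auth_headers_with_expiration` are hypothetical stand-ins, not the real session and credentials classes — the retry loop added as `_request_with_valid_authn` in the diff below behaves roughly like this:

```
import time


class HTTPError(Exception):
    def __init__(self, status: int):
        super().__init__(f'HTTP {status}')
        self.status = status


async def request_with_valid_authn(do_request, get_auth_headers_with_expiration, method, url, **kwargs):
    while True:
        headers, expiration = await get_auth_headers_with_expiration()
        try:
            return await do_request(method, url, headers=headers, **kwargs)
        except HTTPError as err:
            if err.status != 401:
                raise
            if expiration is None or time.time() <= expiration:
                # the credentials had not expired, so this 401 is a genuine authorization failure
                raise
            # otherwise the token expired mid-request: loop, fetch fresh headers, and retry
```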
--- .../hailtop/aiocloud/aioazure/credentials.py | 12 +++---- .../hailtop/aiocloud/aiogoogle/credentials.py | 11 ++++--- .../hailtop/aiocloud/common/credentials.py | 23 +++++++++++-- .../python/hailtop/aiocloud/common/session.py | 32 +++++++++++++------ hail/python/hailtop/auth/auth.py | 28 +++++++++------- hail/python/hailtop/auth/tokens.py | 10 +++++- hail/python/hailtop/batch_client/aioclient.py | 8 ++--- 7 files changed, 85 insertions(+), 39 deletions(-) diff --git a/hail/python/hailtop/aiocloud/aioazure/credentials.py b/hail/python/hailtop/aiocloud/aioazure/credentials.py index a13694db2b9..bb86bc6781d 100644 --- a/hail/python/hailtop/aiocloud/aioazure/credentials.py +++ b/hail/python/hailtop/aiocloud/aioazure/credentials.py @@ -5,7 +5,7 @@ import logging from types import TracebackType -from typing import Any, List, Optional, Type, Union +from typing import Any, List, Optional, Type, Union, Tuple, Dict from azure.identity.aio import DefaultAzureCredential, ClientSecretCredential from azure.core.credentials import AccessToken from azure.core.credentials_async import AsyncTokenCredential @@ -110,17 +110,17 @@ def __init__(self, credential: Union[DefaultAzureCredential, ClientSecretCredent scopes = ['https://management.azure.com/.default'] self.scopes = scopes - async def auth_headers(self): - access_token = await self.access_token() - return {'Authorization': f'Bearer {access_token}'} + async def auth_headers_with_expiration(self) -> Tuple[Dict[str, str], Optional[float]]: + access_token, expiration = await self.access_token_with_expiration() + return {'Authorization': f'Bearer {access_token}'}, expiration - async def access_token(self) -> str: + async def access_token_with_expiration(self) -> Tuple[str, Optional[float]]: now = time.time() if self._access_token is None or (self._expires_at is not None and now > self._expires_at): self._access_token = await self.get_access_token() self._expires_at = now + (self._access_token.expires_on - now) // 2 # type: ignore assert self._access_token - return self._access_token.token + return self._access_token.token, self._expires_at async def get_access_token(self): return await self.credential.get_token(*self.scopes) diff --git a/hail/python/hailtop/aiocloud/aiogoogle/credentials.py b/hail/python/hailtop/aiocloud/aiogoogle/credentials.py index 33ac5887033..0b1356c1604 100644 --- a/hail/python/hailtop/aiocloud/aiogoogle/credentials.py +++ b/hail/python/hailtop/aiocloud/aiogoogle/credentials.py @@ -1,4 +1,4 @@ -from typing import Dict, Optional, Union, List, Literal, ClassVar, overload +from typing import Dict, Optional, Union, List, Literal, ClassVar, overload, Tuple import os import json import time @@ -102,13 +102,14 @@ def default_credentials(scopes: Optional[List[str]] = None, *, anonymous_ok: boo 'run `gcloud auth application-default login` first to log in.') return AnonymousCloudCredentials() - async def auth_headers(self) -> Dict[str, str]: - return {'Authorization': f'Bearer {await self.access_token()}'} + async def auth_headers_with_expiration(self) -> Tuple[Dict[str, str], Optional[float]]: + token, expiration = await self.access_token_with_expiration() + return {'Authorization': f'Bearer {token}'}, expiration - async def access_token(self) -> str: + async def access_token_with_expiration(self) -> Tuple[str, Optional[float]]: if self._access_token is None or self._access_token.expired(): self._access_token = await self._get_access_token() - return self._access_token.token + return self._access_token.token, self._access_token._expiry_time 
async def _get_access_token(self) -> GoogleExpiringAccessToken: raise NotImplementedError diff --git a/hail/python/hailtop/aiocloud/common/credentials.py b/hail/python/hailtop/aiocloud/common/credentials.py index 08fb314e722..30d7df47141 100644 --- a/hail/python/hailtop/aiocloud/common/credentials.py +++ b/hail/python/hailtop/aiocloud/common/credentials.py @@ -1,22 +1,39 @@ import abc -from typing import Dict +from typing import Dict, Tuple, Optional class CloudCredentials(abc.ABC): @abc.abstractmethod - async def auth_headers(self) -> Dict[str, str]: + async def auth_headers_with_expiration(self) -> Tuple[Dict[str, str], Optional[float]]: + """Return HTTP authentication headers and the time of expiration in seconds since the epoch (Unix time). + + None indicates a non-expiring credentials.""" raise NotImplementedError @abc.abstractmethod - async def access_token(self) -> str: + async def access_token_with_expiration(self) -> Tuple[str, Optional[float]]: + """Return an access token and the time of expiration in seconds since the epoch (Unix time). + + None indicates a non-expiring credentials.""" raise NotImplementedError + async def auth_headers(self) -> Dict[str, str]: + headers, _ = await self.auth_headers_with_expiration() + return headers + + async def access_token(self) -> str: + access_token, _ = await self.access_token_with_expiration() + return access_token + @abc.abstractmethod async def close(self): raise NotImplementedError class AnonymousCloudCredentials: + async def auth_headers_with_expiration(self) -> Tuple[Dict[str, str], Optional[float]]: + return {}, None + async def auth_headers(self) -> Dict[str, str]: return {} diff --git a/hail/python/hailtop/aiocloud/common/session.py b/hail/python/hailtop/aiocloud/common/session.py index 5f03be1a87d..6be8347261b 100644 --- a/hail/python/hailtop/aiocloud/common/session.py +++ b/hail/python/hailtop/aiocloud/common/session.py @@ -1,13 +1,17 @@ from contextlib import AsyncExitStack from types import TracebackType from typing import Optional, Type, TypeVar, Mapping, Union +import time import aiohttp import abc +import logging from hailtop import httpx from hailtop.utils import retry_transient_errors, RateLimit, RateLimiter from .credentials import CloudCredentials, AnonymousCloudCredentials + SessionType = TypeVar('SessionType', bound='BaseSession') +log = logging.getLogger('hailtop.aiocloud.common.session') class BaseSession(abc.ABC): @@ -82,13 +86,6 @@ def __init__(self, self._credentials = credentials async def request(self, method: str, url: str, **kwargs) -> aiohttp.ClientResponse: - auth_headers = await self._credentials.auth_headers() - if auth_headers: - if 'headers' in kwargs: - kwargs['headers'].update(auth_headers) - else: - kwargs['headers'] = auth_headers - if self._params: if 'params' in kwargs: request_params = kwargs['params'] @@ -102,8 +99,25 @@ async def request(self, method: str, url: str, **kwargs) -> aiohttp.ClientRespon # retry by default retry = kwargs.pop('retry', True) if retry: - return await retry_transient_errors(self._http_session.request, method, url, **kwargs) - return await self._http_session.request(method, url, **kwargs) + return await retry_transient_errors(self._request_with_valid_authn, method, url, **kwargs) + return await self._request_with_valid_authn(method, url, **kwargs) + + async def _request_with_valid_authn(self, method, url, **kwargs): + while True: + auth_headers, expiration = await self._credentials.auth_headers_with_expiration() + if auth_headers: + if 'headers' in kwargs: + 
kwargs['headers'].update(auth_headers) + else: + kwargs['headers'] = auth_headers + try: + return await self._http_session.request(method, url, **kwargs) + except httpx.ClientResponseError as err: + if err.status != 401: + raise + if expiration is None or time.time() <= expiration: + raise err + log.info(f'Credentials expired while waiting for request to {url}. We will retry. {err}.') async def close(self) -> None: async with AsyncExitStack() as stack: diff --git a/hail/python/hailtop/auth/auth.py b/hail/python/hailtop/auth/auth.py index 9320e4cb8a6..6b2ef9875b7 100644 --- a/hail/python/hailtop/auth/auth.py +++ b/hail/python/hailtop/auth/auth.py @@ -40,32 +40,38 @@ def __init__(self, tokens: Tokens, cloud_credentials: Optional[CloudCredentials] self._namespace = namespace self._authorize_target = authorize_target - async def auth_headers(self) -> Dict[str, str]: + async def auth_headers_with_expiration(self) -> Tuple[Dict[str, str], Optional[float]]: headers = {} + expiration = None if self._authorize_target: - token = await self._get_idp_access_token_or_hail_token(self._namespace) + token, expiration = await self._get_idp_access_token_or_hail_token(self._namespace) headers['Authorization'] = f'Bearer {token}' if get_deploy_config().location() == 'external' and self._namespace != 'default': # We prefer an extant hail token to an access token for the internal auth token # during development of the idp access token feature because the production auth # is not yet configured to accept access tokens. This can be changed to always prefer # an idp access token when this change is in production. - token = await self._get_hail_token_or_idp_access_token('default') + token, internal_expiration = await self._get_hail_token_or_idp_access_token('default') + if internal_expiration: + if not expiration: + expiration = internal_expiration + else: + expiration = min(expiration, internal_expiration) headers['X-Hail-Internal-Authorization'] = f'Bearer {token}' - return headers + return headers, expiration - async def access_token(self) -> str: + async def access_token_with_expiration(self) -> Tuple[str, Optional[float]]: return await self._get_idp_access_token_or_hail_token(self._namespace) - async def _get_idp_access_token_or_hail_token(self, namespace: str) -> str: + async def _get_idp_access_token_or_hail_token(self, namespace: str) -> Tuple[str, Optional[float]]: if self._cloud_credentials is not None: - return await self._cloud_credentials.access_token() - return self._tokens.namespace_token_or_error(namespace) + return await self._cloud_credentials.access_token_with_expiration() + return self._tokens.namespace_token_with_expiration_or_error(namespace) - async def _get_hail_token_or_idp_access_token(self, namespace: str) -> str: + async def _get_hail_token_or_idp_access_token(self, namespace: str) -> Tuple[str, Optional[float]]: if self._cloud_credentials is None: - return self._tokens.namespace_token_or_error(namespace) - return self._tokens.namespace_token(namespace) or await self._cloud_credentials.access_token() + return self._tokens.namespace_token_with_expiration_or_error(namespace) + return self._tokens.namespace_token_with_expiration(namespace) or await self._cloud_credentials.access_token_with_expiration() async def close(self): if self._cloud_credentials: diff --git a/hail/python/hailtop/auth/tokens.py b/hail/python/hailtop/auth/tokens.py index 8f7a45c3885..6e44ece2645 100644 --- a/hail/python/hailtop/auth/tokens.py +++ b/hail/python/hailtop/auth/tokens.py @@ -1,4 +1,4 @@ -from typing import 
Optional, Dict +from typing import Optional, Dict, Tuple import base64 import collections.abc import os @@ -71,6 +71,11 @@ def __getitem__(self, key: str) -> str: def namespace_token(self, ns: str) -> Optional[str]: return self._tokens.get(ns) + def namespace_token_with_expiration(self, ns: str) -> Optional[Tuple[str, Optional[float]]]: + if token := self._tokens.get(ns): + return token, None + return None + def namespace_token_or_error(self, ns: str) -> str: if ns in self._tokens: return self._tokens[ns] @@ -80,6 +85,9 @@ def namespace_token_or_error(self, ns: str) -> str: ns_arg = '' if ns == default_ns else f'-n {ns}' raise NotLoggedInError(ns_arg) + def namespace_token_with_expiration_or_error(self, ns: str) -> Tuple[str, Optional[float]]: + return self.namespace_token_or_error(ns), None + def __delitem__(self, key: str): del self._tokens[key] diff --git a/hail/python/hailtop/batch_client/aioclient.py b/hail/python/hailtop/batch_client/aioclient.py index fb471d135f3..ff5fe5f855b 100644 --- a/hail/python/hailtop/batch_client/aioclient.py +++ b/hail/python/hailtop/batch_client/aioclient.py @@ -854,11 +854,11 @@ class HailExplicitTokenCredentials(CloudCredentials): def __init__(self, token: str): self._token = token - async def auth_headers(self) -> Dict[str, str]: - return {'Authorization': f'Bearer {self._token}'} + async def auth_headers_with_expiration(self) -> Tuple[Dict[str, str], Optional[float]]: + return {'Authorization': f'Bearer {self._token}'}, None - async def access_token(self) -> str: - return self._token + async def access_token_with_expiration(self) -> Tuple[str, Optional[float]]: + return self._token, None async def close(self): pass From d51b398b2e32f844ce77bc349fb03ec9943ba810 Mon Sep 17 00:00:00 2001 From: Dan King Date: Fri, 8 Dec 2023 13:53:14 -0500 Subject: [PATCH 37/48] [fs] await coroutines in armtree and amkdir (#14088) With the types, pyright can see this. 
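A toy illustration of the bug class being fixed — not Hail code — is below: an `async def` that returns another coroutine without awaiting it never runs the underlying work, and annotating the return type as `None` lets pyright flag the mismatch:

```
import asyncio


async def mkdir(path: str) -> None:
    print(f'created {path}')


async def amkdir_buggy(path: str) -> None:
    return mkdir(path)  # returns an un-awaited coroutine: nothing runs, and pyright flags the type


async def amkdir_fixed(path: str) -> None:
    return await mkdir(path)  # actually performs the work before returning None


asyncio.run(amkdir_fixed('/tmp/example'))  # prints 'created /tmp/example'
```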
--- hail/python/hailtop/fs/fs.py | 12 ++++++------ hail/python/hailtop/fs/router_fs.py | 16 ++++++++-------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/hail/python/hailtop/fs/fs.py b/hail/python/hailtop/fs/fs.py index ec8598310d5..26a2b944a37 100644 --- a/hail/python/hailtop/fs/fs.py +++ b/hail/python/hailtop/fs/fs.py @@ -34,24 +34,24 @@ def ls(self, path: str) -> List[FileListEntry]: raise NotImplementedError @abc.abstractmethod - def mkdir(self, path: str): + def mkdir(self, path: str) -> None: raise NotImplementedError - async def amkdir(self, path: str): + async def amkdir(self, path: str) -> None: return self.mkdir(path) @abc.abstractmethod - def remove(self, path: str): + def remove(self, path: str) -> None: raise NotImplementedError - async def aremove(self, path: str): + async def aremove(self, path: str) -> None: return self.remove(path) @abc.abstractmethod - def rmtree(self, path: str): + def rmtree(self, path: str) -> None: raise NotImplementedError - async def armtree(self, path: str): + async def armtree(self, path: str) -> None: return self.rmtree(path) @abc.abstractmethod diff --git a/hail/python/hailtop/fs/router_fs.py b/hail/python/hailtop/fs/router_fs.py index cb0c5d15a62..e4f23442e10 100644 --- a/hail/python/hailtop/fs/router_fs.py +++ b/hail/python/hailtop/fs/router_fs.py @@ -393,23 +393,23 @@ async def ls_as_dir() -> Optional[List[FileListEntry]]: raise FileNotFoundError(path) return maybe_contents - def mkdir(self, path: str): + def mkdir(self, path: str) -> None: return async_to_blocking(self.afs.mkdir(path)) - async def amkdir(self, path: str): - return self.afs.mkdir(path) + async def amkdir(self, path: str) -> None: + return await self.afs.mkdir(path) - def remove(self, path: str): + def remove(self, path: str) -> None: return async_to_blocking(self.afs.remove(path)) - async def aremove(self, path: str): + async def aremove(self, path: str) -> None: return await self.afs.remove(path) - def rmtree(self, path: str): + def rmtree(self, path: str) -> None: return async_to_blocking(self.afs.rmtree(None, path)) - async def armtree(self, path: str): - return self.afs.rmtree(None, path) + async def armtree(self, path: str) -> None: + return await self.afs.rmtree(None, path) def supports_scheme(self, scheme: str) -> bool: return scheme in self.afs.schemes From f7d839b9d06a4e572fc690fdf61a181a7e994b87 Mon Sep 17 00:00:00 2001 From: Dan King Date: Fri, 8 Dec 2023 18:59:18 -0500 Subject: [PATCH 38/48] [hailctl] make hailctl work on windows (#14090) CHANGELOG: Fix #14089, which makes `hailctl dataproc connect` work in Windows Subsystem for Linux. 1. Non 64-bit Windows uses "Program Files" not "Program Files (x86)" 2. Windows Subsystem for Linux looks like GNU/Linux but will not have chromium on its path. 3. The removed arguments are no longer supported. They produce a warning message in my version of Chrome and appear to not work in the version of Chrome that this user was using. Instead, I bind to 0.0.0.0 and access the Notebook using the machine DNS name. This is how Google recommend accessing the Spark UI anyway. 
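A condensed sketch of the WSL heuristic described above (simplified; the full logic, including the `HAILCTL_CHROME` override and the `/mnt/c` chrome.exe paths, is in the diff below):

```
import platform


def looks_like_wsl() -> bool:
    # WSL reports a Linux system whose kernel release string mentions Microsoft;
    # in that case the Windows chrome.exe under /mnt/c is used instead of a Linux chromium.
    release = platform.uname().release
    return platform.system() == 'Linux' and ('microsoft' in release or 'Microsoft' in release)
```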
--- .../hailtop/hailctl/dataproc/connect.py | 30 +++++++++++-------- .../dataproc/resources/init_notebook.py | 2 +- .../hailtop/hailctl/dataproc/test_connect.py | 6 ++-- 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/hail/python/hailtop/hailctl/dataproc/connect.py b/hail/python/hailtop/hailctl/dataproc/connect.py index 4da5bb66e45..d3c4c6d4a17 100755 --- a/hail/python/hailtop/hailctl/dataproc/connect.py +++ b/hail/python/hailtop/hailctl/dataproc/connect.py @@ -36,16 +36,25 @@ def get_chrome_path(): if system == 'Darwin': return '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome' - if system == 'Linux': - for c in ['chromium', 'chromium-browser']: + release = platform.uname().release + is_wsl = 'Microsoft' in release or 'microsoft' in release + + if system == 'Linux' and not is_wsl: + for c in ['chromium', 'chromium-browser', 'chrome.exe']: chrome = shutil.which(c) if chrome: return chrome - raise EnvironmentError("cannot find 'chromium' or 'chromium-browser' on path") + raise EnvironmentError("cannot find 'chromium', 'chromium-browser', or 'chrome.exe' on path") - if system == 'Windows': - return '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe' + if system == 'Windows' or (system == 'Linux' and is_wsl): + fnames = [ + '/mnt/c/Program Files/Google/Chrome/Application/chrome.exe' + '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe' + ] + for fname in fnames: + if os.path.exists(fname): + return fname raise ValueError(f"unsupported system: {system}, set environment variable HAILCTL_CHROME to a chrome executable") @@ -109,18 +118,15 @@ def connect( gcloud.run(cmd) chrome = os.environ.get('HAILCTL_CHROME') or get_chrome_path() + data_dir = os.path.join(tempfile.gettempdir(), 'hailctl-dataproc-connect-' + secret_alnum_string(6)) # open Chrome with SOCKS proxy configuration with subprocess.Popen( [ # pylint: disable=consider-using-with chrome, - 'http://localhost:{}'.format(connect_port_and_path), - '--proxy-server=socks5://localhost:{}'.format(port), - '--host-resolver-rules=MAP * 0.0.0.0 , EXCLUDE localhost', - '--proxy-bypass-list=<-loopback>', # https://chromium.googlesource.com/chromium/src/+/da790f920bbc169a6805a4fb83b4c2ab09532d91 - '--user-data-dir={}'.format( - os.path.join(tempfile.gettempdir(), 'hailctl-dataproc-connect-' + secret_alnum_string(6)) - ), + f'http://{name}-m:{connect_port_and_path}', + f'--proxy-server=socks5://localhost:{port}', + f'--user-data-dir={data_dir}', ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, diff --git a/hail/python/hailtop/hailctl/dataproc/resources/init_notebook.py b/hail/python/hailtop/hailctl/dataproc/resources/init_notebook.py index 8b473b9a67d..c0a0c1a31ea 100644 --- a/hail/python/hailtop/hailctl/dataproc/resources/init_notebook.py +++ b/hail/python/hailtop/hailctl/dataproc/resources/init_notebook.py @@ -163,7 +163,7 @@ def mkdir_if_not_exists(path): with open('/opt/conda/default/etc/jupyter/jupyter_notebook_config.py', 'w') as f: opts = [ 'c.Application.log_level = "DEBUG"', - 'c.NotebookApp.ip = "127.0.0.1"', + 'c.NotebookApp.ip = "0.0.0.0"', 'c.NotebookApp.open_browser = False', 'c.NotebookApp.port = 8123', 'c.NotebookApp.token = ""', diff --git a/hail/python/test/hailtop/hailctl/dataproc/test_connect.py b/hail/python/test/hailtop/hailctl/dataproc/test_connect.py index 7a1dbad7eb4..36212906ee8 100644 --- a/hail/python/test/hailtop/hailctl/dataproc/test_connect.py +++ b/hail/python/test/hailtop/hailctl/dataproc/test_connect.py @@ -57,11 +57,9 @@ def test_connect(gcloud_run, 
subprocess): popen_args = subprocess.Popen.call_args[0][0] assert popen_args[0] == "chromium" - assert popen_args[1].startswith("http://localhost") + assert popen_args[1].startswith("http://test-cluster-m") assert "--proxy-server=socks5://localhost:10000" in popen_args - assert "--host-resolver-rules=MAP * 0.0.0.0 , EXCLUDE localhost" in popen_args - assert "--proxy-bypass-list=<-loopback>" in popen_args assert any(arg.startswith("--user-data-dir=") for arg in popen_args) @@ -77,7 +75,7 @@ def test_service_port_and_path(subprocess, service, expected_port_and_path): runner.invoke(cli.app, ['connect', 'test-cluster', service]) popen_args = subprocess.Popen.call_args[0][0] - assert popen_args[1] == f"http://localhost:{expected_port_and_path}" + assert popen_args[1] == f"http://test-cluster-m:{expected_port_and_path}" def test_hailctl_chrome(subprocess, monkeypatch): From f35588666e8d689c6bf893c7531a1078e5d5f1d4 Mon Sep 17 00:00:00 2001 From: Christopher Vittal Date: Sat, 9 Dec 2023 10:25:29 -0600 Subject: [PATCH 39/48] [query/vds combiner] Change sanity checks on combiner construction (#14087) * Add assertion to `load_combiner` and `new_combiner` to fail if the output vds exists * Remove assertion that disallows empty `gvcfs` and `vdses` in `VariantDatasetCombiner.__init__` Resolves #14079 --- .../vds/combiner/variant_dataset_combiner.py | 61 +++++++++++-------- 1 file changed, 36 insertions(+), 25 deletions(-) diff --git a/hail/python/hail/vds/combiner/variant_dataset_combiner.py b/hail/python/hail/vds/combiner/variant_dataset_combiner.py index c99a5f7c9ce..6ed4009e89d 100644 --- a/hail/python/hail/vds/combiner/variant_dataset_combiner.py +++ b/hail/python/hail/vds/combiner/variant_dataset_combiner.py @@ -10,11 +10,12 @@ import hail as hl from hail.expr import HailType, tmatrix -from hail.utils import Interval +from hail.utils import FatalError, Interval from hail.utils.java import info, warning from .combine import combine_variant_datasets, transform_gvcf, defined_entry_fields, make_variant_stream, \ make_reference_stream, combine_r, calculate_even_genome_partitioning, \ calculate_new_intervals, combine +from ..variant_dataset import VariantDataset class VDSMetadata(NamedTuple): @@ -223,8 +224,6 @@ def __init__(self, gvcf_info_to_keep: Optional[Collection[str]] = None, gvcf_reference_entry_fields_to_keep: Optional[Collection[str]] = None, ): - if not (vdses or gvcfs): - raise ValueError("one of 'vdses' or 'gvcfs' must be nonempty") if gvcf_import_intervals: interval = gvcf_import_intervals[0] if not isinstance(interval.point_type, hl.tlocus): @@ -345,12 +344,21 @@ def load(path) -> 'VariantDatasetCombiner': fs = hl.current_backend().fs with fs.open(path) as stream: combiner = json.load(stream, cls=Decoder) + combiner._raise_if_output_exists() if combiner._save_path != path: warning('path/save_path mismatch in loaded VariantDatasetCombiner, using ' f'{path} as the new save_path for this combiner') combiner._save_path = path return combiner + def _raise_if_output_exists(self): + fs = hl.current_backend().fs + ref_success_path = os.path.join(VariantDataset._reference_path(self._output_path), '_SUCCESS') + var_success_path = os.path.join(VariantDataset._variants_path(self._output_path), '_SUCCESS') + if fs.exists(ref_success_path) and fs.exists(var_success_path): + raise FatalError(f'combiner output already exists at {self._output_path}\n' + 'move or delete it before continuing') + def to_dict(self) -> dict: """A serializable representation of this combiner.""" intervals_typ = 
hl.tarray(hl.tinterval(hl.tlocus(self._reference_genome))) @@ -399,14 +407,14 @@ def step(self): self._job_id += 1 def _write_final(self, vds): - fd = hl.vds.VariantDataset.ref_block_max_length_field + fd = VariantDataset.ref_block_max_length_field if fd not in vds.reference_data.globals: info("VDS combiner: computing reference block max length...") max_len = vds.reference_data.aggregate_entries( hl.agg.max(vds.reference_data.END + 1 - vds.reference_data.locus.position)) info(f"VDS combiner: max reference block length is {max_len}") - vds = hl.vds.VariantDataset(reference_data=vds.reference_data.annotate_globals(**{fd: max_len}), + vds = VariantDataset(reference_data=vds.reference_data.annotate_globals(**{fd: max_len}), variant_data=vds.variant_data) vds.write(self._output_path) @@ -548,7 +556,7 @@ def _step_gvcfs(self): globals=hl.struct( g=hl.literal(ids).map(lambda s: hl.struct(__cols=[hl.struct(s=s)])))) variant_ht = combine(variant_ht) - vds = hl.vds.VariantDataset(reference_ht._unlocalize_entries('__entries', '__cols', ['s']), + vds = VariantDataset(reference_ht._unlocalize_entries('__entries', '__cols', ['s']), variant_ht._unlocalize_entries('__entries', '__cols', ['s'])._key_rows_by_assert_sorted('locus', 'alleles')) @@ -654,8 +662,9 @@ def maybe_load_from_saved_path(save_path: str) -> Optional[VariantDatasetCombine combiner._target_records = target_records combiner._gvcf_batch_size = gvcf_batch_size return combiner - except (ValueError, TypeError, OSError, KeyError): - warning(f'file exists at {save_path}, but it is not a valid combiner plan, overwriting') + except (ValueError, TypeError, OSError, KeyError) as e: + warning(f'file exists at {save_path}, but it is not a valid combiner plan, overwriting\n' + f' caused by: {e}') return None # We do the first save_path check now after validating the arguments @@ -792,23 +801,25 @@ def maybe_load_from_saved_path(save_path: str) -> Optional[VariantDatasetCombine vdses.sort(key=lambda x: x.n_samples, reverse=True) - return VariantDatasetCombiner(save_path=save_path, - output_path=output_path, - temp_path=temp_path, - reference_genome=reference_genome, - dataset_type=dataset_type, - branch_factor=branch_factor, - target_records=target_records, - gvcf_batch_size=gvcf_batch_size, - contig_recoding=contig_recoding, - call_fields=call_fields, - vdses=vdses, - gvcfs=gvcf_paths, - gvcf_import_intervals=intervals, - gvcf_external_header=gvcf_external_header, - gvcf_sample_names=gvcf_sample_names, - gvcf_info_to_keep=gvcf_info_to_keep, - gvcf_reference_entry_fields_to_keep=gvcf_reference_entry_fields_to_keep) + combiner = VariantDatasetCombiner(save_path=save_path, + output_path=output_path, + temp_path=temp_path, + reference_genome=reference_genome, + dataset_type=dataset_type, + branch_factor=branch_factor, + target_records=target_records, + gvcf_batch_size=gvcf_batch_size, + contig_recoding=contig_recoding, + call_fields=call_fields, + vdses=vdses, + gvcfs=gvcf_paths, + gvcf_import_intervals=intervals, + gvcf_external_header=gvcf_external_header, + gvcf_sample_names=gvcf_sample_names, + gvcf_info_to_keep=gvcf_info_to_keep, + gvcf_reference_entry_fields_to_keep=gvcf_reference_entry_fields_to_keep) + combiner._raise_if_output_exists() + return combiner def load_combiner(path: str) -> VariantDatasetCombiner: From 1286c1ceb022bc1a0a8c5d055e400e6e91db9e0a Mon Sep 17 00:00:00 2001 From: Dan King Date: Wed, 13 Dec 2023 07:04:55 -0500 Subject: [PATCH 40/48] [query] retry Tag mismatch a limited number of times (#14094) Wenhan observed this error after I
gave her a branch using google cloud storage 2.30.1. I've reported this new transient error to the client API repo, but I doubt it will be fixed. https://github.com/googleapis/java-storage/issues/2337 SSL errors seem like the kind of thing we should not retry forever since they could indicate a bad actor. --- hail/src/main/scala/is/hail/services/package.scala | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hail/src/main/scala/is/hail/services/package.scala b/hail/src/main/scala/is/hail/services/package.scala index 306bd4759ce..4e454d853ed 100644 --- a/hail/src/main/scala/is/hail/services/package.scala +++ b/hail/src/main/scala/is/hail/services/package.scala @@ -83,6 +83,10 @@ package object services { // at sun.nio.ch.IOUtil.read(IOUtil.java:192) ~[?:1.8.0_362] // at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:379) ~[?:1.8.0_362] true + case e: SSLException + if e.getMessage != null && e.getMessage.contains("Tag mismatch!") => + // https://github.com/googleapis/java-storage/issues/2337 + true case e => val cause = e.getCause cause != null && isLimitedRetriesError(cause) From 7fab886da771e84f88d2a6dae36f3a8e9d73e937 Mon Sep 17 00:00:00 2001 From: Dan King Date: Thu, 14 Dec 2023 13:12:26 -0500 Subject: [PATCH 41/48] [qob] fix pytest-qob (#14091) Just explicitly start a loop and use our cloud-friendly FSes. Also, we need to recursively copy the resources. --- hail/Makefile | 4 +- .../hail/matrixtable/test_file_formats.py | 48 ++++++++++++------- 2 files changed, 34 insertions(+), 18 deletions(-) diff --git a/hail/Makefile b/hail/Makefile index 64fd0740468..d79395060b3 100644 --- a/hail/Makefile +++ b/hail/Makefile @@ -316,8 +316,8 @@ upload-artifacts: $(WHEEL) upload-qob-test-resources: $(shell git ls-files src/test/resources) upload-qob-test-resources: $(shell git ls-files python/hail/docs/data) ! 
[ -z $(NAMESPACE) ] # call this like: make upload-qob-test-resources NAMESPACE=default - gcloud storage cp src/test/resources/\* $(CLOUD_HAIL_TEST_RESOURCES_DIR) - gcloud storage cp python/hail/docs/data/\* $(CLOUD_HAIL_DOCTEST_DATA_DIR) + gcloud storage cp -r src/test/resources/\* $(CLOUD_HAIL_TEST_RESOURCES_DIR) + gcloud storage cp -r python/hail/docs/data/\* $(CLOUD_HAIL_DOCTEST_DATA_DIR) # # In Azure, use the following instead of gcloud storage cp # python3 -m hailtop.aiotools.copy -vvv 'null' '[\ # {"from":"src/test/resources","to":"$(CLOUD_HAIL_TEST_RESOURCES_DIR)"},\ diff --git a/hail/python/test/hail/matrixtable/test_file_formats.py b/hail/python/test/hail/matrixtable/test_file_formats.py index 95c71654c7e..a0bda5b694e 100644 --- a/hail/python/test/hail/matrixtable/test_file_formats.py +++ b/hail/python/test/hail/matrixtable/test_file_formats.py @@ -1,6 +1,7 @@ +import asyncio import pytest import os -from typing import List +from typing import List, Tuple from pathlib import Path import hail as hl @@ -47,21 +48,36 @@ def all_values_table_fixture(init_hail): return create_all_values_table() -resource_dir = resource('backward_compatability') -def add_paths(dirname): - file_paths: List[str] = [] - with os.scandir(resource_dir) as versions: - for version_dir in versions: - try: - with os.scandir(Path(resource_dir, version_dir, dirname)) as old_files: - for file in old_files: - file_paths.append(file.path) - except FileNotFoundError: - pass - return file_paths - -ht_paths = add_paths('table') -mt_paths = add_paths('matrix_table') +async def collect_paths() -> Tuple[List[str], List[str]]: + resource_dir = resource('backward_compatability/') + from hailtop.aiotools.router_fs import RouterAsyncFS + fs = RouterAsyncFS() + + async def contents_if_present(url: str): + try: + return await fs.listfiles(url) + except FileNotFoundError: + async def empty(): + if False: + yield + return empty() + + try: + versions = [await x.url() async for x in await fs.listfiles(resource_dir)] + ht_paths = [await x.url() + for version in versions + async for x in await contents_if_present(version + 'table/')] + mt_paths = [await x.url() + for version in versions + async for x in await contents_if_present(version + 'matrix_table/')] + return ht_paths, mt_paths + finally: + await fs.close() + + +# pytest sometimes uses background threads, named "Dummy-1", to collect tests. Asyncio dislikes +# automatically creating event loops in these threads, so we just explicitly create one. +ht_paths, mt_paths = asyncio.new_event_loop().run_until_complete(collect_paths()) @pytest.mark.parametrize("path", mt_paths) From c4aed6043af75eb881bb7fb56437af4470fb60fa Mon Sep 17 00:00:00 2001 From: Daniel Goldstein Date: Sat, 16 Dec 2023 13:54:56 -0500 Subject: [PATCH 42/48] [hailtop] Remove namespace options in hailctl auth commands (#14069) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I'm trying to move our codebase away from relying on the notion of a K8s namespace for routing because this model does not hold in alternative deployment environments such as Terra. Further, having a `-n` option in `hailctl auth` commands is awkward because it can only be used for dev environments yet is visible to users in the help menu. As such, this is a breaking change only for developers. The functionality is not really gone though because you can replace any `hailctl auth … -n dgoldste` with `HAIL_DEFAULT_NAMESPACE=dgoldste hailctl auth …`. 
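For programmatic callers, the same routing is available through the new `deploy_config=` parameter on `hail_credentials` added in this change, mirroring what `hailctl curl` now does internally. A minimal sketch, assuming the `hailtop` package is installed and a dev namespace named `my-dev-ns` exists (the namespace name and the helper function below are illustrative, not part of this patch):

```python
import asyncio

from hailtop.auth import hail_credentials
from hailtop.config import get_deploy_config


async def auth_headers_for_namespace(namespace: str) -> dict:
    # Pin a DeployConfig to the desired namespace, then pass it to
    # hail_credentials in place of the removed namespace= argument.
    deploy_config = get_deploy_config().with_default_namespace(namespace)
    async with hail_credentials(deploy_config=deploy_config) as credentials:
        return await credentials.auth_headers()


if __name__ == '__main__':
    print(asyncio.run(auth_headers_for_namespace('my-dev-ns')))
```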
--- build.yaml | 10 +- hail/python/hailtop/auth/auth.py | 104 +++++++----------- hail/python/hailtop/hailctl/__main__.py | 5 +- hail/python/hailtop/hailctl/auth/cli.py | 28 ++--- .../hailtop/hailctl/auth/create_user.py | 5 +- .../hailtop/hailctl/auth/delete_user.py | 7 +- hail/python/hailtop/hailctl/auth/login.py | 17 +-- hail/python/hailtop/hailctl/batch/utils.py | 2 +- 8 files changed, 66 insertions(+), 112 deletions(-) diff --git a/build.yaml b/build.yaml index f0d9ba4bf6d..db51d38ed38 100644 --- a/build.yaml +++ b/build.yaml @@ -1872,9 +1872,9 @@ steps: --retry 3 \ --retry-delay 5 \ -XPOST) - hailctl auth copy-paste-login "$COPY_PASTE_TOKEN" --namespace {{ default_ns.name }} + hailctl auth copy-paste-login "$COPY_PASTE_TOKEN" - if hailctl auth copy-paste-login "$COPY_PASTE_TOKEN" --namespace {{ default_ns.name }} + if hailctl auth copy-paste-login "$COPY_PASTE_TOKEN" then echo "reusing a token should not work, but did" exit 1 @@ -1888,14 +1888,14 @@ steps: -XPOST) python3 -c ' from hailtop.auth import copy_paste_login; - copy_paste_login("'$COPY_PASTE_TOKEN'", "{{ default_ns.name }}") + copy_paste_login("'$COPY_PASTE_TOKEN'") ' python3 -c ' from hailtop.auth import copy_paste_login; import aiohttp try: - copy_paste_login("'$COPY_PASTE_TOKEN'", "{{ default_ns.name }}") + copy_paste_login("'$COPY_PASTE_TOKEN'") print("reusing a token should not work, but did") sys.exit(1) except aiohttp.client_exceptions.ClientResponseError as exc: @@ -1945,7 +1945,7 @@ steps: import aiohttp from hailtop.auth import copy_paste_login; try: - copy_paste_login("'$COPY_PASTE_TOKEN'", "{{ default_ns.name }}") + copy_paste_login("'$COPY_PASTE_TOKEN'") print("using an expired token should not work, but did") sys.exit(1) except aiohttp.client_exceptions.ClientResponseError as exc: diff --git a/hail/python/hailtop/auth/auth.py b/hail/python/hailtop/auth/auth.py index 6b2ef9875b7..1a1566b578e 100644 --- a/hail/python/hailtop/auth/auth.py +++ b/hail/python/hailtop/auth/auth.py @@ -1,4 +1,5 @@ from typing import Any, Optional, Dict, Tuple, List +from contextlib import asynccontextmanager from dataclasses import dataclass from enum import Enum import os @@ -34,19 +35,19 @@ def from_json(config: Dict[str, Any]): class HailCredentials(CloudCredentials): - def __init__(self, tokens: Tokens, cloud_credentials: Optional[CloudCredentials], namespace: str, authorize_target: bool): + def __init__(self, tokens: Tokens, cloud_credentials: Optional[CloudCredentials], deploy_config: DeployConfig, authorize_target: bool): self._tokens = tokens self._cloud_credentials = cloud_credentials - self._namespace = namespace + self._deploy_config = deploy_config self._authorize_target = authorize_target async def auth_headers_with_expiration(self) -> Tuple[Dict[str, str], Optional[float]]: headers = {} expiration = None if self._authorize_target: - token, expiration = await self._get_idp_access_token_or_hail_token(self._namespace) + token, expiration = await self._get_idp_access_token_or_hail_token(self._deploy_config.default_namespace()) headers['Authorization'] = f'Bearer {token}' - if get_deploy_config().location() == 'external' and self._namespace != 'default': + if get_deploy_config().location() == 'external' and self._deploy_config.default_namespace() != 'default': # We prefer an extant hail token to an access token for the internal auth token # during development of the idp access token feature because the production auth # is not yet configured to accept access tokens. 
This can be changed to always prefer @@ -61,7 +62,7 @@ async def auth_headers_with_expiration(self) -> Tuple[Dict[str, str], Optional[f return headers, expiration async def access_token_with_expiration(self) -> Tuple[str, Optional[float]]: - return await self._get_idp_access_token_or_hail_token(self._namespace) + return await self._get_idp_access_token_or_hail_token(self._deploy_config.default_namespace()) async def _get_idp_access_token_or_hail_token(self, namespace: str) -> Tuple[str, Optional[float]]: if self._cloud_credentials is not None: @@ -88,13 +89,12 @@ def hail_credentials( *, tokens_file: Optional[str] = None, cloud_credentials_file: Optional[str] = None, - namespace: Optional[str] = None, + deploy_config: Optional[DeployConfig] = None, authorize_target: bool = True ) -> HailCredentials: tokens = get_tokens(tokens_file) - deploy_config = get_deploy_config() - ns = namespace or deploy_config.default_namespace() - return HailCredentials(tokens, get_cloud_credentials_scoped_for_hail(credentials_file=cloud_credentials_file), ns, authorize_target=authorize_target) + deploy_config = deploy_config or get_deploy_config() + return HailCredentials(tokens, get_cloud_credentials_scoped_for_hail(credentials_file=cloud_credentials_file), deploy_config, authorize_target=authorize_target) def get_cloud_credentials_scoped_for_hail(credentials_file: Optional[str] = None) -> Optional[CloudCredentials]: @@ -138,21 +138,6 @@ def load_identity_spec() -> Optional[IdentityProviderSpec]: return None -async def deploy_config_and_headers_from_namespace(namespace: Optional[str] = None, *, authorize_target: bool = True) -> Tuple[DeployConfig, Dict[str, str], str]: - deploy_config = get_deploy_config() - - if namespace is not None: - deploy_config = deploy_config.with_default_namespace(namespace) - else: - namespace = deploy_config.default_namespace() - - - async with hail_credentials(namespace=namespace, authorize_target=authorize_target) as credentials: - headers = await credentials.auth_headers() - - return (deploy_config, headers, namespace) - - async def async_get_userinfo(): deploy_config = get_deploy_config() userinfo_url = deploy_config.url('auth', '/api/v1alpha/userinfo') @@ -172,29 +157,29 @@ def get_userinfo(): return async_to_blocking(async_get_userinfo()) -def copy_paste_login(copy_paste_token: str, namespace: Optional[str] = None): - return async_to_blocking(async_copy_paste_login(copy_paste_token, namespace)) +def copy_paste_login(copy_paste_token: str) -> str: + return async_to_blocking(async_copy_paste_login(copy_paste_token)) -async def async_copy_paste_login(copy_paste_token: str, namespace: Optional[str] = None): - deploy_config, headers, namespace = await deploy_config_and_headers_from_namespace(namespace, authorize_target=False) - async with httpx.client_session(headers=headers) as session: +async def async_copy_paste_login(copy_paste_token: str) -> str: + deploy_config = get_deploy_config() + async with httpx.client_session() as session: data = await retry_transient_errors( session.post_read_json, deploy_config.url('auth', '/api/v1alpha/copy-paste-login'), - params={'copy_paste_token': copy_paste_token} + params={'copy_paste_token': copy_paste_token}, ) token = data['token'] username = data['username'] tokens = get_tokens() - tokens[namespace] = token + tokens[deploy_config.default_namespace()] = token dot_hail_dir = os.path.expanduser('~/.hail') if not os.path.exists(dot_hail_dir): os.mkdir(dot_hail_dir, mode=0o700) tokens.write() - return namespace, username + return username async 
def async_logout(): @@ -236,20 +221,21 @@ async def logout_oauth2_credentials(identity_spec: IdentityProviderSpec): await AzureFlow.logout_installed_app(identity_spec.oauth2_credentials) -def get_user(username: str, namespace: Optional[str] = None) -> dict: - return async_to_blocking(async_get_user(username, namespace)) +@asynccontextmanager +async def hail_session(**session_kwargs): + async with hail_credentials() as credentials: + async with Session(credentials=credentials, **session_kwargs) as session: + yield session +def get_user(username: str) -> dict: + return async_to_blocking(async_get_user(username)) -async def async_get_user(username: str, namespace: Optional[str] = None) -> dict: - deploy_config, headers, _ = await deploy_config_and_headers_from_namespace(namespace) - async with httpx.client_session( - timeout=aiohttp.ClientTimeout(total=30), - headers=headers) as session: - return await retry_transient_errors( - session.get_read_json, - deploy_config.url('auth', f'/api/v1alpha/users/{username}') - ) +async def async_get_user(username: str) -> dict: + async with hail_session(timeout=aiohttp.ClientTimeout(total=30)) as session: + url = get_deploy_config().url('auth', f'/api/v1alpha/users/{username}') + async with await session.get(url) as resp: + return await resp.json() async def async_create_user( @@ -259,11 +245,7 @@ async def async_create_user( is_service_account: bool, hail_identity: Optional[str], hail_credentials_secret_name: Optional[str], - *, - namespace: Optional[str] = None ): - deploy_config, headers, _ = await deploy_config_and_headers_from_namespace(namespace) - body = { 'login_id': login_id, 'is_developer': is_developer, @@ -272,26 +254,16 @@ async def async_create_user( 'hail_credentials_secret_name': hail_credentials_secret_name, } - async with httpx.client_session( - timeout=aiohttp.ClientTimeout(total=30), - headers=headers) as session: - await retry_transient_errors( - session.post, - deploy_config.url('auth', f'/api/v1alpha/users/{username}/create'), - json=body - ) + url = get_deploy_config().url('auth', f'/api/v1alpha/users/{username}/create') + async with hail_session(timeout=aiohttp.ClientTimeout(total=30)) as session: + await session.post(url, json=body) -def delete_user(username: str, namespace: Optional[str] = None): - return async_to_blocking(async_delete_user(username, namespace=namespace)) +def delete_user(username: str): + return async_to_blocking(async_delete_user(username)) -async def async_delete_user(username: str, namespace: Optional[str] = None): - deploy_config, headers, _ = await deploy_config_and_headers_from_namespace(namespace) - async with httpx.client_session( - timeout=aiohttp.ClientTimeout(total=300), - headers=headers) as session: - await retry_transient_errors( - session.delete, - deploy_config.url('auth', f'/api/v1alpha/users/{username}') - ) +async def async_delete_user(username: str): + url = get_deploy_config().url('auth', f'/api/v1alpha/users/{username}') + async with hail_session(timeout=aiohttp.ClientTimeout(total=300)) as session: + await session.delete(url) diff --git a/hail/python/hailtop/hailctl/__main__.py b/hail/python/hailtop/hailctl/__main__.py index ca51dc5721b..ebb9ad02e58 100644 --- a/hail/python/hailtop/hailctl/__main__.py +++ b/hail/python/hailtop/hailctl/__main__.py @@ -55,10 +55,11 @@ async def _curl( from hailtop.auth import hail_credentials # pylint: disable=import-outside-toplevel from hailtop.config import get_deploy_config # pylint: disable=import-outside-toplevel - async with 
hail_credentials(namespace=namespace) as credentials: + deploy_config = get_deploy_config().with_default_namespace(namespace) + async with hail_credentials(deploy_config=deploy_config) as credentials: headers_dict = await credentials.auth_headers() headers = [x for k, v in headers_dict.items() for x in ['-H', f'{k}: {v}']] - path = get_deploy_config().url(service, path) + path = deploy_config.url(service, path) os.execvp('curl', ['curl', *headers, *ctx.args, path]) diff --git a/hail/python/hailtop/hailctl/auth/cli.py b/hail/python/hailtop/hailctl/auth/cli.py index 5f84a5a4db5..bba44d240f6 100644 --- a/hail/python/hailtop/hailctl/auth/cli.py +++ b/hail/python/hailtop/hailctl/auth/cli.py @@ -1,7 +1,7 @@ import asyncio import sys import typer -from typer import Option as Opt, Argument as Arg +from typer import Argument as Arg import json from typing import Optional, Annotated as Ann @@ -15,29 +15,21 @@ ) -NamespaceOption = Ann[ - Optional[str], - Opt('--namespace', '-n', help='Namespace for the auth server (default: from deploy configuration).'), -] - - @app.command() -def login(namespace: NamespaceOption = None): +def login(): '''Obtain Hail credentials.''' from .login import async_login # pylint: disable=import-outside-toplevel - asyncio.run(async_login(namespace)) + asyncio.run(async_login()) @app.command() -def copy_paste_login(copy_paste_token: str, namespace: NamespaceOption = None): +def copy_paste_login(copy_paste_token: str): '''Obtain Hail credentials with a copy paste token.''' from hailtop.auth import copy_paste_login # pylint: disable=import-outside-toplevel + from hailtop.config import get_deploy_config # pylint: disable=import-outside-toplevel - auth_ns, username = copy_paste_login(copy_paste_token, namespace) - if auth_ns == 'default': - print(f'Logged in as {username}.') - else: - print(f'Logged into namespace {auth_ns} as {username}.') + username = copy_paste_login(copy_paste_token) + print(f'Logged into {get_deploy_config().base_url("auth")} as {username}.') @app.command() @@ -92,7 +84,6 @@ def create_user( service_account: bool = False, hail_identity: Optional[str] = None, hail_credentials_secret_name: Optional[str] = None, - namespace: NamespaceOption = None, wait: bool = False, ): ''' @@ -100,13 +91,12 @@ def create_user( ''' from .create_user import polling_create_user # pylint: disable=import-outside-toplevel - asyncio.run(polling_create_user(username, login_id, developer, service_account, hail_identity, hail_credentials_secret_name, namespace=namespace, wait=wait)) + asyncio.run(polling_create_user(username, login_id, developer, service_account, hail_identity, hail_credentials_secret_name, wait=wait)) @app.command() def delete_user( username: str, - namespace: NamespaceOption = None, wait: bool = False, ): ''' @@ -114,4 +104,4 @@ def delete_user( ''' from .delete_user import polling_delete_user # pylint: disable=import-outside-toplevel - asyncio.run(polling_delete_user(username, namespace, wait)) + asyncio.run(polling_delete_user(username, wait)) diff --git a/hail/python/hailtop/hailctl/auth/create_user.py b/hail/python/hailtop/hailctl/auth/create_user.py index dab152d6b3f..2f622a14cca 100644 --- a/hail/python/hailtop/hailctl/auth/create_user.py +++ b/hail/python/hailtop/hailctl/auth/create_user.py @@ -16,11 +16,10 @@ async def polling_create_user( hail_identity: Optional[str], hail_credentials_secret_name: Optional[str], *, - namespace: Optional[str] = None, wait: bool = False, ): try: - await async_create_user(username, login_id, developer, service_account, 
hail_identity, hail_credentials_secret_name, namespace=namespace) + await async_create_user(username, login_id, developer, service_account, hail_identity, hail_credentials_secret_name) if not wait: return @@ -28,7 +27,7 @@ async def polling_create_user( async def _poll(): tries = 0 while True: - user = await async_get_user(username, namespace) + user = await async_get_user(username) if user['state'] == 'active': print(f"Created user '{username}'") return diff --git a/hail/python/hailtop/hailctl/auth/delete_user.py b/hail/python/hailtop/hailctl/auth/delete_user.py index fad558d98b0..f82d20045ef 100644 --- a/hail/python/hailtop/hailctl/auth/delete_user.py +++ b/hail/python/hailtop/hailctl/auth/delete_user.py @@ -1,5 +1,3 @@ -from typing import Optional - from hailtop.utils import sleep_before_try from hailtop.auth import async_delete_user, async_get_user @@ -10,11 +8,10 @@ class DeleteUserException(Exception): async def polling_delete_user( username: str, - namespace: Optional[str], wait: bool, ): try: - await async_delete_user(username, namespace) + await async_delete_user(username) if not wait: return @@ -22,7 +19,7 @@ async def polling_delete_user( async def _poll(): tries = 1 while True: - user = await async_get_user(username, namespace) + user = await async_get_user(username) if user['state'] == 'deleted': print(f"Deleted user '{username}'") return diff --git a/hail/python/hailtop/hailctl/auth/login.py b/hail/python/hailtop/hailctl/auth/login.py index fe74876c011..70dad3faf93 100644 --- a/hail/python/hailtop/hailctl/auth/login.py +++ b/hail/python/hailtop/hailctl/auth/login.py @@ -1,4 +1,3 @@ -from typing import Optional import os import json @@ -7,7 +6,7 @@ from hailtop.httpx import client_session, ClientSession -async def auth_flow(deploy_config: DeployConfig, default_ns: str, session: ClientSession): +async def auth_flow(deploy_config: DeployConfig, session: ClientSession): resp = await session.get_read_json(deploy_config.url('auth', '/api/v1alpha/oauth2-client')) idp = IdentityProvider(resp['idp']) client_secret_config = resp['oauth2_client'] @@ -22,22 +21,18 @@ async def auth_flow(deploy_config: DeployConfig, default_ns: str, session: Clien f.write(json.dumps({'idp': idp.value, 'credentials': credentials})) # Confirm that the logged in user is registered with the hail service - async with hail_credentials(namespace=default_ns) as c: + async with hail_credentials(deploy_config=deploy_config) as c: headers_with_auth = await c.auth_headers() async with client_session(headers=headers_with_auth) as auth_session: userinfo = await auth_session.get_read_json(deploy_config.url('auth', '/api/v1alpha/userinfo')) username = userinfo['username'] - if default_ns == 'default': - print(f'Logged in as {username}.') - else: - print(f'Logged into namespace {default_ns} as {username}.') + print(f'Logged into {deploy_config.base_url("auth")} as {username}.') -async def async_login(namespace: Optional[str]): +async def async_login(): deploy_config = get_deploy_config() - namespace = namespace or deploy_config.default_namespace() - async with hail_credentials(namespace=namespace, authorize_target=False) as credentials: + async with hail_credentials(deploy_config=deploy_config, authorize_target=False) as credentials: headers = await credentials.auth_headers() async with client_session(headers=headers) as session: - await auth_flow(deploy_config, namespace, session) + await auth_flow(deploy_config, session) diff --git a/hail/python/hailtop/hailctl/batch/utils.py b/hail/python/hailtop/hailctl/batch/utils.py 
index 8adb709c75a..d5e0898dd9d 100644 --- a/hail/python/hailtop/hailctl/batch/utils.py +++ b/hail/python/hailtop/hailctl/batch/utils.py @@ -25,7 +25,7 @@ async def already_logged_into_service() -> bool: async def login_to_service(): from hailtop.hailctl.auth.login import async_login # pylint: disable=import-outside-toplevel - await async_login('default') + await async_login() async def check_for_gcloud() -> bool: From 9c717de7823756b502aa00ca45eef62b7df1cbfe Mon Sep 17 00:00:00 2001 From: Dan King Date: Mon, 18 Dec 2023 14:13:37 -0500 Subject: [PATCH 43/48] [hailctl] fix hailctl for YHJ (#14098) Oh Chrome, How do I call thee? Let me count the ways. --- .../hailtop/hailctl/dataproc/connect.py | 24 +++++++++++++++---- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/hail/python/hailtop/hailctl/dataproc/connect.py b/hail/python/hailtop/hailctl/dataproc/connect.py index d3c4c6d4a17..0bec6db7097 100755 --- a/hail/python/hailtop/hailctl/dataproc/connect.py +++ b/hail/python/hailtop/hailctl/dataproc/connect.py @@ -30,6 +30,18 @@ def shortcut(self): return self +def get_datadir_path(): + from hailtop.utils import secret_alnum_string # pylint: disable=import-outside-toplevel + + system = platform.system() + release = platform.uname().release + is_wsl = system == 'Linux' and ('Microsoft' in release or 'microsoft' in release) + + if not is_wsl: + return os.path.join(tempfile.mkdtemp('hailctl-dataproc-connect-')) + return 'C:\\Temp\\hailctl-' + secret_alnum_string(5) + + def get_chrome_path(): system = platform.system() @@ -48,10 +60,14 @@ def get_chrome_path(): raise EnvironmentError("cannot find 'chromium', 'chromium-browser', or 'chrome.exe' on path") if system == 'Windows' or (system == 'Linux' and is_wsl): + # https://stackoverflow.com/questions/40674914/google-chrome-path-in-windows-10 fnames = [ - '/mnt/c/Program Files/Google/Chrome/Application/chrome.exe' - '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe' + '/mnt/c/Program Files/Google/Chrome/Application/chrome.exe', + '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe', + '/mnt/c/Program Files(x86)/Google/Chrome/Application/chrome.exe', + '/mnt/c/ProgramFiles(x86)/Google/Chrome/Application/chrome.exe', ] + for fname in fnames: if os.path.exists(fname): return fname @@ -68,8 +84,6 @@ def connect( dry_run: bool, pass_through_args: List[str], ): - from hailtop.utils import secret_alnum_string # pylint: disable=import-outside-toplevel - service = service.shortcut() # Dataproc port mapping @@ -118,7 +132,7 @@ def connect( gcloud.run(cmd) chrome = os.environ.get('HAILCTL_CHROME') or get_chrome_path() - data_dir = os.path.join(tempfile.gettempdir(), 'hailctl-dataproc-connect-' + secret_alnum_string(6)) + data_dir = os.environ.get('HAILCTL_CHROME_DATA_DIR') or get_datadir_path() # open Chrome with SOCKS proxy configuration with subprocess.Popen( From 39c3cda8089bf38cc38fdf9278a209f65796b60b Mon Sep 17 00:00:00 2001 From: Dan King Date: Tue, 19 Dec 2023 12:19:04 -0500 Subject: [PATCH 44/48] [query] cleave Backend paths into normal parallelize and parallelize returning errors (#14085) The original goal of this PR was avoiding `Try` when we are not using the restartability provided by semantic hashing because I strongly suspect it is related to the loss of stacktraces in exceptions. Unrelatedly, we realized the semantic hash PR changed the semantics of Query-on-Spark even when semantic hash is disabled: previously we would abort RDD writing on the first exception. 
In Hail 0.2.123 through 0.2.126, the semantics were changed to only crash *after* we already ran every other partition. Two bad scenarios of which I can think: 1. Suppose the first partition fails due to OOM. We now waste time/money on the rest of the partitions even though we cannot possibly get a valid output. 2. Suppose every partition hits a permission error. Users should get that feedback after paying for O(1) partitions run, not O(N). I created two Backend paths: the normal `parallelizeAndComputeWithIndex` with its pre-0.2.123 semantics as well as `parallelizeAndComputeWithIndexReturnAllErrors` which, as the name says, returns errors instead of raising them. While making this change, I think I found two other bugs in the "return all errors" path, only one of which I addressed in this PR: 1. I'm pretty sure semantic-hash-enabled QoB batch submission is broken because it uses the logical partition ids as job indices. Suppose there are 10,000 partitions, but we only need to compute 1, 100, and 1543. 0.2.126 would try to submit a batch of size 3 but whose job indices are 1, 100, and 1543. 2. Likewise, the Query-on-Spark path returns an invalid `SparkTaskContext.partitionId` which, at best, produces confusing partition filenames. I only fixed the former because it was simple to fix. I wasn't exactly sure what to do about the latter. We should fix that separately because the changes in this PR need to urgently land in the next release to avoid unexpected cost when one partition fails. --- .../main/scala/is/hail/backend/Backend.scala | 15 +- .../scala/is/hail/backend/BackendUtils.scala | 184 +++++++++++------- .../is/hail/backend/local/LocalBackend.scala | 19 +- .../hail/backend/service/ServiceBackend.scala | 107 ++++++---- .../is/hail/backend/spark/SparkBackend.scala | 43 +++- .../main/scala/is/hail/io/vcf/LoadVCF.scala | 4 +- .../main/scala/is/hail/services/package.scala | 2 +- .../main/scala/is/hail/utils/package.scala | 5 +- .../is/hail/expr/ir/table/TableGenSuite.scala | 5 +- 9 files changed, 262 insertions(+), 122 deletions(-) diff --git a/hail/src/main/scala/is/hail/backend/Backend.scala b/hail/src/main/scala/is/hail/backend/Backend.scala index 0f77f6d91ed..4ebdde16688 100644 --- a/hail/src/main/scala/is/hail/backend/Backend.scala +++ b/hail/src/main/scala/is/hail/backend/Backend.scala @@ -75,13 +75,24 @@ abstract class Backend { def getPersistedBlockMatrixType(backendContext: BackendContext, id: String): BlockMatrixType def parallelizeAndComputeWithIndex( + backendContext: BackendContext, + fs: FS, + collection: Array[Array[Byte]], + stageIdentifier: String, + dependency: Option[TableStageDependency] = None + )( + f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte] + ): Array[Array[Byte]] + + def parallelizeAndComputeWithIndexReturnAllErrors( backendContext: BackendContext, fs: FS, collection: IndexedSeq[(Array[Byte], Int)], stageIdentifier: String, dependency: Option[TableStageDependency] = None - )(f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte]) - : (Option[Throwable], IndexedSeq[(Array[Byte], Int)]) + )( + f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte] + ): (Option[Throwable], IndexedSeq[(Array[Byte], Int)]) def stop(): Unit diff --git a/hail/src/main/scala/is/hail/backend/BackendUtils.scala b/hail/src/main/scala/is/hail/backend/BackendUtils.scala index 120d10e4316..8d744bd9567 100644 --- a/hail/src/main/scala/is/hail/backend/BackendUtils.scala +++ b/hail/src/main/scala/is/hail/backend/BackendUtils.scala @@ -7,6 +7,7 
@@ import is.hail.backend.local.LocalTaskContext import is.hail.expr.ir.analyses.SemanticHash import is.hail.expr.ir.lowering.TableStageDependency import is.hail.io.fs._ +import is.hail.services._ import is.hail.utils._ import scala.util.Try @@ -23,87 +24,134 @@ class BackendUtils(mods: Array[(String, (HailClassLoader, FS, HailTaskContext, R def getModule(id: String): (HailClassLoader, FS, HailTaskContext, Region) => F = loadedModules(id) - def collectDArray(backendContext: BackendContext, - theDriverHailClassLoader: HailClassLoader, - fs: FS, - modID: String, - contexts: Array[Array[Byte]], - globals: Array[Byte], - stageName: String, - semhash: Option[SemanticHash.Type], - tsd: Option[TableStageDependency] - ): Array[Array[Byte]] = { - - val cachedResults = - semhash - .map { s => - log.info(s"[collectDArray|$stageName]: querying cache for $s") - val cachedResults = backendContext.executionCache.lookup(s) - log.info(s"[collectDArray|$stageName]: found ${cachedResults.length} entries for $s.") - cachedResults + private[this] def lookupSemanticHashResults( + backendContext: BackendContext, + stageName: String, + semanticHash: Option[SemanticHash.Type], + ): Option[IndexedSeq[(Array[Byte], Int)]] = semanticHash.map { s => + log.info(s"[collectDArray|$stageName]: querying cache for $s") + val cachedResults = backendContext.executionCache.lookup(s) + log.info(s"[collectDArray|$stageName]: found ${cachedResults.length} entries for $s.") + cachedResults + } + + def collectDArray( + backendContext: BackendContext, + theDriverHailClassLoader: HailClassLoader, + fs: FS, + modID: String, + contexts: Array[Array[Byte]], + globals: Array[Byte], + stageName: String, + semhash: Option[SemanticHash.Type], + tsd: Option[TableStageDependency] + ): Array[Array[Byte]] = lookupSemanticHashResults(backendContext, stageName, semhash) match { + case None => + if (contexts.isEmpty) + return Array() + + val backend = HailContext.backend + val f = getModule(modID) + + log.info( + s"[collectDArray|$stageName]: executing ${contexts.length} tasks, " + + s"contexts size = ${formatSpace(contexts.map(_.length.toLong).sum)}, " + + s"globals size = ${formatSpace(globals.length)}" + ) + + val t = System.nanoTime() + val results = if (backend.canExecuteParallelTasksOnDriver && contexts.length == 1) { + val context = contexts(0) + using(new LocalTaskContext(0, 0)) { htc => + using(htc.getRegionPool().getRegion()) { r => + val run = f(theDriverHailClassLoader, fs, htc, r) + val result = retryTransientErrors { + run(r, context, globals) + } + Array(result) + } } - .getOrElse(IndexedSeq.empty) - - val remainingContexts = - for { - c@(_, k) <- contexts.zipWithIndex - if !cachedResults.containsOrdered[Int](k, _ < _, _._2) - } yield c - - val results = - if (remainingContexts.isEmpty) cachedResults else { - val backend = HailContext.backend - val f = getModule(modID) - - log.info( - s"[collectDArray|$stageName]: executing ${remainingContexts.length} tasks, " + - s"contexts size = ${formatSpace(contexts.map(_.length.toLong).sum)}, " + - s"globals size = ${formatSpace(globals.length)}" - ) - - val t = System.nanoTime() - val (failureOpt, successes) = - remainingContexts match { - case Array((context, k)) if backend.canExecuteParallelTasksOnDriver => - Try { - using(new LocalTaskContext(k, 0)) { htc => - using(htc.getRegionPool().getRegion()) { r => - val run = f(theDriverHailClassLoader, fs, htc, r) - val res = is.hail.services.retryTransientErrors { - run(r, context, globals) + } else { + val globalsBC = backend.broadcast(globals) 
+ val fsConfigBC = backend.broadcast(fs.getConfiguration()) + backend.parallelizeAndComputeWithIndex(backendContext, fs, contexts, stageName, tsd) { + (ctx, htc, theHailClassLoader, fs) => + val fsConfig = fsConfigBC.value + val gs = globalsBC.value + fs.setConfiguration(fsConfig) + htc.getRegionPool().scopedRegion { region => + f(theHailClassLoader, fs, htc, region)(region, ctx, gs) + } + } + } + + log.info(s"[collectDArray|$stageName]: executed ${contexts.length} tasks " + + s"in ${formatTime(System.nanoTime() - t)}" + ) + + results + case Some(cachedResults) => + val remainingContexts = + for { + c@(_, k) <- contexts.zipWithIndex + if !cachedResults.containsOrdered[Int](k, _ < _, _._2) + } yield c + val results = + if (remainingContexts.isEmpty) { + cachedResults + } else { + val backend = HailContext.backend + val f = getModule(modID) + + log.info( + s"[collectDArray|$stageName]: executing ${remainingContexts.length} tasks, " + + s"contexts size = ${formatSpace(contexts.map(_.length.toLong).sum)}, " + + s"globals size = ${formatSpace(globals.length)}" + ) + + val t = System.nanoTime() + val (failureOpt, successes) = + remainingContexts match { + case Array((context, k)) if backend.canExecuteParallelTasksOnDriver => + Try { + using(new LocalTaskContext(k, 0)) { htc => + using(htc.getRegionPool().getRegion()) { r => + val run = f(theDriverHailClassLoader, fs, htc, r) + val res = retryTransientErrors { + run(r, context, globals) + } + FastSeq(res -> k) } - FastSeq(res -> k) } } - } - .fold(t => (Some(t), IndexedSeq.empty), (None, _)) - - case _ => - val globalsBC = backend.broadcast(globals) - val fsConfigBC = backend.broadcast(fs.getConfiguration()) - val (failureOpt, successes) = - backend.parallelizeAndComputeWithIndex(backendContext, fs, remainingContexts, stageName, tsd) { - (ctx, htc, theHailClassLoader, fs) => + .fold(t => (Some(t), IndexedSeq.empty), (None, _)) + + case _ => + val globalsBC = backend.broadcast(globals) + val fsConfigBC = backend.broadcast(fs.getConfiguration()) + val (failureOpt, successes) = + backend.parallelizeAndComputeWithIndexReturnAllErrors(backendContext, fs, remainingContexts, stageName, tsd) { + (ctx, htc, theHailClassLoader, fs) => val fsConfig = fsConfigBC.value val gs = globalsBC.value fs.setConfiguration(fsConfig) htc.getRegionPool().scopedRegion { region => f(theHailClassLoader, fs, htc, region)(region, ctx, gs) } - } - (failureOpt, successes) - } + } + (failureOpt, successes) + } - log.info(s"[collectDArray|$stageName]: executed ${remainingContexts.length} tasks " + - s"in ${formatTime(System.nanoTime() - t)}" - ) + log.info(s"[collectDArray|$stageName]: executed ${remainingContexts.length} tasks " + + s"in ${formatTime(System.nanoTime() - t)}" + ) - val results = merge[(Array[Byte], Int)](cachedResults, successes.sortBy(_._2), _._2 < _._2) - semhash.foreach(s => backendContext.executionCache.put(s, results)) - failureOpt.foreach(throw _) + val results = merge[(Array[Byte], Int)](cachedResults, successes.sortBy(_._2), _._2 < _._2) + semhash.foreach(s => backendContext.executionCache.put(s, results)) + failureOpt.foreach(throw _) - results - } + results + } results.map(_._1).toArray } diff --git a/hail/src/main/scala/is/hail/backend/local/LocalBackend.scala b/hail/src/main/scala/is/hail/backend/local/LocalBackend.scala index b133f622bd4..96d73a34719 100644 --- a/hail/src/main/scala/is/hail/backend/local/LocalBackend.scala +++ b/hail/src/main/scala/is/hail/backend/local/LocalBackend.scala @@ -132,7 +132,24 @@ class LocalBackend( current } - override 
def parallelizeAndComputeWithIndex( + def parallelizeAndComputeWithIndex( + backendContext: BackendContext, + fs: FS, + collection: Array[Array[Byte]], + stageIdentifier: String, + dependency: Option[TableStageDependency] = None + )( + f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte] + ): Array[Array[Byte]] = { + val stageId = nextStageId() + collection.zipWithIndex.map { case (c, i) => + using(new LocalTaskContext(i, stageId)) { htc => + f(c, htc, theHailClassLoader, fs) + } + } + } + + override def parallelizeAndComputeWithIndexReturnAllErrors( backendContext: BackendContext, fs: FS, collection: IndexedSeq[(Array[Byte], Int)], diff --git a/hail/src/main/scala/is/hail/backend/service/ServiceBackend.scala b/hail/src/main/scala/is/hail/backend/service/ServiceBackend.scala index 53850607b10..205361f35fb 100644 --- a/hail/src/main/scala/is/hail/backend/service/ServiceBackend.scala +++ b/hail/src/main/scala/is/hail/backend/service/ServiceBackend.scala @@ -35,6 +35,7 @@ import scala.annotation.switch import scala.collection.mutable import scala.language.higherKinds import scala.reflect.ClassTag +import scala.collection.JavaConverters._ class ServiceBackendContext( val billingProject: String, @@ -151,14 +152,14 @@ class ServiceBackend( new String(bytes, StandardCharsets.UTF_8) } - override def parallelizeAndComputeWithIndex( + private[this] def submitAndWaitForBatch( _backendContext: BackendContext, fs: FS, - collection: IndexedSeq[(Array[Byte], Int)], + collection: Array[Array[Byte]], stageIdentifier: String, - dependency: Option[TableStageDependency] = None - )(f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte] - ): (Option[Throwable], IndexedSeq[(Array[Byte], Int)]) = { + dependency: Option[TableStageDependency] = None, + f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte] + ): (String, String, Int) = { val backendContext = _backendContext.asInstanceOf[ServiceBackendContext] val n = collection.length val token = tokenUrlSafe(32) @@ -179,17 +180,13 @@ class ServiceBackend( retryTransientErrors { fs.writePDOS(s"$root/contexts") { os => var o = 12L * n - - // write header of context offsets and lengths - for ((context, _) <- collection) { + collection.foreach { context => val len = context.length os.writeLong(o) os.writeInt(len) o += len } - - // write context arrays themselves - for ((context, _) <- collection) { + collection.foreach { context => os.write(context) } } @@ -199,7 +196,7 @@ class ServiceBackend( uploadFunction.get() uploadContexts.get() - val jobs = collection.map { case (_, i) => + val jobs = collection.zipWithIndex.map { case (_, i) => var resources = JObject("preemptible" -> JBool(true)) if (backendContext.workerCores != "None") { resources = resources.merge(JObject("cpu" -> JString(backendContext.workerCores))) @@ -210,7 +207,6 @@ class ServiceBackend( if (backendContext.storageRequirement != "0Gi") { resources = resources.merge(JObject("storage" -> JString(backendContext.storageRequirement))) } - JObject( "always_run" -> JBool(false), "job_id" -> JInt(i + 1), @@ -224,8 +220,7 @@ class ServiceBackend( JString(Main.WORKER), JString(root), JString(s"$i"), - JString(s"$n") - )), + JString(s"$n"))), "type" -> JString("jvm"), "profile" -> JBool(backendContext.profile), ), @@ -250,17 +245,14 @@ class ServiceBackend( val (batchId, updateId) = curBatchId match { case Some(id) => (id, batchClient.update(id, token, jobs)) - case None => val batchId = batchClient.create( JObject( "billing_project" -> 
JString(backendContext.billingProject), "n_jobs" -> JInt(n), "token" -> JString(token), - "attributes" -> JObject("name" -> JString(name + "_" + stageCount)) - ), - jobs - ) + "attributes" -> JObject("name" -> JString(name + "_" + stageCount))), + jobs) (batchId, 1L) } @@ -273,31 +265,68 @@ class ServiceBackend( throw new HailBatchFailure(s"Update $updateId for batch $batchId failed") } - log.info(s"parallelizeAndComputeWithIndex: $token: reading results") + (token, root, n) + } + + private[this] def readResult(root: String, i: Int): Array[Byte] = { + val bytes = fs.readNoCompression(s"$root/result.$i") + if (bytes(0) != 0) { + bytes.slice(1, bytes.length) + } else { + val errorInformationBytes = bytes.slice(1, bytes.length) + val is = new DataInputStream(new ByteArrayInputStream(errorInformationBytes)) + val shortMessage = readString(is) + val expandedMessage = readString(is) + val errorId = is.readInt() + throw new HailWorkerException(i, shortMessage, expandedMessage, errorId) + } + } + + override def parallelizeAndComputeWithIndex( + _backendContext: BackendContext, + fs: FS, + collection: Array[Array[Byte]], + stageIdentifier: String, + dependency: Option[TableStageDependency] = None + )( + f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte] + ): Array[Array[Byte]] = { + val (token, root, n) = submitAndWaitForBatch(_backendContext, fs, collection, stageIdentifier, dependency, f) + log.info(s"parallelizeAndComputeWithIndex: $token: reading results") val startTime = System.nanoTime() + val results = try { + executor.invokeAll[Array[Byte]]( + IndexedSeq.range(0, n).map { i => + (() => readResult(root, i)): Callable[Array[Byte]] + }.asJavaCollection + ).asScala.map(_.get).toArray + } catch { + case exc: ExecutionException if exc.getCause() != null => throw exc.getCause() + } + val resultsReadingSeconds = (System.nanoTime() - startTime) / 1000000000.0 + val rate = results.length / resultsReadingSeconds + val byterate = results.map(_.length).sum / resultsReadingSeconds / 1024 / 1024 + log.info(s"all results read. $resultsReadingSeconds s. $rate result/s. 
$byterate MiB/s.") + results + } + override def parallelizeAndComputeWithIndexReturnAllErrors( + _backendContext: BackendContext, + fs: FS, + collection: IndexedSeq[(Array[Byte], Int)], + stageIdentifier: String, + dependency: Option[TableStageDependency] = None + )(f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte] + ): (Option[Throwable], IndexedSeq[(Array[Byte], Int)]) = { + val (token, root, n) = submitAndWaitForBatch(_backendContext, fs, collection.map(_._1).toArray, stageIdentifier, dependency, f) + log.info(s"parallelizeAndComputeWithIndex: $token: reading results") + val startTime = System.nanoTime() val r@(_, results) = runAllKeepFirstError(executor) { - collection.map { case (_, i) => - ( - () => { - val bytes = fs.readNoCompression(s"$root/result.$i") - if (bytes(0) != 0) { - bytes.slice(1, bytes.length) - } else { - val errorInformationBytes = bytes.slice(1, bytes.length) - val is = new DataInputStream(new ByteArrayInputStream(errorInformationBytes)) - val shortMessage = readString(is) - val expandedMessage = readString(is) - val errorId = is.readInt() - throw new HailWorkerException(i, shortMessage, expandedMessage, errorId) - } - }, - i - ) + collection.zipWithIndex.map { case ((_, i), jobIndex) => + (() => readResult(root, jobIndex), i) } } - val resultsReadingSeconds = (System.nanoTime() - startTime) / 1000000000.0 val rate = results.length / resultsReadingSeconds val byterate = results.map(_._1.length).sum / resultsReadingSeconds / 1024 / 1024 diff --git a/hail/src/main/scala/is/hail/backend/spark/SparkBackend.scala b/hail/src/main/scala/is/hail/backend/spark/SparkBackend.scala index 7b48b641487..fdc1b3b64b2 100644 --- a/hail/src/main/scala/is/hail/backend/spark/SparkBackend.scala +++ b/hail/src/main/scala/is/hail/backend/spark/SparkBackend.scala @@ -369,18 +369,32 @@ class SparkBackend( } } - def broadcast[T : ClassTag](value: T): BroadcastValue[T] = new SparkBroadcastValue[T](sc.broadcast(value)) override def parallelizeAndComputeWithIndex( backendContext: BackendContext, fs: FS, - contexts: IndexedSeq[(Array[Byte], Int)], + collection: Array[Array[Byte]], stageIdentifier: String, dependency: Option[TableStageDependency] = None - )(f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte]) - : (Option[Throwable], IndexedSeq[(Array[Byte], Int)]) = { + )( + f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte] + ): Array[Array[Byte]] = { + val sparkDeps = dependency.toIndexedSeq + .flatMap(dep => dep.deps.map(rvdDep => new AnonymousDependency(rvdDep.asInstanceOf[RVDDependency].rvd.crdd.rdd))) + + new SparkBackendComputeRDD(sc, collection, f, sparkDeps).collect() + } + override def parallelizeAndComputeWithIndexReturnAllErrors( + backendContext: BackendContext, + fs: FS, + contexts: IndexedSeq[(Array[Byte], Int)], + stageIdentifier: String, + dependency: Option[TableStageDependency] = None + )( + f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte] + ): (Option[Throwable], IndexedSeq[(Array[Byte], Int)]) = { val sparkDeps = for {rvdDep <- dependency.toIndexedSeq; dep <- rvdDep.deps} yield new AnonymousDependency(dep.asInstanceOf[RVDDependency].rvd.crdd.rdd) @@ -409,6 +423,7 @@ class SparkBackend( override def compute(partition: Partition, context: TaskContext): Iterator[(Try[Array[Byte]], Int)] = { val sp = partition.asInstanceOf[TaggedRDDPartition] val fs = new HadoopFS(null) + // FIXME: this is broken: the partitionId of SparkTaskContext will be incorrect val result = Try(f(sp.data, SparkTaskContext.get(), 
theHailClassLoaderForSparkWorkers, fs)) Iterator.single((result, sp.tag)) } @@ -751,3 +766,23 @@ class SparkBackend( } } } + +case class SparkBackendComputeRDDPartition(data: Array[Byte], index: Int) extends Partition + +class SparkBackendComputeRDD( + sc: SparkContext, + @transient private val collection: Array[Array[Byte]], + f: (Array[Byte], HailTaskContext, HailClassLoader, FS) => Array[Byte], + deps: Seq[Dependency[_]] +) extends RDD[Array[Byte]](sc, deps) { + + override def getPartitions: Array[Partition] = { + Array.tabulate(collection.length)(i => SparkBackendComputeRDDPartition(collection(i), i)) + } + + override def compute(partition: Partition, context: TaskContext): Iterator[Array[Byte]] = { + val sp = partition.asInstanceOf[SparkBackendComputeRDDPartition] + val fs = new HadoopFS(null) + Iterator.single(f(sp.data, SparkTaskContext.get(), theHailClassLoaderForSparkWorkers, fs)) + } +} diff --git a/hail/src/main/scala/is/hail/io/vcf/LoadVCF.scala b/hail/src/main/scala/is/hail/io/vcf/LoadVCF.scala index 4324f15249c..73a4c3ccf3f 100644 --- a/hail/src/main/scala/is/hail/io/vcf/LoadVCF.scala +++ b/hail/src/main/scala/is/hail/io/vcf/LoadVCF.scala @@ -1693,7 +1693,7 @@ object MatrixVCFReader { val localFilterAndReplace = params.filterAndReplace val fsConfigBC = backend.broadcast(fs.getConfiguration()) - val (err, _) = backend.parallelizeAndComputeWithIndex(ctx.backendContext, fs, files.tail.map(_.getBytes).zipWithIndex, "load_vcf_parse_header", None) { (bytes, htc, _, fs) => + backend.parallelizeAndComputeWithIndex(ctx.backendContext, fs, files.tail.map(_.getBytes), "load_vcf_parse_header", None) { (bytes, htc, _, fs) => val fsConfig = fsConfigBC.value fs.setConfiguration(fsConfig) val file = new String(bytes) @@ -1735,8 +1735,6 @@ object MatrixVCFReader { bytes } - - err.foreach(throw _) } } diff --git a/hail/src/main/scala/is/hail/services/package.scala b/hail/src/main/scala/is/hail/services/package.scala index 4e454d853ed..ca906da40df 100644 --- a/hail/src/main/scala/is/hail/services/package.scala +++ b/hail/src/main/scala/is/hail/services/package.scala @@ -20,7 +20,7 @@ import com.google.api.client.googleapis.json.GoogleJsonResponseException import com.google.api.client.http.HttpResponseException package object services { - lazy val log: Logger = LogManager.getLogger("is.hail.services") + private lazy val log: Logger = LogManager.getLogger("is.hail.services") val RETRYABLE_HTTP_STATUS_CODES: Set[Int] = { val s = Set(408, 429, 500, 502, 503, 504) diff --git a/hail/src/main/scala/is/hail/utils/package.scala b/hail/src/main/scala/is/hail/utils/package.scala index ade36e71226..e672b4a19f0 100644 --- a/hail/src/main/scala/is/hail/utils/package.scala +++ b/hail/src/main/scala/is/hail/utils/package.scala @@ -1017,8 +1017,9 @@ package object utils extends Logging (err, buffer) } - def runAllKeepFirstError[A](executor: ExecutorService) - : IndexedSeq[(() => A, Int)] => (Option[Throwable], IndexedSeq[(A, Int)]) = + def runAllKeepFirstError[A]( + executor: ExecutorService + ): IndexedSeq[(() => A, Int)] => (Option[Throwable], IndexedSeq[(A, Int)]) = runAll[Option, A](executor) { case (opt, (e, _)) => opt.orElse(Some(e)) } (None) } diff --git a/hail/src/test/scala/is/hail/expr/ir/table/TableGenSuite.scala b/hail/src/test/scala/is/hail/expr/ir/table/TableGenSuite.scala index ed6b72c9584..567494007c5 100644 --- a/hail/src/test/scala/is/hail/expr/ir/table/TableGenSuite.scala +++ b/hail/src/test/scala/is/hail/expr/ir/table/TableGenSuite.scala @@ -9,6 +9,7 @@ import is.hail.rvd.RVDPartitioner 
import is.hail.types.virtual._ import is.hail.utils.{FastSeq, HailException, Interval} import is.hail.{ExecStrategy, HailSuite} +import org.apache.spark.SparkException import org.apache.spark.sql.Row import org.scalatest.Matchers._ import org.testng.annotations.Test @@ -116,11 +117,11 @@ class TableGenSuite extends HailSuite { errorId = Some(errorId) )) val lowered = LowerTableIR(table, DArrayLowering.All, ctx, LoweringAnalyses(table, ctx)) - val ex = intercept[HailException] { + val ex = intercept[SparkException] { ExecuteContext.scoped() { ctx => loweredExecute(ctx, lowered, Env.empty, FastSeq(), None) } - } + }.getCause.asInstanceOf[HailException] ex.errorId shouldBe errorId ex.getMessage should include("TableGen: Unexpected key in partition") From 08909acfa5e03360edb6f9c69ed570200e0a168d Mon Sep 17 00:00:00 2001 From: Dan King Date: Thu, 21 Dec 2023 12:38:18 -0500 Subject: [PATCH 45/48] [hailctl] fix parsing in hailctl hdinsight start (#14110) CHANGELOG: Fix `hailctl hdinsight start`, which has been broken since 0.2.118. The shift to typer/click accidentally converted this parameter from a string to an int. --- hail/python/hailtop/hailctl/hdinsight/start.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hail/python/hailtop/hailctl/hdinsight/start.py b/hail/python/hailtop/hailctl/hdinsight/start.py index 61476c62637..dafa1fffb18 100644 --- a/hail/python/hailtop/hailctl/hdinsight/start.py +++ b/hail/python/hailtop/hailctl/hdinsight/start.py @@ -69,7 +69,7 @@ def start( '--location', location, '--workernode-count', - num_workers, + str(num_workers), '--ssh-password', sshuser_password, '--ssh-user', From 0dcc17ff24564b6f5592261d7975e8afd0f95de7 Mon Sep 17 00:00:00 2001 From: Dan King Date: Thu, 28 Dec 2023 23:17:59 -0500 Subject: [PATCH 46/48] Merge pull request from GHSA-487p-qx68-5vjw --- auth/auth/auth.py | 9 +++--- hail/python/hailtop/auth/flow.py | 49 ++++++++++++++++++++++++++------ 2 files changed, 45 insertions(+), 13 deletions(-) diff --git a/auth/auth/auth.py b/auth/auth/auth.py index 2087ca8e2f3..e41bcec51dc 100644 --- a/auth/auth/auth.py +++ b/auth/auth/auth.py @@ -59,7 +59,6 @@ uvloop.install() CLOUD = get_global_config()['cloud'] -ORGANIZATION_DOMAIN = os.environ['HAIL_ORGANIZATION_DOMAIN'] DEFAULT_NAMESPACE = os.environ['HAIL_DEFAULT_NAMESPACE'] is_test_deployment = DEFAULT_NAMESPACE != 'default' @@ -333,7 +332,8 @@ async def callback(request) -> web.Response: cleanup_session(session) try: - flow_result = request.app[AppKeys.FLOW_CLIENT].receive_callback(request, flow_dict) + flow_client = request.app[AppKeys.FLOW_CLIENT] + flow_result = flow_client.receive_callback(request, flow_dict) login_id = flow_result.login_id except asyncio.CancelledError: raise @@ -352,10 +352,11 @@ async def callback(request) -> web.Response: assert caller == 'signup' - username, domain = flow_result.email.split('@') + username, _ = flow_result.unverified_email.split('@') username = ''.join(c for c in username if c.isalnum()) - if domain != ORGANIZATION_DOMAIN: + assert flow_client.organization_id() is not None + if flow_result.organization_id != flow_client.organization_id(): raise web.HTTPUnauthorized() try: diff --git a/hail/python/hailtop/auth/flow.py b/hail/python/hailtop/auth/flow.py index 016e6a2e6ee..2d84e6fca74 100644 --- a/hail/python/hailtop/auth/flow.py +++ b/hail/python/hailtop/auth/flow.py @@ -4,6 +4,7 @@ from cryptography.hazmat.primitives import serialization import json import logging +import os import urllib.parse from typing import Any, Dict, List, Mapping, 
Optional, TypedDict, ClassVar @@ -21,13 +22,26 @@ class FlowResult: - def __init__(self, login_id: str, email: str, token: Mapping[Any, Any]): + def __init__(self, + login_id: str, + unverified_email: str, + organization_id: Optional[str], + token: Mapping[Any, Any]): self.login_id = login_id - self.email = email + self.unverified_email = unverified_email + self.organization_id = organization_id # In Azure, a Tenant ID. In Google, a domain name. self.token = token class Flow(abc.ABC): + @abc.abstractmethod + async def organization_id(self) -> str: + """ + The unique identifier of the organization (e.g. Azure Tenant, Google Organization) in + which this Hail Batch instance lives. + """ + raise NotImplementedError + @abc.abstractmethod def initiate_flow(self, redirect_uri: str) -> dict: """ @@ -64,7 +78,6 @@ async def get_identity_uid_from_access_token(session: httpx.ClientSession, acces """ raise NotImplementedError - class GoogleFlow(Flow): scopes: ClassVar[List[str]] = [ 'https://www.googleapis.com/auth/userinfo.profile', @@ -75,6 +88,11 @@ class GoogleFlow(Flow): def __init__(self, credentials_file: str): self._credentials_file = credentials_file + def organization_id(self) -> str: + if organization_id := os.environ.get('HAIL_ORGANIZATION_DOMAIN'): + return organization_id + raise ValueError('Only available in the auth pod') + def initiate_flow(self, redirect_uri: str) -> dict: flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file( self._credentials_file, scopes=GoogleFlow.scopes, state=None @@ -98,7 +116,7 @@ def receive_callback(self, request: aiohttp.web.Request, flow_dict: dict) -> Flo flow.credentials.id_token, google.auth.transport.requests.Request() # type: ignore ) email = token['email'] - return FlowResult(email, email, token) + return FlowResult(email, email, token.get('hd'), token) @staticmethod def perform_installed_app_login_flow(oauth2_client: Dict[str, Any]) -> Dict[str, Any]: @@ -134,12 +152,12 @@ async def get_identity_uid_from_access_token(session: httpx.ClientSession, acces if not (is_human_with_hail_audience or is_service_account): return None - email = userinfo['email'] - if email.endswith('iam.gserviceaccount.com'): + domain = userinfo.get('hd') + if domain == 'iam.gserviceaccount.com': return userinfo['sub'] # We don't currently track user's unique GCP IAM ID (sub) in the database, just their email, # but we should eventually use the sub as that is guaranteed to be unique to the user. 
- return email + return userinfo['email'] except httpx.ClientResponseError as e: if e.status in (400, 401): return None @@ -163,8 +181,16 @@ def __init__(self, credentials_file: str): self._client = msal.ConfidentialClientApplication(data['appId'], data['password'], authority) self._tenant_id = tenant_id + def organization_id(self) -> str: + return self._tenant_id + def initiate_flow(self, redirect_uri: str) -> dict: - flow = self._client.initiate_auth_code_flow(scopes=[], redirect_uri=redirect_uri) + flow = self._client.initiate_auth_code_flow( + scopes=[], # confusingly, scopes=[] is the only way to get the openid, profile, and + # offline_access scopes + # https://github.com/AzureAD/microsoft-authentication-library-for-python/blob/dev/msal/application.py#L568-L580 + redirect_uri=redirect_uri + ) return { 'flow': flow, 'authorization_url': flow['auth_uri'], @@ -184,7 +210,12 @@ def receive_callback(self, request: aiohttp.web.Request, flow_dict: dict) -> Flo if tid != self._tenant_id: raise ValueError('invalid tenant id') - return FlowResult(token['id_token_claims']['oid'], token['id_token_claims']['preferred_username'], token) + return FlowResult( + token['id_token_claims']['oid'], + token['id_token_claims']['preferred_username'], + token['id_token_claims']['tid'], + token + ) @staticmethod def perform_installed_app_login_flow(oauth2_client: Dict[str, Any]) -> Dict[str, Any]: From dfad9c80ae5f6ed51f746ef8490d1ad2dda69d1d Mon Sep 17 00:00:00 2001 From: Dan King Date: Fri, 29 Dec 2023 01:21:56 -0500 Subject: [PATCH 47/48] [auth] hd is not present for iam.gserviceaccount.com (#14114) This would seem to violate this [statement about `hd`](https://developers.google.com/identity/openid-connect/openid-connect#id_token-hd): > The absence of this claim indicates that the account does not belong to a Google hosted domain. but alas, that's the truth. They elide `hd` for iam.gserviceaccount.com accounts. --- hail/python/hailtop/auth/flow.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hail/python/hailtop/auth/flow.py b/hail/python/hailtop/auth/flow.py index 2d84e6fca74..c8268afa71d 100644 --- a/hail/python/hailtop/auth/flow.py +++ b/hail/python/hailtop/auth/flow.py @@ -152,8 +152,8 @@ async def get_identity_uid_from_access_token(session: httpx.ClientSession, acces if not (is_human_with_hail_audience or is_service_account): return None - domain = userinfo.get('hd') - if domain == 'iam.gserviceaccount.com': + email = userinfo['email'] + if email.endswith('iam.gserviceaccount.com'): return userinfo['sub'] # We don't currently track user's unique GCP IAM ID (sub) in the database, just their email, # but we should eventually use the sub as that is guaranteed to be unique to the user. 
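
An illustrative sketch of the behavior behind this fix, for readers of the patch above: Google omits the `hd` claim from the userinfo of iam.gserviceaccount.com identities, so an `hd`-based comparison silently misclassifies service accounts as human users, while the email suffix is present for both kinds of account. The `userinfo` shape is assumed from the diff above and the helper name `identity_uid` is hypothetical, not part of the patched module.

    from typing import Any, Dict, Optional

    def identity_uid(userinfo: Dict[str, Any]) -> Optional[str]:
        # Google elides `hd` for iam.gserviceaccount.com accounts, so
        # `userinfo.get('hd') == 'iam.gserviceaccount.com'` never matches a
        # service account; the email suffix distinguishes them reliably.
        email = userinfo['email']
        if email.endswith('iam.gserviceaccount.com'):
            # Service accounts: return the stable unique id (`sub`).
            return userinfo['sub']
        # Human users: the database currently keys users by email.
        return email
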
From da6668bfd58fe915c54f052844db18975ec7abc1 Mon Sep 17 00:00:00 2001 From: Dan King Date: Tue, 2 Jan 2024 11:06:45 -0500 Subject: [PATCH 48/48] [hailtop/auth] fix lint (#14115) --- ci/pinned-requirements.txt | 2 +- hail/python/hailtop/auth/flow.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/pinned-requirements.txt b/ci/pinned-requirements.txt index 6ada8c4918f..9bcfd442c2c 100644 --- a/ci/pinned-requirements.txt +++ b/ci/pinned-requirements.txt @@ -30,7 +30,7 @@ cryptography==41.0.7 # via # -c hail/ci/../hail/python/pinned-requirements.txt # pyjwt -distro==1.8.0 +distro==1.9.0 # via zulip gidgethub==5.3.0 # via -r hail/ci/requirements.txt diff --git a/hail/python/hailtop/auth/flow.py b/hail/python/hailtop/auth/flow.py index c8268afa71d..e74a2f9d8ed 100644 --- a/hail/python/hailtop/auth/flow.py +++ b/hail/python/hailtop/auth/flow.py @@ -35,7 +35,7 @@ def __init__(self, class Flow(abc.ABC): @abc.abstractmethod - async def organization_id(self) -> str: + def organization_id(self) -> str: """ The unique identifier of the organization (e.g. Azure Tenant, Google Organization) in which this Hail Batch instance lives.