From 2d885992fc747cb4a019b697c4d68d2b84c6e853 Mon Sep 17 00:00:00 2001 From: Doug Latornell Date: Sat, 4 Jan 2025 11:47:09 -0800 Subject: [PATCH 1/3] Add pre-commit to manage code style & repo QA Introduced `.pre-commit-config.yaml` to configure pre-commit hooks, including Black for code formatting and other QA checks. Updated dependencies and environments to include `pre-commit` and `black` for consistent coding standards. pre-commit hooks: * Code formatting by black * Trim trailing whitespace * Ensure that files are either empty, or end with one newline * Confirm that YAML files have parsable syntax * Confirm that TOML files have parsable syntax * Prevent files larger than 500 kB from being committed --- .pre-commit-config.yaml | 21 ++++++++++++++++++ SalishSeaTools/envs/environment-dev.yaml | 4 ++++ SalishSeaTools/envs/requirements.txt | 28 ++++++++++++++++++------ 3 files changed, 46 insertions(+), 7 deletions(-) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..21e6b879 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,21 @@ +# Git pre-commit hooks config file +# Only takes effect if you have pre-commit installed in the env, +# and after you run `pre-commit install` +# +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: + # Out-of-the-box hooks from the pre-commit org + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-toml + - id: check-added-large-files + # Code formatting with black + - repo: https://github.com/psf/black + rev: 24.10.0 + hooks: + - id: black diff --git a/SalishSeaTools/envs/environment-dev.yaml b/SalishSeaTools/envs/environment-dev.yaml index 7d3cffb2..914c1476 100644 --- a/SalishSeaTools/envs/environment-dev.yaml +++ b/SalishSeaTools/envs/environment-dev.yaml @@ -46,6 +46,10 @@ dependencies: - tqdm - xarray + # For coding style and repo QA + - black + - pre-commit + # For unit tests - coverage - pytest-cov diff --git a/SalishSeaTools/envs/requirements.txt b/SalishSeaTools/envs/requirements.txt index b725ca40..291dce20 100644 --- a/SalishSeaTools/envs/requirements.txt +++ b/SalishSeaTools/envs/requirements.txt @@ -19,14 +19,17 @@ attrs==24.3.0 autopage==0.5.2 babel==2.16.0 beautifulsoup4==4.12.3 +black==24.10.0 bleach==6.2.0 Bottleneck==1.4.2 Brotli==1.1.0 cached-property==1.5.2 certifi==2024.12.14 cffi==1.17.1 +cfgv==3.3.1 cftime==1.6.4 charset-normalizer==3.4.0 +click==8.1.8 cliff==4.8.0 cmd2==2.5.8 cmocean==4.0.3 @@ -34,11 +37,12 @@ colorama==0.4.6 colorspacious==1.1.2 comm==0.2.2 contourpy==1.3.1 -coverage==7.6.9 +coverage==7.6.10 cycler==0.12.1 debugpy==1.8.11 decorator==5.1.1 defusedxml==0.7.1 +distlib==0.3.9 docutils==0.21.2 entrypoints==0.4 et_xmlfile==2.0.0 @@ -46,6 +50,7 @@ exceptiongroup==1.2.2 executing==2.1.0 f90nml==1.4.4 fastjsonschema==2.21.1 +filelock==3.16.1 fonttools==4.55.3 fqdn==1.5.1 gsw==3.6.19 @@ -57,6 +62,7 @@ hpack==4.0.0 httpcore==1.0.7 httpx==0.28.1 hyperframe==6.0.1 +identify==2.6.4 idna==3.10 imagesize==1.4.1 importlib_metadata==8.5.0 @@ -64,7 +70,7 @@ importlib_resources==6.4.5 iniconfig==2.0.0 ipdb==0.13.13 ipykernel==6.29.5 -ipython==8.30.0 +ipython==8.31.0 isoduration==20.11.0 jedi==0.19.2 Jinja2==3.1.5 @@ -88,20 +94,23 @@ matplotlib==3.10.0 matplotlib-inline==0.1.7 mistune==3.0.2 munkres==1.1.4 +mypy_extensions==1.0.0 nbclient==0.10.1 nbconvert==7.16.4 
nbformat==5.10.4 nbsphinx==0.9.5 nest_asyncio==1.6.0 netCDF4==1.7.2 +nodeenv==1.9.1 notebook_shim==0.2.4 -numpy==2.2.0 +numpy==2.2.1 openpyxl==3.1.5 overrides==7.7.0 packaging==24.2 -pandas==2.2.2 +pandas==2.2.3 pandocfilters==1.5.0 parso==0.8.4 +pathspec==0.12.1 pbr==6.1.0 pexpect==4.9.0 pickleshare==0.7.5 @@ -110,6 +119,7 @@ pip==24.3.1 pkgutil_resolve_name==1.3.10 platformdirs==4.3.6 pluggy==1.5.0 +pre_commit==4.0.1 prettytable==3.12.0 prometheus_client==0.21.1 prompt_toolkit==3.0.48 @@ -123,9 +133,11 @@ pyperclip==1.9.0 PySide6==6.8.1 PySocks==1.7.1 pytest==8.3.4 +pytest-cov==6.0.0 +pytest-randomly==3.15.0 python-dateutil==2.9.0.post0 python-json-logger==2.0.7 -pytz==2024.2 +pytz==2024.1 PyYAML==6.0.2 pyzmq==26.2.0 referencing==0.35.1 @@ -135,7 +147,7 @@ rfc3339_validator==0.1.4 rfc3986-validator==0.1.1 rpds-py==0.22.3 SalishSeaTools==24.1.dev0 -scipy==1.14.1 +scipy==1.15.0 Send2Trash==1.8.3 setuptools==75.6.0 shiboken6==6.8.1 @@ -165,14 +177,16 @@ types-python-dateutil==2.9.0.20241206 typing_extensions==4.12.2 typing_utils==0.1.0 tzdata==2024.2 +ukkonen==1.0.1 unicodedata2==15.1.0 uri-template==1.3.0 urllib3==2.2.3 +virtualenv==20.28.1 wcwidth==0.2.13 webcolors==24.11.1 webencodings==0.5.1 websocket-client==1.8.0 wheel==0.45.1 -xarray==2024.11.0 +xarray==2025.1.0 zipp==3.21.0 zstandard==0.23.0 From 0747104a86844941d5dcdb83a67d6a5c1bb21e60 Mon Sep 17 00:00:00 2001 From: Doug Latornell Date: Sat, 4 Jan 2025 12:03:19 -0800 Subject: [PATCH 2/3] Refactor code for consistency & formatting cleanup Code style gardening by the pre-commit hooks, especially black. Also fix CRLF line endings. --- .../Atmos/AtmosphereGridSelection.ipynb | 2 +- I_ForcingFiles/Atmos/NegativePrecip.ipynb | 2 +- .../Atmos/ProcessPramodArchive.ipynb | 2 +- I_ForcingFiles/Atmos/README.md | 110 +- I_ForcingFiles/Atmos/RadiationCheck.ipynb | 2 +- .../Atmos/VerifyAtmosphericForcing.ipynb | 2 +- I_ForcingFiles/Atmos/altitude.py | 141 +- I_ForcingFiles/Atmos/convert_files.sh | 2 +- I_ForcingFiles/Atmos/correct_pressure.py | 42 +- I_ForcingFiles/Atmos/gribTnetcdf.ipynb | 2 +- I_ForcingFiles/Atmos/make_readme.py | 3 +- I_ForcingFiles/Initial/make_readme.py | 44 +- I_ForcingFiles/Initial/renameRestartVars.sh | 1 - .../LookAtDianeWendysFile.ipynb | 2 +- .../LookatInitialForcingFiles.ipynb | 2 +- .../LookAtOthersFiles/make_readme.py | 44 +- .../OBC/JohnstoneStraitBoundary.ipynb | 2 +- ...ardy_harmonics_31-Dec-2004_02-Jan-2006.csv | 2 +- ...ardy_harmonics_31-Dec-2007_02-Jan-2009.csv | 2 +- ...ardy_harmonics_31-Dec-2010_02-Jan-2012.csv | 2 +- ..._tide_compare8_31-Dec-2005_02-Jan-2007.csv | 2 +- ..._tide_compare8_31-Dec-2008_02-Jan-2010.csv | 2 +- ..._tide_compare8_31-Dec-2011_02-Jan-2013.csv | 2 +- I_ForcingFiles/OBC/TS_OBC_Softstart.ipynb | 2 +- ...fino_harmonics_31-Dec-2004_02-Jan-2006.csv | 2 +- ...fino_harmonics_31-Dec-2005_02-Jan-2007.csv | 2 +- ...fino_harmonics_31-Dec-2007_02-Jan-2009.csv | 2 +- ...fino_harmonics_31-Dec-2010_02-Jan-2012.csv | 2 +- ..._tide_compare8_31-Dec-2005_02-Jan-2007.csv | 2 +- ..._tide_compare8_31-Dec-2008_02-Jan-2010.csv | 2 +- ..._tide_compare8_31-Dec-2011_02-Jan-2013.csv | 2 +- I_ForcingFiles/OBC/create_TEOS-10_BCs.py | 87 +- I_ForcingFiles/OBC/get_tides.m | 6 +- I_ForcingFiles/OBC/make_readme.py | 48 +- I_ForcingFiles/OBC/reshape_BCs.py | 36 +- I_ForcingFiles/Rivers/DailyRiverFlows.py | 598 +-- I_ForcingFiles/Rivers/make_readme.py | 44 +- I_ForcingFiles/Rivers/todo.list | 2 +- I_ForcingFiles/Tides/Prepare Tide Files.ipynb | 2 +- I_ForcingFiles/Tides/make_readme.py | 44 +- 
I_ForcingFiles/Tides/webtide_tools.py | 634 +-- Run_Files/AMM_multi.pbs | 2 +- Run_Files/GYRE.pbs | 2 +- Run_Files/arch-salish.fcm | 1 - SOGTools/notebooks/SOG_plotting.ipynb | 2 +- SOGTools/sog_tools/SOG_loader.py | 210 +- SOGTools/sog_tools/carbonate.py | 258 +- SalishSeaTools/__pkg_metadata__.py | 8 +- SalishSeaTools/docs/installation.rst | 1 - .../notebooks/visualisations/make_readme.py | 48 +- .../salishsea_tools/DFOOPDB_to_pandas.py | 593 ++- .../salishsea_tools/LiveOcean_BCs.py | 298 +- .../salishsea_tools/LiveOcean_SQL.py | 1452 ++++--- .../salishsea_tools/LiveOcean_grid.py | 82 +- .../salishsea_tools/UBC_subdomain.py | 125 +- SalishSeaTools/salishsea_tools/bathy_tools.py | 100 +- SalishSeaTools/salishsea_tools/bio_tools.py | 251 +- .../salishsea_tools/bloomdrivers.py | 807 ++-- SalishSeaTools/salishsea_tools/data_tools.py | 30 +- .../salishsea_tools/diagnosis_tools.py | 81 +- SalishSeaTools/salishsea_tools/ellipse.py | 141 +- SalishSeaTools/salishsea_tools/evaltools.py | 3737 +++++++++++------ .../salishsea_tools/formatting_tools.py | 23 +- SalishSeaTools/salishsea_tools/geo_tools.py | 170 +- SalishSeaTools/salishsea_tools/grid_tools.py | 108 +- SalishSeaTools/salishsea_tools/gsw_calls.py | 128 +- SalishSeaTools/salishsea_tools/hg_commands.py | 33 +- SalishSeaTools/salishsea_tools/loadDataFRP.py | 763 ++-- .../salishsea_tools/metric_tools_5x5.py | 74 +- SalishSeaTools/salishsea_tools/namelist.py | 21 +- SalishSeaTools/salishsea_tools/nc_tools.py | 423 +- .../salishsea_tools/onc_sog_adcps.py | 77 +- SalishSeaTools/salishsea_tools/places.py | 1158 ++--- SalishSeaTools/salishsea_tools/psu_tools.py | 25 +- .../salishsea_tools/river_201702.py | 1852 +++++--- .../salishsea_tools/river_202101.py | 2215 +++++++--- .../salishsea_tools/river_202108.py | 2239 +++++++--- .../salishsea_tools/river_downbyone2.py | 1634 +++++-- .../salishsea_tools/river_sss150.py | 842 ++-- SalishSeaTools/salishsea_tools/rivertools.py | 235 +- SalishSeaTools/salishsea_tools/stormtools.py | 409 +- SalishSeaTools/salishsea_tools/tidetools.py | 1096 ++--- .../salishsea_tools/timeseries_tools.py | 123 +- .../salishsea_tools/unit_conversions.py | 55 +- SalishSeaTools/salishsea_tools/utilities.py | 12 +- .../salishsea_tools/visualisations.py | 361 +- SalishSeaTools/salishsea_tools/viz_tools.py | 228 +- SalishSeaTools/salishsea_tools/wind_tools.py | 66 +- SalishSeaTools/setup.py | 78 +- SalishSeaTools/tests/conftest.py | 4 +- SalishSeaTools/tests/test_bathy_tools.py | 37 +- SalishSeaTools/tests/test_data_tools.py | 63 +- SalishSeaTools/tests/test_hg_commands.py | 96 +- SalishSeaTools/tests/test_namelist.py | 328 +- SalishSeaTools/tests/test_nc_tools.py | 570 ++- SalishSeaTools/tests/test_stormtools.py | 28 +- SalishSeaTools/tests/test_teos_tools.py | 57 +- SalishSeaTools/tests/test_tidetools.py | 22 +- SalishSeaTools/tests/test_unit_conversions.py | 152 +- SalishSeaTools/tests/test_viz_tools.py | 219 +- SalishSeaTools/tests/test_wind_tools.py | 129 +- analysis_tools/make_readme.py | 54 +- .../old_notebooks/GYRE_openNC_plot.ipynb | 2 +- .../old_notebooks/NancysCurrents.ipynb | 2 +- .../old_notebooks/SusansViewerWQuiver.ipynb | 2 +- .../old_notebooks/Tidal Movie.ipynb | 2 +- .../Vertical Tracer Cross-sections.ipynb | 2 +- .../old_notebooks/WCSD_openNC_plot.ipynb | 2 +- analysis_tools/old_notebooks/make_readme.py | 44 +- bathymetry/BathyZeroTobaetc.ipynb | 2 +- bathymetry/More Smoothing.ipynb | 2 +- bathymetry/README.md | 182 +- bathymetry/SalishSeaSubdomainBathy.ipynb | 2 +- bathymetry/TowardSmoothing.ipynb | 2 +- 
bathymetry/agrif/fix_bathy.py | 14 +- bathymetry/bathyImage.ipynb | 2 +- bathymetry/bathy_for_jie.ipynb | 2 +- bathymetry/make_readme.py | 3 +- bathymetry/netCDF4bathy.ipynb | 2 +- bathymetry/thalweg_working.txt | 2 - docs/breaking_changes.rst | 1 - docs/conf.py | 133 +- nocscombine/Makefile.aix | 12 +- nocscombine/Makefile.nautilus | 14 +- nocscombine/Makefile.novel | 14 +- nocscombine/README.nocscombine | 1 - nocscombine/handle_err.F90 | 2 +- nocscombine/make_global_file.F90 | 28 +- nocscombine/ncfixcoord.F90 | 10 +- nocscombine/ncread_and_collate.F90 | 44 +- nocscombine/nocscombine.F90 | 40 +- update_copyright.py | 23 +- 132 files changed, 16862 insertions(+), 10101 deletions(-) diff --git a/I_ForcingFiles/Atmos/AtmosphereGridSelection.ipynb b/I_ForcingFiles/Atmos/AtmosphereGridSelection.ipynb index 37842535..b8852859 100644 --- a/I_ForcingFiles/Atmos/AtmosphereGridSelection.ipynb +++ b/I_ForcingFiles/Atmos/AtmosphereGridSelection.ipynb @@ -264,4 +264,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/Atmos/NegativePrecip.ipynb b/I_ForcingFiles/Atmos/NegativePrecip.ipynb index 84be9f79..adff8086 100644 --- a/I_ForcingFiles/Atmos/NegativePrecip.ipynb +++ b/I_ForcingFiles/Atmos/NegativePrecip.ipynb @@ -179,4 +179,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/Atmos/ProcessPramodArchive.ipynb b/I_ForcingFiles/Atmos/ProcessPramodArchive.ipynb index d8b8a91e..dcf4dd89 100644 --- a/I_ForcingFiles/Atmos/ProcessPramodArchive.ipynb +++ b/I_ForcingFiles/Atmos/ProcessPramodArchive.ipynb @@ -1308,4 +1308,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/Atmos/README.md b/I_ForcingFiles/Atmos/README.md index fa62ceda..25e53315 100644 --- a/I_ForcingFiles/Atmos/README.md +++ b/I_ForcingFiles/Atmos/README.md @@ -6,28 +6,28 @@ The links below are to static renderings of the notebooks via Descriptions below the links are from the first cell of the notebooks (if that cell contains Markdown or raw text). -* ## [GetGrib.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/GetGrib.ipynb) - +* ## [GetGrib.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/GetGrib.ipynb) + Notebook to design script to download GRIB2 data from EC webpage -* ## [NegativePrecip.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/NegativePrecip.ipynb) - -* ## [InitialGEMCheck.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/InitialGEMCheck.ipynb) - +* ## [NegativePrecip.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/NegativePrecip.ipynb) + +* ## [InitialGEMCheck.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/InitialGEMCheck.ipynb) + **Initial Check of GEM Products Forcing Data** - + This notebook is about initial checks and exploration of the 2.5 km grid GEM products atmospheric forcing dataset provided by Luc Fillion's group at EC Dorval. -* ## [RebaseCGRF.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/RebaseCGRF.ipynb) - +* ## [RebaseCGRF.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/RebaseCGRF.ipynb) + **Rebasing CGRF Atmospheric Forcing Files** - + This notebook documents and verifies the algorithm for rebasing the [CGRF atmospheric forcing dataset][CGRF dataset] files. 
- + [CGRF dataset]: https://salishsea-meopar-docs.readthedocs.org/en/latest/code-notes/salishsea-nemo/nemo-forcing/atmospheric.html#cgrf-dataset - + The raw CGRF files contain hourly values that run from 06:00 UTC on the file's date to 06:00 UTC on the following day. For hourly forcing, @@ -39,102 +39,102 @@ Descriptions below the links are from the first cell of the notebooks by creating them as netCDF4 files with variable-level compression enabled. Metadata that conforms to [CF-1.6 and Salish Sea MEOPAR project conventions][netCDF4 conventions] is included in the new files. - + [netCDF4 conventions]: https://salishsea-meopar-docs.readthedocs.org/en/latest/code-notes/salishsea-nemo/nemo-forcing/netcdf4.html#netcdf4-file-conventions - + All of that processing is implemented in the [`salishsea get_cgrf`][salishsea get_cgrf] command. This notebook provides explanation of that code, and verification that the created files contain wind and precipitation forcing values that are consistent with observations at Sandheads and YVR. - + [salishsea get_cgrf]: https://salishsea-meopar-tools.readthedocs.org/en/latest/SalishSeaCmd/salishsea-cmd.html#get-cgrf-sub-command -* ## [ProcessPramodArchive.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/ProcessPramodArchive.ipynb) - +* ## [ProcessPramodArchive.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/ProcessPramodArchive.ipynb) + Notebook to process Pramod's archived grib files to produce our netcdf files -* ## [OriginalVelocities.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/OriginalVelocities.ipynb) - -* ## [CheckAltitude.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/CheckAltitude.ipynb) - +* ## [OriginalVelocities.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/OriginalVelocities.ipynb) + +* ## [CheckAltitude.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/CheckAltitude.ipynb) + This notebook checks that the altitude.py script generates a files that are reasonable. - + It also combines the monthly altitude calculation into one file by averaging. -* ## [VerifyAtmosphericForcing.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/VerifyAtmosphericForcing.ipynb) - +* ## [VerifyAtmosphericForcing.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/VerifyAtmosphericForcing.ipynb) + **Verification of Atmospheric Forcing** -* ## [CGridLocations.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/CGridLocations.ipynb) - -* ## [RadiationCheck.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/RadiationCheck.ipynb) - +* ## [CGridLocations.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/CGridLocations.ipynb) + +* ## [RadiationCheck.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/RadiationCheck.ipynb) + This notebook compares the longwave/shortwave radiation from the GEM2.5 model provided by - + 1. Pramod converted from grib2 format to netcdf covering all of Dec 2012 2. Kao-Shen at Environment Canada covering 1 hour on Dec 16, 2012 - + Question: Are the longwave/shortwave radiation variables in Pramod's netcdf files the ones we should be using? There are several radiation flux variables in the grib2 output. 
- + Description of grib2 variables https://weather.gc.ca/grib/HRDPS_HR/HRDPS_ps2p5km_P000_deterministic_e.html - + Plan: Comapre Pramod's radiation variables to Kao Shen's. -* ## [gribTnetcdf.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/gribTnetcdf.ipynb) - +* ## [gribTnetcdf.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/gribTnetcdf.ipynb) + Notebook to convert grib2 files to netCDF files that can be used in NEMO Makes use of wgrib2 -* ## [netCDF4weights-CGRF.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/netCDF4weights-CGRF.ipynb) - +* ## [netCDF4weights-CGRF.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/netCDF4weights-CGRF.ipynb) + **Convert Atmospheric Forcing Weights to netCDF4** - - Transfer the values from the `met_gem_weight.nc` + + Transfer the values from the `met_gem_weight.nc` from the 2-Oct-2013 `WC3_PREP` tarball into a netCDF4 file with zlib compression on variables and CF-1.6 conventions conformant attributes. -* ## [ImproveWeightsFile.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/ImproveWeightsFile.ipynb) - +* ## [ImproveWeightsFile.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/ImproveWeightsFile.ipynb) + **Improve Atmospheric Forcing Weights File** - + Transfer the values from a `met_gem_weight.nc` created by `NEMO_EastCoast/NEMO_Preparation/4_weights_ATMOS/get_weight_nemo` into a netCDF4 file with zlib compression on variables and CF-1.6 conventions conformant attributes. -* ## [CheckGridCoLocation.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/CheckGridCoLocation.ipynb) - +* ## [CheckGridCoLocation.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/CheckGridCoLocation.ipynb) + Are the ugrid, vgrid and temperature at the same points -* ## [NoSnowIce.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/NoSnowIce.ipynb) - +* ## [NoSnowIce.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/NoSnowIce.ipynb) + **Create `no_snow_ice.nc` File for NEMO Surface Forcing** - + Create a netCDF4 file containing 2 variables named `snow` and `ice`. The coordinates of the variable are `y` and `x`. The values of `snow` and `ice` at all points in the domain is floating point zero. - + The resulting `no_snow_ice.nc` file can be used as an annual climatology in NEMO atmospheric forcing that does not require on-the-fly interpolation. It imposes a no snow, ever condition on the NEMO configuration. The no ice, ever condition that it provides works in conjunction with the code in `sbcice_if.F90` contributed by Michael Dunphy - to provide a minimal ice-model substitute. + to provide a minimal ice-model substitute. In Michael's words, - > "The point is to make sure water temperatures don’t go below the local freezing point, + > "The point is to make sure water temperatures don’t go below the local freezing point, > and there are some limits on heat exchanges as well." 
-* ## [RotateVelocities.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/RotateVelocities.ipynb) - +* ## [RotateVelocities.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/RotateVelocities.ipynb) + Rotate the Wind -* ## [AtmosphereGridSelection.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/AtmosphereGridSelection.ipynb) - +* ## [AtmosphereGridSelection.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/AtmosphereGridSelection.ipynb) + Notebook to Look at Atmosphereic Domains and Choose Ours -* ## [CheckRotation.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/CheckRotation.ipynb) - +* ## [CheckRotation.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/I_ForcingFiles/Atmos/CheckRotation.ipynb) + ## License diff --git a/I_ForcingFiles/Atmos/RadiationCheck.ipynb b/I_ForcingFiles/Atmos/RadiationCheck.ipynb index 5940412c..aed814b7 100644 --- a/I_ForcingFiles/Atmos/RadiationCheck.ipynb +++ b/I_ForcingFiles/Atmos/RadiationCheck.ipynb @@ -778,4 +778,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/Atmos/VerifyAtmosphericForcing.ipynb b/I_ForcingFiles/Atmos/VerifyAtmosphericForcing.ipynb index 73cf7445..ba70efed 100644 --- a/I_ForcingFiles/Atmos/VerifyAtmosphericForcing.ipynb +++ b/I_ForcingFiles/Atmos/VerifyAtmosphericForcing.ipynb @@ -1545,4 +1545,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/Atmos/altitude.py b/I_ForcingFiles/Atmos/altitude.py index 83329b88..f23a4463 100644 --- a/I_ForcingFiles/Atmos/altitude.py +++ b/I_ForcingFiles/Atmos/altitude.py @@ -13,91 +13,94 @@ # See the License for the specific language governing permissions and # limitations under the License. -#A script to calculate the average grid cell height from CGRF atmospheric -#forcing files. One month of data should be given at a time. Saves a netcdf -#file with the averagre grid cell height. +# A script to calculate the average grid cell height from CGRF atmospheric +# forcing files. One month of data should be given at a time. Saves a netcdf +# file with the averagre grid cell height. -#Assumes atmosphere is a dry ideal gas in hydrostatic balance and has a -#constant lapse rate. See this notebook: +# Assumes atmosphere is a dry ideal gas in hydrostatic balance and has a +# constant lapse rate. See this notebook: # https://nbviewer.org/github/SalishSeaCast/analysis/blob/main/storm_surges/Pressure%20to%20sea%20level.ipynb import netCDF4 as NC import numpy as np -#define the month and year. +# define the month and year. start_day = 1 -end_day=31 -month=12 -year =2003 - -#define constants -R = 287 #ideal gas constant -g = 9.81 #gravity -gam = 0.0098 #lapse rate(deg/m) -p0=101000 #average sea surface heigh in Pa - -#path for CGRF data -CGRF = '/home/dlatorne/MEOPAR/CGRF/NEMO-atmos/' - -#function for calculating altitude -def altitude(P,T,p0): - #P is the pressure over the grid and T is the temperature - #p0 is the sea level pressure. An appropriate choice is 101000 Pa. 
- z = T/gam*((P/p0)**(-gam*R/g) -1 ) +end_day = 31 +month = 12 +year = 2003 + +# define constants +R = 287 # ideal gas constant +g = 9.81 # gravity +gam = 0.0098 # lapse rate(deg/m) +p0 = 101000 # average sea surface heigh in Pa + +# path for CGRF data +CGRF = "/home/dlatorne/MEOPAR/CGRF/NEMO-atmos/" + + +# function for calculating altitude +def altitude(P, T, p0): + # P is the pressure over the grid and T is the temperature + # p0 is the sea level pressure. An appropriate choice is 101000 Pa. + z = T / gam * ((P / p0) ** (-gam * R / g) - 1) return z -#an array to store cumulative grid cell height -zcum =0 -for d in range(start_day,end_day+1): - #load CGRF data - m = "%02d" % (month,); da= "%02d" % (d,) - pstr = 'slp_y'+str(year)+'m' + str(m) +'d'+str(da)+'.nc' +# an array to store cumulative grid cell height +zcum = 0 + +for d in range(start_day, end_day + 1): + # load CGRF data + m = "%02d" % (month,) + da = "%02d" % (d,) + pstr = "slp_y" + str(year) + "m" + str(m) + "d" + str(da) + ".nc" print(pstr) - Tstr = 't2_y'+str(year)+'m' + str(m) +'d'+str(da)+'.nc' - C = NC.Dataset(CGRF+pstr,'r') - press=C.variables['atmpres'][:] - C = NC.Dataset(CGRF+Tstr,'r') - temp=C.variables['tair'][:] - - #average pressure and temperature over one day - pavg = np.mean(press,0) - tavg = np.mean(temp,0) - - #caclulate average grid cell altitude over one day - zavg=altitude(pavg,tavg,p0) - zcum=zcum+zavg - -Z = zcum/end_day -lat=C.variables['nav_lat'][:] -lon=C.variables['nav_lon'][:] - -#save in a netcdf file -alt_str= 'altitude_y' +str(year) +'m'+str(m)+'.nc' -alt_file = NC.Dataset(alt_str, 'w', zlib=True) + Tstr = "t2_y" + str(year) + "m" + str(m) + "d" + str(da) + ".nc" + C = NC.Dataset(CGRF + pstr, "r") + press = C.variables["atmpres"][:] + C = NC.Dataset(CGRF + Tstr, "r") + temp = C.variables["tair"][:] + + # average pressure and temperature over one day + pavg = np.mean(press, 0) + tavg = np.mean(temp, 0) + + # caclulate average grid cell altitude over one day + zavg = altitude(pavg, tavg, p0) + zcum = zcum + zavg + +Z = zcum / end_day +lat = C.variables["nav_lat"][:] +lon = C.variables["nav_lon"][:] + +# save in a netcdf file +alt_str = "altitude_y" + str(year) + "m" + str(m) + ".nc" +alt_file = NC.Dataset(alt_str, "w", zlib=True) # dataset attributes - can't get this to work -#nc_tools.init_dataset_attrs( +# nc_tools.init_dataset_attrs( # alt_file, # title='Average monthly altitude', # notebook='altitude.py', # nc_filepath=alt_str, # comment='Average altitude at each CGRF grid cell') -#dimensions -alt_file.createDimension('x', Z.shape[0]) -alt_file.createDimension('y', Z.shape[1]) -#lat/lon variables -nav_lat = alt_file.createVariable('nav_lat','float32',('x','y')) -nav_lat.long_name = 'Latitude' -nav_lat.units = 'degrees_north' -nav_lat[:]=lat -nav_lon = alt_file.createVariable('nav_lon','float32',('x','y')) -nav_lon.long_name = 'Longitude' -nav_lon.units = 'degrees_east' -nav_lon[:]=lon -#altitude -alt = alt_file.createVariable('alt','float32',('x','y')) -alt.long_name = 'Altitude' -alt.units = 'metres' -alt[:]=Z +# dimensions +alt_file.createDimension("x", Z.shape[0]) +alt_file.createDimension("y", Z.shape[1]) +# lat/lon variables +nav_lat = alt_file.createVariable("nav_lat", "float32", ("x", "y")) +nav_lat.long_name = "Latitude" +nav_lat.units = "degrees_north" +nav_lat[:] = lat +nav_lon = alt_file.createVariable("nav_lon", "float32", ("x", "y")) +nav_lon.long_name = "Longitude" +nav_lon.units = "degrees_east" +nav_lon[:] = lon +# altitude +alt = alt_file.createVariable("alt", "float32", ("x", 
"y")) +alt.long_name = "Altitude" +alt.units = "metres" +alt[:] = Z alt_file.close() diff --git a/I_ForcingFiles/Atmos/convert_files.sh b/I_ForcingFiles/Atmos/convert_files.sh index 6c11ff9e..2bb321eb 100755 --- a/I_ForcingFiles/Atmos/convert_files.sh +++ b/I_ForcingFiles/Atmos/convert_files.sh @@ -6,6 +6,6 @@ SAV=/ocean/nsoontie/MEOPAR/GEM2.5/ops/ for i in $( ls $DIR ); do PRE=$i TMP=$i - + python correct_pressure_ops.py $PRE $TMP $SAV done diff --git a/I_ForcingFiles/Atmos/correct_pressure.py b/I_ForcingFiles/Atmos/correct_pressure.py index c5ff5190..2959b361 100644 --- a/I_ForcingFiles/Atmos/correct_pressure.py +++ b/I_ForcingFiles/Atmos/correct_pressure.py @@ -13,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -#Script to correct CGRF pressure to sea level. -#Assumes atmosphere is a dry ideal gas in hydrostatic balance and has a -#constant lapse rate. See this notebook: +# Script to correct CGRF pressure to sea level. +# Assumes atmosphere is a dry ideal gas in hydrostatic balance and has a +# constant lapse rate. See this notebook: # https://nbviewer.org/github/SalishSeaCast/analysis/blob/main/storm_surges/Pressure%20to%20sea%20level.ipynb # Call like this: python correct_pressure.py pressure_file temperature_file save_directory @@ -24,7 +24,8 @@ import matplotlib.pyplot as plt import netCDF4 as nc import numpy as np -from salishsea_tools import (viz_tools, +from salishsea_tools import ( + viz_tools, bathy_tools, nc_tools, ) @@ -32,21 +33,22 @@ import arrow # Read from command line -pres_file=sys.argv[1] #pressure file -tmp_file=sys.argv[2] #temperature file -sav_dir=sys.argv[3] #directory for saving +pres_file = sys.argv[1] # pressure file +tmp_file = sys.argv[2] # temperature file +sav_dir = sys.argv[3] # directory for saving -#Grab time +# Grab time f = nc.Dataset(tmp_file) -time =f.variables['time_counter'] - -#generate strings for saving file -a=arrow.get(time.time_origin, 'YYYY-MMM-DD HH:mm:ss') -y=a.year; mo=a.month; da=a.day -m = "%02d" % (mo,); d= "%02d" % (da,) -sav_str = sav_dir + '/slp_corr_y'+str(y) + 'm'+str(m)+'d'+str(d)+'.nc' - -#generate the pressure file -nc_tools.generate_pressure_file(sav_str,pres_file,tmp_file,'altitude_CGRF.nc',a) - - +time = f.variables["time_counter"] + +# generate strings for saving file +a = arrow.get(time.time_origin, "YYYY-MMM-DD HH:mm:ss") +y = a.year +mo = a.month +da = a.day +m = "%02d" % (mo,) +d = "%02d" % (da,) +sav_str = sav_dir + "/slp_corr_y" + str(y) + "m" + str(m) + "d" + str(d) + ".nc" + +# generate the pressure file +nc_tools.generate_pressure_file(sav_str, pres_file, tmp_file, "altitude_CGRF.nc", a) diff --git a/I_ForcingFiles/Atmos/gribTnetcdf.ipynb b/I_ForcingFiles/Atmos/gribTnetcdf.ipynb index 35174bc9..f2d48828 100644 --- a/I_ForcingFiles/Atmos/gribTnetcdf.ipynb +++ b/I_ForcingFiles/Atmos/gribTnetcdf.ipynb @@ -510,4 +510,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/Atmos/make_readme.py b/I_ForcingFiles/Atmos/make_readme.py index 1a31d8df..f0f9e55c 100755 --- a/I_ForcingFiles/Atmos/make_readme.py +++ b/I_ForcingFiles/Atmos/make_readme.py @@ -23,6 +23,7 @@ and commit and push the updated `README.md` to GitHub. 
""" + import json from pathlib import Path import re @@ -37,7 +38,7 @@ def main(): cwd_parts = Path.cwd().parts - repo_path = Path(*cwd_parts[cwd_parts.index(REPO_NAME)+1:]) + repo_path = Path(*cwd_parts[cwd_parts.index(REPO_NAME) + 1 :]) url = f"{NBVIEWER}/{GITHUB_ORG}/{REPO_NAME}/blob/{DEFAULT_BRANCH_NAME}/{repo_path}" readme = f"""\ diff --git a/I_ForcingFiles/Initial/make_readme.py b/I_ForcingFiles/Initial/make_readme.py index 2c4aa06b..bd98def4 100644 --- a/I_ForcingFiles/Initial/make_readme.py +++ b/I_ForcingFiles/Initial/make_readme.py @@ -16,17 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. """ + import datetime import json import os import re -nbviewer = 'https://nbviewer.org/urls' -repo = 'github.com/SalishSeaCast/tools/blob/main' -repo_dir = 'I_ForcingFiles/Initial' +nbviewer = "https://nbviewer.org/urls" +repo = "github.com/SalishSeaCast/tools/blob/main" +repo_dir = "I_ForcingFiles/Initial" url = os.path.join(nbviewer, repo, repo_dir) -title_pattern = re.compile('#{1,6} ?') +title_pattern = re.compile("#{1,6} ?") readme = """This is a collection of Jupyter Notebooks for creating, manipulating, and visualizing initial conditions netCDF files. @@ -37,27 +38,24 @@ """ -notebooks = (fn for fn in os.listdir('./') if fn.endswith('ipynb')) +notebooks = (fn for fn in os.listdir("./") if fn.endswith("ipynb")) for fn in notebooks: - readme += '* ##[{fn}]({url}/{fn}) \n \n'.format(fn=fn, url=url) - with open(fn, 'rt') as notebook: + readme += "* ##[{fn}]({url}/{fn}) \n \n".format(fn=fn, url=url) + with open(fn, "rt") as notebook: contents = json.load(notebook) - first_cell_type = contents['worksheets'][0]['cells'][0]['cell_type'] - if first_cell_type in 'markdown raw'.split(): - desc_lines = contents['worksheets'][0]['cells'][0]['source'] + first_cell_type = contents["worksheets"][0]["cells"][0]["cell_type"] + if first_cell_type in "markdown raw".split(): + desc_lines = contents["worksheets"][0]["cells"][0]["source"] for line in desc_lines: - suffix = '' + suffix = "" if title_pattern.match(line): - line = title_pattern.sub('**', line) - suffix = '**' - if line.endswith('\n'): - readme += ( - ' {line}{suffix} \n' - .format(line=line[:-1], suffix=suffix)) + line = title_pattern.sub("**", line) + suffix = "**" + if line.endswith("\n"): + readme += " {line}{suffix} \n".format(line=line[:-1], suffix=suffix) else: - readme += ( - ' {line}{suffix} '.format(line=line, suffix=suffix)) - readme += '\n' * 2 + readme += " {line}{suffix} ".format(line=line, suffix=suffix) + readme += "\n" * 2 license = """ ##License @@ -68,7 +66,9 @@ They are licensed under the Apache License, Version 2.0. https://www.apache.org/licenses/LICENSE-2.0 Please see the LICENSE file for details of the license. 
-""".format(this_year=datetime.date.today().year) -with open('README.md', 'wt') as f: +""".format( + this_year=datetime.date.today().year +) +with open("README.md", "wt") as f: f.writelines(readme) f.writelines(license) diff --git a/I_ForcingFiles/Initial/renameRestartVars.sh b/I_ForcingFiles/Initial/renameRestartVars.sh index cf398d7e..a45b921d 100644 --- a/I_ForcingFiles/Initial/renameRestartVars.sh +++ b/I_ForcingFiles/Initial/renameRestartVars.sh @@ -21,4 +21,3 @@ ncrename -v sbc_O2_b,sbc_TRA_b $2 ncrename -v rnf_pis_O2_b,rnf_pis_TRA_b $2 ncrename -v TRNO2,TRNTRA $2 ncrename -v TRBO2,TRBTRA $2 - diff --git a/I_ForcingFiles/LookAtOthersFiles/LookAtDianeWendysFile.ipynb b/I_ForcingFiles/LookAtOthersFiles/LookAtDianeWendysFile.ipynb index ae12b8a0..c5da8d06 100644 --- a/I_ForcingFiles/LookAtOthersFiles/LookAtDianeWendysFile.ipynb +++ b/I_ForcingFiles/LookAtOthersFiles/LookAtDianeWendysFile.ipynb @@ -1194,4 +1194,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/LookAtOthersFiles/LookatInitialForcingFiles.ipynb b/I_ForcingFiles/LookAtOthersFiles/LookatInitialForcingFiles.ipynb index 46d61933..b5b72890 100644 --- a/I_ForcingFiles/LookAtOthersFiles/LookatInitialForcingFiles.ipynb +++ b/I_ForcingFiles/LookAtOthersFiles/LookatInitialForcingFiles.ipynb @@ -495,4 +495,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/LookAtOthersFiles/make_readme.py b/I_ForcingFiles/LookAtOthersFiles/make_readme.py index 4b3c8b02..51cfa966 100644 --- a/I_ForcingFiles/LookAtOthersFiles/make_readme.py +++ b/I_ForcingFiles/LookAtOthersFiles/make_readme.py @@ -16,17 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. """ + import datetime import json import os import re -nbviewer = 'https://nbviewer.org/urls' -repo = 'github.com/SalishSeaCast/tools/blob/main' -repo_dir = 'I_ForcingFiles/LookAtOthersFiles' +nbviewer = "https://nbviewer.org/urls" +repo = "github.com/SalishSeaCast/tools/blob/main" +repo_dir = "I_ForcingFiles/LookAtOthersFiles" url = os.path.join(nbviewer, repo, repo_dir) -title_pattern = re.compile('#{1,6} ?') +title_pattern = re.compile("#{1,6} ?") readme = """This is a collection of Jupyter Notebooks for visualizing initial conditions and forcing netCDF files from other groups. @@ -36,27 +37,24 @@ (if that cell contains Markdown or raw text). 
""" -notebooks = (fn for fn in os.listdir('./') if fn.endswith('ipynb')) +notebooks = (fn for fn in os.listdir("./") if fn.endswith("ipynb")) for fn in notebooks: - readme += '* ##[{fn}]({url}/{fn}) \n \n'.format(fn=fn, url=url) - with open(fn, 'rt') as notebook: + readme += "* ##[{fn}]({url}/{fn}) \n \n".format(fn=fn, url=url) + with open(fn, "rt") as notebook: contents = json.load(notebook) - first_cell_type = contents['worksheets'][0]['cells'][0]['cell_type'] - if first_cell_type in 'markdown raw'.split(): - desc_lines = contents['worksheets'][0]['cells'][0]['source'] + first_cell_type = contents["worksheets"][0]["cells"][0]["cell_type"] + if first_cell_type in "markdown raw".split(): + desc_lines = contents["worksheets"][0]["cells"][0]["source"] for line in desc_lines: - suffix = '' + suffix = "" if title_pattern.match(line): - line = title_pattern.sub('**', line) - suffix = '**' - if line.endswith('\n'): - readme += ( - ' {line}{suffix} \n' - .format(line=line[:-1], suffix=suffix)) + line = title_pattern.sub("**", line) + suffix = "**" + if line.endswith("\n"): + readme += " {line}{suffix} \n".format(line=line[:-1], suffix=suffix) else: - readme += ( - ' {line}{suffix} '.format(line=line, suffix=suffix)) - readme += '\n' * 2 + readme += " {line}{suffix} ".format(line=line, suffix=suffix) + readme += "\n" * 2 license = """ ##License @@ -67,7 +65,9 @@ They are licensed under the Apache License, Version 2.0. https://www.apache.org/licenses/LICENSE-2.0 Please see the LICENSE file for details of the license. -""".format(this_year=datetime.date.today().year) -with open('README.md', 'wt') as f: +""".format( + this_year=datetime.date.today().year +) +with open("README.md", "wt") as f: f.writelines(readme) f.writelines(license) diff --git a/I_ForcingFiles/OBC/JohnstoneStraitBoundary.ipynb b/I_ForcingFiles/OBC/JohnstoneStraitBoundary.ipynb index 6cd97aa2..764ff66c 100644 --- a/I_ForcingFiles/OBC/JohnstoneStraitBoundary.ipynb +++ b/I_ForcingFiles/OBC/JohnstoneStraitBoundary.ipynb @@ -106,4 +106,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2004_02-Jan-2006.csv b/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2004_02-Jan-2006.csv index 41123758..f2696236 100644 --- a/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2004_02-Jan-2006.csv +++ b/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2004_02-Jan-2006.csv @@ -1,6 +1,6 @@ Mean 2.915135 Latitude 50.722000 -Constituent freq amp (m) amp error phase (deg PST) phase error +Constituent freq amp (m) amp error phase (deg PST) phase error SA 0.000114 0.092983 0.032730 26.637214 16.438842 SSA 0.000228 0.029836 0.025770 133.146135 59.852844 MSM 0.001310 0.005635 0.023090 20.204699 215.882284 diff --git a/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2007_02-Jan-2009.csv b/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2007_02-Jan-2009.csv index d22952d6..53908389 100644 --- a/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2007_02-Jan-2009.csv +++ b/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2007_02-Jan-2009.csv @@ -1,6 +1,6 @@ Mean 2.863674 Latitude 50.722000 -Constituent freq amp (m) amp error phase (deg PST) phase error +Constituent freq amp (m) amp error phase (deg PST) phase error SA 0.000114 0.079520 0.036595 333.995514 26.661014 SSA 0.000228 0.047897 0.037681 232.921431 45.319654 MSM 0.001310 0.008609 0.025630 255.072488 157.073190 diff --git a/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2010_02-Jan-2012.csv b/I_ForcingFiles/OBC/Port 
Hardy_harmonics_31-Dec-2010_02-Jan-2012.csv index 6190074d..6539e772 100644 --- a/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2010_02-Jan-2012.csv +++ b/I_ForcingFiles/OBC/Port Hardy_harmonics_31-Dec-2010_02-Jan-2012.csv @@ -1,6 +1,6 @@ Mean 2.882685 Latitude 50.722000 -Constituent freq amp (m) amp error phase (deg PST) phase error +Constituent freq amp (m) amp error phase (deg PST) phase error SA 0.000114 0.067888 0.033487 34.456918 26.790456 SSA 0.000228 0.048360 0.033016 2.045806 43.859071 MSM 0.001310 0.033671 0.031009 340.343829 52.881772 diff --git a/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2005_02-Jan-2007.csv b/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2005_02-Jan-2007.csv index c5ba605e..6870682a 100644 --- a/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2005_02-Jan-2007.csv +++ b/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2005_02-Jan-2007.csv @@ -1,7 +1,7 @@ Harmonics from: wlev_8408_31-Dec-2004_02-Jan-2006.csv Mean 2.915135 Latitude 50.722000 -Time_Local pred_8 pred_all +Time_Local pred_8 pred_all 31-Dec-2005 00:00:00 0.872826 1.066987 31-Dec-2005 01:00:00 1.244000 1.454146 31-Dec-2005 02:00:00 1.223064 1.432731 diff --git a/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2008_02-Jan-2010.csv b/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2008_02-Jan-2010.csv index 8fe09363..65dcb71b 100644 --- a/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2008_02-Jan-2010.csv +++ b/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2008_02-Jan-2010.csv @@ -1,7 +1,7 @@ Harmonics from: wlev_8408_31-Dec-2007_02-Jan-2009.csv Mean 2.863674 Latitude 50.722000 -Time_Local pred_8 pred_all +Time_Local pred_8 pred_all 31-Dec-2008 00:00:00 -0.545042 -0.392969 31-Dec-2008 01:00:00 0.249201 0.402549 31-Dec-2008 02:00:00 0.893493 1.040034 diff --git a/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2011_02-Jan-2013.csv b/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2011_02-Jan-2013.csv index 1dbc5529..55bd9e51 100644 --- a/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2011_02-Jan-2013.csv +++ b/I_ForcingFiles/OBC/Port Hardy_t_tide_compare8_31-Dec-2011_02-Jan-2013.csv @@ -1,7 +1,7 @@ Harmonics from: wlev_8408_31-Dec-2010_02-Jan-2012.csv Mean 2.882685 Latitude 50.722000 -Time_Local pred_8 pred_all +Time_Local pred_8 pred_all 31-Dec-2011 00:00:00 -1.230172 -1.200935 31-Dec-2011 01:00:00 -0.805242 -0.765064 31-Dec-2011 02:00:00 -0.203604 -0.163287 diff --git a/I_ForcingFiles/OBC/TS_OBC_Softstart.ipynb b/I_ForcingFiles/OBC/TS_OBC_Softstart.ipynb index f1f65aaa..ec3c26cf 100644 --- a/I_ForcingFiles/OBC/TS_OBC_Softstart.ipynb +++ b/I_ForcingFiles/OBC/TS_OBC_Softstart.ipynb @@ -629,4 +629,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2004_02-Jan-2006.csv b/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2004_02-Jan-2006.csv index 64f9cdc3..4e9315ba 100644 --- a/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2004_02-Jan-2006.csv +++ b/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2004_02-Jan-2006.csv @@ -1,6 +1,6 @@ Mean 2.103916 Latitude 49.154000 -Constituent freq amp (m) amp error phase (deg PST) phase error +Constituent freq amp (m) amp error phase (deg PST) phase error SA 0.000114 0.104969 0.030667 24.500928 19.147709 SSA 0.000228 0.029080 0.032951 146.254787 61.489941 MSM 0.001310 0.013884 0.023384 32.564852 113.595642 diff --git a/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2005_02-Jan-2007.csv b/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2005_02-Jan-2007.csv index 
6f7ca791..a5dea5fa 100644 --- a/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2005_02-Jan-2007.csv +++ b/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2005_02-Jan-2007.csv @@ -1,6 +1,6 @@ Mean 2.110716 Latitude 49.154000 -Constituent freq amp (m) amp error phase (deg PST) phase error +Constituent freq amp (m) amp error phase (deg PST) phase error SA 0.000114 0.164257 0.045679 15.744887 17.567790 SSA 0.000228 0.038497 0.043180 191.195373 68.629793 MSM 0.001310 0.060494 0.047917 62.234383 44.990388 diff --git a/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2007_02-Jan-2009.csv b/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2007_02-Jan-2009.csv index d95bc3de..002859c9 100644 --- a/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2007_02-Jan-2009.csv +++ b/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2007_02-Jan-2009.csv @@ -1,6 +1,6 @@ Mean 2.044004 Latitude 49.154000 -Constituent freq amp (m) amp error phase (deg PST) phase error +Constituent freq amp (m) amp error phase (deg PST) phase error SA 0.000114 0.096756 0.038163 338.606342 21.272921 SSA 0.000228 0.053920 0.033917 235.865645 33.377970 MSM 0.001310 0.005661 0.023997 316.619286 211.962760 diff --git a/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2010_02-Jan-2012.csv b/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2010_02-Jan-2012.csv index c27f4d73..940895ee 100644 --- a/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2010_02-Jan-2012.csv +++ b/I_ForcingFiles/OBC/Tofino_harmonics_31-Dec-2010_02-Jan-2012.csv @@ -1,6 +1,6 @@ Mean 2.078962 Latitude 49.154000 -Constituent freq amp (m) amp error phase (deg PST) phase error +Constituent freq amp (m) amp error phase (deg PST) phase error SA 0.000114 0.085188 0.039990 38.768256 25.229569 SSA 0.000228 0.057395 0.034254 355.316889 36.990099 MSM 0.001310 0.032098 0.036210 340.053746 61.410065 diff --git a/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2005_02-Jan-2007.csv b/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2005_02-Jan-2007.csv index c8a044ec..9b9d5a2a 100644 --- a/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2005_02-Jan-2007.csv +++ b/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2005_02-Jan-2007.csv @@ -1,7 +1,7 @@ Harmonics from: wlev_8615_31-Dec-2004_02-Jan-2006.csv Mean 2.103916 Latitude 49.154000 -Time_Local pred_8 pred_all +Time_Local pred_8 pred_all 31-Dec-2005 00:00:00 0.781693 0.975890 31-Dec-2005 01:00:00 0.928991 1.136533 31-Dec-2005 02:00:00 0.803277 0.976670 diff --git a/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2008_02-Jan-2010.csv b/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2008_02-Jan-2010.csv index 19dfc323..e14e8408 100644 --- a/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2008_02-Jan-2010.csv +++ b/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2008_02-Jan-2010.csv @@ -1,7 +1,7 @@ Harmonics from: wlev_8615_31-Dec-2007_02-Jan-2009.csv Mean 2.044004 Latitude 49.154000 -Time_Local pred_8 pred_all +Time_Local pred_8 pred_all 31-Dec-2008 00:00:00 -0.148470 -0.002684 31-Dec-2008 01:00:00 0.405889 0.548158 31-Dec-2008 02:00:00 0.802973 0.959565 diff --git a/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2011_02-Jan-2013.csv b/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2011_02-Jan-2013.csv index e347b61e..305e4f57 100644 --- a/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2011_02-Jan-2013.csv +++ b/I_ForcingFiles/OBC/Tofino_t_tide_compare8_31-Dec-2011_02-Jan-2013.csv @@ -1,7 +1,7 @@ Harmonics from: wlev_8615_31-Dec-2010_02-Jan-2012.csv Mean 2.078962 Latitude 49.154000 -Time_Local pred_8 pred_all +Time_Local pred_8 pred_all 31-Dec-2011 00:00:00 -0.786502 
-0.775651 31-Dec-2011 01:00:00 -0.402299 -0.399936 31-Dec-2011 02:00:00 0.074033 0.074163 diff --git a/I_ForcingFiles/OBC/create_TEOS-10_BCs.py b/I_ForcingFiles/OBC/create_TEOS-10_BCs.py index 097fef6c..4ab4953a 100644 --- a/I_ForcingFiles/OBC/create_TEOS-10_BCs.py +++ b/I_ForcingFiles/OBC/create_TEOS-10_BCs.py @@ -33,66 +33,71 @@ def create_BCs(infile, outfile, title, dS_is_zero=True): shutil.copy(infile, outfile) except shutil.Error: print("Variables in {} will be overwritten.".format(infile)) - F = nc.Dataset(outfile, 'r+') + F = nc.Dataset(outfile, "r+") # Load variables - sal = F.variables['vosaline'] - temp = F.variables['votemper'] + sal = F.variables["vosaline"] + temp = F.variables["votemper"] dep = np.expand_dims( - np.expand_dims( - np.expand_dims(F.variables['deptht'][:], axis=0), - axis=2), - axis=3) + np.zeros(sal.shape) - long = F.variables['nav_lon'][:] + np.zeros(sal[:].shape) - lat = F.variables['nav_lat'][:] + np.zeros(sal[:].shape) + np.expand_dims(np.expand_dims(F.variables["deptht"][:], axis=0), axis=2), axis=3 + ) + np.zeros(sal.shape) + long = F.variables["nav_lon"][:] + np.zeros(sal[:].shape) + lat = F.variables["nav_lat"][:] + np.zeros(sal[:].shape) # Create TEOS-10 stuff - p = gsw_calls.generic_gsw_caller('gsw_p_from_z.m', [-dep, lat]) + p = gsw_calls.generic_gsw_caller("gsw_p_from_z.m", [-dep, lat]) sal_pract = np.copy(sal[:]) if dS_is_zero: - sal_abs = gsw_calls.generic_gsw_caller('gsw_SR_from_SP.m', - [sal_pract, ]) - sal_title = 'Reference Salinity' + sal_abs = gsw_calls.generic_gsw_caller( + "gsw_SR_from_SP.m", + [ + sal_pract, + ], + ) + sal_title = "Reference Salinity" else: - sal_abs = gsw_calls.generic_gsw_caller('gsw_SA_from_SP.m', - [sal_pract, p, long, lat]) - sal_title = 'Absolute Salinity' + sal_abs = gsw_calls.generic_gsw_caller( + "gsw_SA_from_SP.m", [sal_pract, p, long, lat] + ) + sal_title = "Absolute Salinity" temp_pot = np.copy(temp[:]) - temp_cons = gsw_calls.generic_gsw_caller('gsw_CT_from_pt.m', - [sal_abs, temp_pot]) + temp_cons = gsw_calls.generic_gsw_caller("gsw_CT_from_pt.m", [sal_abs, temp_pot]) # Write into netcdf file sal[:] = sal_abs temp[:] = temp_cons # Update variable metadata - sal.setncatts({'units': 'g/kg', - 'long_name': sal_title}) - temp.setncatts({'units': 'deg C', - 'long_name': 'Conservative Temperature'}) + sal.setncatts({"units": "g/kg", "long_name": sal_title}) + temp.setncatts({"units": "deg C", "long_name": "Conservative Temperature"}) # Update file metadata - maybe we should just pass this into function? 
F.title = title source = F.source - F.source = source + ("\n https://github.com/SalishSeaCast/" - "tools/src/tip/I_ForcingFiles/OBC/" - "Temperature to conservative temperature in" - " boundary conditions.ipynb" - "\n https://github.com/SalishSeaCast/" - "tools/I_ForcingFiles/OBC/" - "create_TEOS-10_BCs.py") + F.source = source + ( + "\n https://github.com/SalishSeaCast/" + "tools/src/tip/I_ForcingFiles/OBC/" + "Temperature to conservative temperature in" + " boundary conditions.ipynb" + "\n https://github.com/SalishSeaCast/" + "tools/I_ForcingFiles/OBC/" + "create_TEOS-10_BCs.py" + ) - F.comment = "Temperature and salinity are TEOS-10 variables:"\ - " Conservative Temperature and {}".format(sal_title) + F.comment = ( + "Temperature and salinity are TEOS-10 variables:" + " Conservative Temperature and {}".format(sal_title) + ) - F.references = "https://github.com/SalishSeaCast/nemo-forcing/src/tip/"\ - "open_boundaries/{}/{}".format(os.path.basename( - os.path.split(outfile)[0]), - os.path.basename(outfile)) + F.references = ( + "https://github.com/SalishSeaCast/nemo-forcing/src/tip/" + "open_boundaries/{}/{}".format( + os.path.basename(os.path.split(outfile)[0]), os.path.basename(outfile) + ) + ) history = F.history - F.history = history + \ - ("\n [{} ] Converted temperature and salinity to Conservative " - "Temperature and " - "{}".format(datetime.datetime.today().strftime('%Y-%m-%d'), - sal_title) - ) + F.history = history + ( + "\n [{} ] Converted temperature and salinity to Conservative " + "Temperature and " + "{}".format(datetime.datetime.today().strftime("%Y-%m-%d"), sal_title) + ) F.close() diff --git a/I_ForcingFiles/OBC/get_tides.m b/I_ForcingFiles/OBC/get_tides.m index 68c8d05d..7ac50af8 100644 --- a/I_ForcingFiles/OBC/get_tides.m +++ b/I_ForcingFiles/OBC/get_tides.m @@ -11,7 +11,7 @@ % KLS November 2013 % Feb 2014: This has been adapted to use the t_tide package. NKS -% This fucntion will save the harmonics data and the predictions in separate files. +% This fucntion will save the harmonics data and the predictions in separate files. %Read in the measured water level data the location fid = fopen(csvfilename); @@ -50,13 +50,13 @@ %Use t_tide to determine harmonic constituents. Needs to be at least one %year time series (366 days) [tidestruc,~] = t_tide(wlev,'start time',start_date(1,1),'latitude',lat); - + %Get predicted tide for same period pred = t_predic(tim,tidestruc,'latitude',lat); %%% Determine latitude somehow from file -%Add mean to the predicted water levels. +%Add mean to the predicted water levels. pred = pred +nanmean(wlev); %Calculate sea level anomaly diff --git a/I_ForcingFiles/OBC/make_readme.py b/I_ForcingFiles/OBC/make_readme.py index 8ce7be31..d41dbbf9 100644 --- a/I_ForcingFiles/OBC/make_readme.py +++ b/I_ForcingFiles/OBC/make_readme.py @@ -16,17 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. """ + import datetime import json import os import re -nbviewer = 'https://nbviewer.org/urls' -repo = 'github.com/SalishSeaCast/tools/blob/main' -repo_dir = 'I_ForcingFiles/OBC' +nbviewer = "https://nbviewer.org/urls" +repo = "github.com/SalishSeaCast/tools/blob/main" +repo_dir = "I_ForcingFiles/OBC" url = os.path.join(nbviewer, repo, repo_dir) -title_pattern = re.compile('#{1,6} ?') +title_pattern = re.compile("#{1,6} ?") readme = """This is a collection of Jupyter Notebooks for creating, manipulating, and visualizing open boundary netCDF files. 
@@ -36,31 +37,28 @@ (if that cell contains Markdown or raw text). """ -notebooks = (fn for fn in os.listdir('./') if fn.endswith('ipynb')) +notebooks = (fn for fn in os.listdir("./") if fn.endswith("ipynb")) for fn in notebooks: - readme += '* ##[{fn}]({url}/{fn}) \n \n'.format(fn=fn, url=url) - with open(fn, 'rt') as notebook: + readme += "* ##[{fn}]({url}/{fn}) \n \n".format(fn=fn, url=url) + with open(fn, "rt") as notebook: contents = json.load(notebook) try: - first_cell = contents['worksheets'][0]['cells'][0] + first_cell = contents["worksheets"][0]["cells"][0] except KeyError: - first_cell = contents['cells'][0] - first_cell_type = first_cell['cell_type'] - if first_cell_type in 'markdown raw'.split(): - desc_lines = first_cell['source'] + first_cell = contents["cells"][0] + first_cell_type = first_cell["cell_type"] + if first_cell_type in "markdown raw".split(): + desc_lines = first_cell["source"] for line in desc_lines: - suffix = '' + suffix = "" if title_pattern.match(line): - line = title_pattern.sub('**', line) - suffix = '**' - if line.endswith('\n'): - readme += ( - ' {line}{suffix} \n' - .format(line=line[:-1], suffix=suffix)) + line = title_pattern.sub("**", line) + suffix = "**" + if line.endswith("\n"): + readme += " {line}{suffix} \n".format(line=line[:-1], suffix=suffix) else: - readme += ( - ' {line}{suffix} '.format(line=line, suffix=suffix)) - readme += '\n' * 2 + readme += " {line}{suffix} ".format(line=line, suffix=suffix) + readme += "\n" * 2 license = """ ##License @@ -71,7 +69,9 @@ They are licensed under the Apache License, Version 2.0. https://www.apache.org/licenses/LICENSE-2.0 Please see the LICENSE file for details of the license. -""".format(this_year=datetime.date.today().year) -with open('README.md', 'wt') as f: +""".format( + this_year=datetime.date.today().year +) +with open("README.md", "wt") as f: f.writelines(readme) f.writelines(license) diff --git a/I_ForcingFiles/OBC/reshape_BCs.py b/I_ForcingFiles/OBC/reshape_BCs.py index 3d823d0c..cecb6420 100644 --- a/I_ForcingFiles/OBC/reshape_BCs.py +++ b/I_ForcingFiles/OBC/reshape_BCs.py @@ -1,6 +1,7 @@ import netCDF4 as nc import datetime, sys + def reshape_BCs(infile, outfile): """ This script rewrites an unstructured boundary condition file in @@ -16,33 +17,35 @@ def reshape_BCs(infile, outfile): python reshape_BCs.py SalishSea_north_TEOS10.nc SalishSea_north_TEOS10-structured.nc python reshape_BCs.py SalishSea_west_TEOS10.nc SalishSea_west_TEOS10-structured.nc """ - fin = nc.Dataset(infile, 'r') - fout = nc.Dataset(outfile, 'w') + fin = nc.Dataset(infile, "r") + fout = nc.Dataset(outfile, "w") # Copy global attributes for attr in fin.ncattrs(): fout.setncattr(attr, fin.getncattr(attr)) # Copy depth and time dimensions - dim = fin.dimensions['deptht'] + dim = fin.dimensions["deptht"] fout.createDimension(dim.name, dim.size) - dim = fin.dimensions['time_counter'] + dim = fin.dimensions["time_counter"] fout.createDimension(dim.name, None) # Adjust the yb and xbT dimensions - dim = fin.dimensions['yb'] + dim = fin.dimensions["yb"] fout.createDimension(dim.name, 10) - dim = fin.dimensions['xbT'] - fout.createDimension(dim.name, dim.size/10) + dim = fin.dimensions["xbT"] + fout.createDimension(dim.name, dim.size / 10) # Copy variables for k, v in fin.variables.items(): # Skip nbidta, nbjdta, and nbrdta because they are not used - if v.name in ['nbidta', 'nbjdta', 'nbrdta']: + if v.name in ["nbidta", "nbjdta", "nbrdta"]: print("Skipping {} ...".format(v.name)) continue # Create variables - 
fout.createVariable(v.name, v.datatype, v.dimensions, zlib=True, complevel=4, shuffle=False) + fout.createVariable( + v.name, v.datatype, v.dimensions, zlib=True, complevel=4, shuffle=False + ) # Copy data (reshape implicit here) fout.variables[v.name][:] = fin.variables[v.name][:] # Copy attributes @@ -50,15 +53,20 @@ def reshape_BCs(infile, outfile): fout.variables[v.name].setncattr(attr, v.getncattr(attr)) # Update notes - fout.source += ("\n https://github.com/SalishSeaCast/" - "tools/I_ForcingFiles/OBC/reshape_BCs.py") + fout.source += ( + "\n https://github.com/SalishSeaCast/" "tools/I_ForcingFiles/OBC/reshape_BCs.py" + ) - fout.history += ("\n [{}] Reshaped to structured format and drop " - "variables nbidta, nbjdta, and nbrdta, with compression." - .format(datetime.datetime.today().strftime('%Y-%m-%d'))) + fout.history += ( + "\n [{}] Reshaped to structured format and drop " + "variables nbidta, nbjdta, and nbrdta, with compression.".format( + datetime.datetime.today().strftime("%Y-%m-%d") + ) + ) fin.close() fout.close() + if __name__ == "__main__": reshape_BCs(sys.argv[1], sys.argv[2]) diff --git a/I_ForcingFiles/Rivers/DailyRiverFlows.py b/I_ForcingFiles/Rivers/DailyRiverFlows.py index ac4766e2..383e7eb3 100644 --- a/I_ForcingFiles/Rivers/DailyRiverFlows.py +++ b/I_ForcingFiles/Rivers/DailyRiverFlows.py @@ -1,6 +1,6 @@ -''' +""" Module for calculating daily river flows -''' +""" from pathlib import Path @@ -13,161 +13,201 @@ from salishsea_tools import rivertools from salishsea_tools import river_202108 as rivers -prop_dict_name ='river_202108' -bathy_type = 'b202108' +prop_dict_name = "river_202108" +bathy_type = "b202108" -names = ['bute', 'evi_n', 'jervis', 'evi_s', 'howe', 'jdf', 'skagit', 'puget', 'toba', 'fraser'] + +names = [ + "bute", + "evi_n", + "jervis", + "evi_s", + "howe", + "jdf", + "skagit", + "puget", + "toba", + "fraser", +] watershed_from_river = { - 'bute': {'primary': 2.015}, - 'jervis': {'primary': 8.810, 'secondary': 140.3}, - 'howe': {'primary': 2.276}, - 'jdf': {'primary': 8.501}, - 'evi_n': {'primary': 10.334}, - 'evi_s': {'primary': 24.60}, - 'toba': {'primary': 0.4563, 'secondary': 14.58}, - 'skagit': {'primary': 1.267, 'secondary': 1.236}, - 'puget': {'primary': 8.790, 'secondary': 29.09}, - 'fraser' : {'primary': 1.161, 'secondary': 162, 'nico_into_fraser': 0.83565} + "bute": {"primary": 2.015}, + "jervis": {"primary": 8.810, "secondary": 140.3}, + "howe": {"primary": 2.276}, + "jdf": {"primary": 8.501}, + "evi_n": {"primary": 10.334}, + "evi_s": {"primary": 24.60}, + "toba": {"primary": 0.4563, "secondary": 14.58}, + "skagit": {"primary": 1.267, "secondary": 1.236}, + "puget": {"primary": 8.790, "secondary": 29.09}, + "fraser": {"primary": 1.161, "secondary": 162, "nico_into_fraser": 0.83565}, } rivers_for_watershed = { - 'bute': {'primary': 'Homathko_Mouth', - 'secondary': 'False'}, - 'evi_n': {'primary': 'Salmon_Sayward', - 'secondary': 'False'}, - 'jervis': {'primary': 'Clowhom_ClowhomLake', - 'secondary': 'RobertsCreek'}, - 'evi_s': {'primary': 'Englishman', - 'secondary': 'False'}, - 'howe': {'primary': 'Squamish_Brackendale', - 'secondary': 'False'}, - 'jdf': {'primary': 'SanJuan_PortRenfrew', - 'secondary': 'False'}, - 'skagit': {'primary': 'Skagit_MountVernon', - 'secondary': 'Snohomish_Monroe'}, - 'puget': {'primary': 'Nisqually_McKenna', - 'secondary': 'Greenwater_Greenwater'}, - 'toba': {'primary': 'Homathko_Mouth', - 'secondary': 'Theodosia'}, - 'fraser': {'primary': 'Fraser', - 'secondary': 'Nicomekl_Langley'}, + "bute": {"primary": 
"Homathko_Mouth", "secondary": "False"}, + "evi_n": {"primary": "Salmon_Sayward", "secondary": "False"}, + "jervis": {"primary": "Clowhom_ClowhomLake", "secondary": "RobertsCreek"}, + "evi_s": {"primary": "Englishman", "secondary": "False"}, + "howe": {"primary": "Squamish_Brackendale", "secondary": "False"}, + "jdf": {"primary": "SanJuan_PortRenfrew", "secondary": "False"}, + "skagit": {"primary": "Skagit_MountVernon", "secondary": "Snohomish_Monroe"}, + "puget": {"primary": "Nisqually_McKenna", "secondary": "Greenwater_Greenwater"}, + "toba": {"primary": "Homathko_Mouth", "secondary": "Theodosia"}, + "fraser": {"primary": "Fraser", "secondary": "Nicomekl_Langley"}, +} +theodosia_from_diversion_only = 1.429 # see TheodosiaWOScotty +matching_dictionary = { + "Englishman": "Salmon_Sayward", + "Theodosia": "Clowhom_ClowhomLake", + "RobertsCreek": "Englishman", + "Salmon_Sayward": "Englishman", + "Squamish_Brackendale": "Homathko_Mouth", + "SanJuan_PortRenfrew": "Englishman", + "Nisqually_McKenna": "Snohomish_Monroe", + "Snohomish_Monroe": "Skagit_MountVernon", + "Skagit_MountVernon": "Snohomish_Monroe", + "Homathko_Mouth": "Squamish_Brackendale", + "Nicomekl_Langley": "RobertsCreek", + "Greenwater_Greenwater": "Snohomish_Monroe", + "Clowhom_ClowhomLake": "Theodosia_Diversion", +} +backup_dictionary = {"SanJuan_PortRenfrew": "RobertsCreek", "Theodosia": "Englishman"} +patching_dictionary = { + "Englishman": ["fit", "persist"], + "Theodosia": ["fit", "backup", "persist"], + "RobertsCreek": ["fit", "persist"], + "Salmon_Sayward": ["fit", "persist"], + "Squamish_Brackendale": ["fit", "persist"], + "SanJuan_PortRenfrew": ["fit", "backup", "persist"], + "Nisqually_McKenna": ["fit", "persist"], + "Snohomish_Monroe": ["fit", "persist"], + "Skagit_MountVernon": ["fit", "persist"], + "Homathko_Mouth": ["fit", "persist"], + "Nicomekl_Langley": ["fit", "persist"], + "Greenwater_Greenwater": ["fit", "persist"], + "Clowhom_ClowhomLake": ["fit", "persist"], +} +persist_until = { + "Englishman": 0, + "Theodosia": 0, + "RobertsCreek": 0, + "Salmon_Sayward": 0, + "Squamish_Brackendale": 0, + "SanJuan_PortRenfrew": 0, + "Nisqually_McKenna": 4, + "Snohomish_Monroe": 0, + "Skagit_MountVernon": 3, + "Homathko_Mouth": 1, + "Nicomekl_Langley": 0, + "Greenwater_Greenwater": 1, + "Clowhom_ClowhomLake": 2, } -theodosia_from_diversion_only = 1.429 # see TheodosiaWOScotty -matching_dictionary = {'Englishman': 'Salmon_Sayward', - 'Theodosia': 'Clowhom_ClowhomLake', - 'RobertsCreek': 'Englishman', - 'Salmon_Sayward': 'Englishman', - 'Squamish_Brackendale': 'Homathko_Mouth', - 'SanJuan_PortRenfrew': 'Englishman', - 'Nisqually_McKenna': 'Snohomish_Monroe', - 'Snohomish_Monroe': 'Skagit_MountVernon', - 'Skagit_MountVernon': 'Snohomish_Monroe', - 'Homathko_Mouth': 'Squamish_Brackendale', - 'Nicomekl_Langley': 'RobertsCreek', - 'Greenwater_Greenwater': 'Snohomish_Monroe', - 'Clowhom_ClowhomLake': 'Theodosia_Diversion'} -backup_dictionary = {'SanJuan_PortRenfrew': 'RobertsCreek', - 'Theodosia': 'Englishman'} -patching_dictionary = {'Englishman': ['fit', 'persist'], - 'Theodosia': ['fit', 'backup', 'persist'], - 'RobertsCreek': ['fit', 'persist'], - 'Salmon_Sayward': ['fit', 'persist'], - 'Squamish_Brackendale': ['fit', 'persist'], - 'SanJuan_PortRenfrew': ['fit', 'backup', 'persist'], - 'Nisqually_McKenna': ['fit', 'persist'], - 'Snohomish_Monroe': ['fit', 'persist'], - 'Skagit_MountVernon': ['fit', 'persist'], - 'Homathko_Mouth': ['fit', 'persist'], - 'Nicomekl_Langley': ['fit', 'persist'], - 'Greenwater_Greenwater': ['fit', 
'persist'], - 'Clowhom_ClowhomLake': ['fit', 'persist']} -persist_until = {'Englishman': 0, - 'Theodosia': 0, - 'RobertsCreek': 0, - 'Salmon_Sayward': 0, - 'Squamish_Brackendale': 0, - 'SanJuan_PortRenfrew': 0, - 'Nisqually_McKenna': 4, - 'Snohomish_Monroe': 0, - 'Skagit_MountVernon': 3, - 'Homathko_Mouth': 1, - 'Nicomekl_Langley': 0, - 'Greenwater_Greenwater': 1, - 'Clowhom_ClowhomLake': 2} def get_area(config): - #directory = Path(config["run"]["enabled hosts"]["salish-nowcast"]["grid dir"]) - directory = Path('/home/sallen/MEOPAR/grid/') + # directory = Path(config["run"]["enabled hosts"]["salish-nowcast"]["grid dir"]) + directory = Path("/home/sallen/MEOPAR/grid/") coords_file = directory / config["run types"]["nowcast-green"]["coordinates"] with xr.open_dataset(coords_file, decode_times=False) as fB: - area = fB['e1t'][0,:] * fB['e2t'][0,:] + area = fB["e1t"][0, :] * fB["e2t"][0, :] return area def read_river(river_name, ps, config): """Read daily average discharge data for river_name from river flow file.""" - print (river_name) - filename = Path(config["rivers"]["SOG river files"][river_name.replace('_', '')]) - river_flow = pd.read_csv(filename, header=None, sep='\s+', index_col=False, - names=['year', 'month', 'day', 'flow']) - river_flow['date'] = pd.to_datetime(river_flow.drop(columns='flow')) - river_flow.set_index('date', inplace=True) - river_flow = river_flow.drop(columns=['year', 'month', 'day']) + print(river_name) + filename = Path(config["rivers"]["SOG river files"][river_name.replace("_", "")]) + river_flow = pd.read_csv( + filename, + header=None, + sep="\s+", + index_col=False, + names=["year", "month", "day", "flow"], + ) + river_flow["date"] = pd.to_datetime(river_flow.drop(columns="flow")) + river_flow.set_index("date", inplace=True) + river_flow = river_flow.drop(columns=["year", "month", "day"]) river_flow = river_flow.dropna(axis=0) # remove any rows with nans\n", - if ps == 'primary': - river_flow = river_flow.rename(columns={'flow': 'Primary River Flow'}) - elif ps == 'secondary': - river_flow = river_flow.rename(columns={'flow': 'Secondary River Flow'}) + if ps == "primary": + river_flow = river_flow.rename(columns={"flow": "Primary River Flow"}) + elif ps == "secondary": + river_flow = river_flow.rename(columns={"flow": "Secondary River Flow"}) return river_flow def read_river_Theodosia(config): - part1 = pd.read_csv(config["rivers"]["SOG river files"]["TheodosiaScotty"], header=None, sep='\s+', index_col=False, - names=['year', 'month', 'day', 'flow']) - part2 = pd.read_csv(config["rivers"]["SOG river files"]["TheodosiaBypass"], header=None, sep='\s+', index_col=False, - names=['year', 'month', 'day', 'flow']) - part3 = pd.read_csv(config["rivers"]["SOG river files"]["TheodosiaDiversion"], header=None, sep='\s+', index_col=False, - names=['year', 'month', 'day', 'flow']) + part1 = pd.read_csv( + config["rivers"]["SOG river files"]["TheodosiaScotty"], + header=None, + sep="\s+", + index_col=False, + names=["year", "month", "day", "flow"], + ) + part2 = pd.read_csv( + config["rivers"]["SOG river files"]["TheodosiaBypass"], + header=None, + sep="\s+", + index_col=False, + names=["year", "month", "day", "flow"], + ) + part3 = pd.read_csv( + config["rivers"]["SOG river files"]["TheodosiaDiversion"], + header=None, + sep="\s+", + index_col=False, + names=["year", "month", "day", "flow"], + ) for part in [part1, part2, part3]: - part['date'] = pd.to_datetime(part.drop(columns='flow')) - part.set_index('date', inplace=True) - part.drop(columns=['year', 'month', 
'day'], inplace=True) - part1 = part1.rename(columns={'flow': 'Scotty'}) - part2 = part2.rename(columns={'flow': 'Bypass'}) - part3 = part3.rename(columns={'flow': 'Diversion'}) - theodosia = (part3.merge(part2, how='outer', on='date')).merge(part1, how='outer', on='date') - theodosia['Secondary River Flow'] = theodosia['Scotty'] + theodosia['Diversion'] - theodosia['Bypass'] - part3['FlowFromDiversion'] = part3.Diversion * theodosia_from_diversion_only - theodosia = theodosia.merge(part3, how='outer', on='date', sort=True) - print (theodosia) - theodosia['Secondary River Flow'] = theodosia['Secondary River Flow'].fillna( - theodosia['FlowFromDiversion']) - theodosia = theodosia.drop(['Diversion_x', 'Bypass', 'Scotty', 'Diversion_y', - 'FlowFromDiversion'], axis=1) + part["date"] = pd.to_datetime(part.drop(columns="flow")) + part.set_index("date", inplace=True) + part.drop(columns=["year", "month", "day"], inplace=True) + part1 = part1.rename(columns={"flow": "Scotty"}) + part2 = part2.rename(columns={"flow": "Bypass"}) + part3 = part3.rename(columns={"flow": "Diversion"}) + theodosia = (part3.merge(part2, how="outer", on="date")).merge( + part1, how="outer", on="date" + ) + theodosia["Secondary River Flow"] = ( + theodosia["Scotty"] + theodosia["Diversion"] - theodosia["Bypass"] + ) + part3["FlowFromDiversion"] = part3.Diversion * theodosia_from_diversion_only + theodosia = theodosia.merge(part3, how="outer", on="date", sort=True) + print(theodosia) + theodosia["Secondary River Flow"] = theodosia["Secondary River Flow"].fillna( + theodosia["FlowFromDiversion"] + ) + theodosia = theodosia.drop( + ["Diversion_x", "Bypass", "Scotty", "Diversion_y", "FlowFromDiversion"], axis=1 + ) return theodosia def patch_fitting(primary_river, useriver, dateneeded, gap_length, config): bad = False - firstchoice = read_river(useriver, 'primary', config) + firstchoice = read_river(useriver, "primary", config) length = 7 # number of days we use to fit against ratio = 0 - for day in arrow.Arrow.range('day', dateneeded.shift(days=-length-gap_length), - dateneeded.shift(days=-1-gap_length)): + for day in arrow.Arrow.range( + "day", + dateneeded.shift(days=-length - gap_length), + dateneeded.shift(days=-1 - gap_length), + ): numer = primary_river[primary_river.index == str(day.date())].values denom = firstchoice[firstchoice.index == str(day.date())].values if (len(denom) == 1) and (len(numer) == 1): ratio = ratio + numer / denom else: bad = True - + if len(firstchoice[firstchoice.index == str(dateneeded.date())].values) != 1: bad = True - + if not bad: - flux = ratio/length * firstchoice[firstchoice.index == str(dateneeded.date())].values + flux = ( + ratio + / length + * firstchoice[firstchoice.index == str(dateneeded.date())].values + ) else: flux = np.nan return bad, flux @@ -179,33 +219,35 @@ def patch_gaps(name, primary_river, dateneeded, config): # Find the length of gap assuming that the required day is beyond the time series available lastdata = primary_river.iloc[-1] if lastdata.name > dateneeded.naive: - print ('Not working at end of time series, use MakeDailyNCFiles notebook') + print("Not working at end of time series, use MakeDailyNCFiles notebook") stop else: day = dt.datetime(2020, 1, 2) - dt.datetime(2020, 1, 1) gap_length = int((dateneeded.naive - lastdata.name) / day) - print (gap_length) - + print(gap_length) + notfitted = True method = 0 while notfitted: if gap_length > persist_until[name]: fittype = patching_dictionary[name][method] else: - fittype = 'persist' - print (fittype) - if fittype 
== 'persist': + fittype = "persist" + print(fittype) + if fittype == "persist": flux = lastdata.values notfitted = False else: - if fittype == 'fit': + if fittype == "fit": useriver = matching_dictionary[name] - elif fittype == 'backup': + elif fittype == "backup": useriver = backup_dictionary[name] else: - print ('typo in fit list') + print("typo in fit list") stop - bad, flux = patch_fitting(primary_river, useriver, dateneeded, gap_length, config) + bad, flux = patch_fitting( + primary_river, useriver, dateneeded, gap_length, config + ) if bad: method = method + 1 else: @@ -213,81 +255,101 @@ def patch_gaps(name, primary_river, dateneeded, config): return flux -def do_a_pair(water_shed, watershed_from_river, dateneeded, - primary_river_name, use_secondary, config, - secondary_river_name='Null'): - primary_river = read_river(primary_river_name, 'primary', config) - +def do_a_pair( + water_shed, + watershed_from_river, + dateneeded, + primary_river_name, + use_secondary, + config, + secondary_river_name="Null", +): + primary_river = read_river(primary_river_name, "primary", config) + if len(primary_river[primary_river.index == str(dateneeded.date())]) == 1: - primary_flow = primary_river[primary_river.index == str(dateneeded.date())].values + primary_flow = primary_river[ + primary_river.index == str(dateneeded.date()) + ].values else: - print (primary_river_name, ' need to patch') + print(primary_river_name, " need to patch") primary_flow = patch_gaps(primary_river_name, primary_river, dateneeded, config) - + if use_secondary: if secondary_river_name == "Theodosia": secondary_river = read_river_Theodosia(config) - print (secondary_river) - + print(secondary_river) + else: - secondary_river = read_river(secondary_river_name, 'secondary', config) - + secondary_river = read_river(secondary_river_name, "secondary", config) + if len(secondary_river[secondary_river.index == str(dateneeded.date())]) == 1: - secondary_flow = secondary_river[secondary_river.index == str(dateneeded.date())].values + secondary_flow = secondary_river[ + secondary_river.index == str(dateneeded.date()) + ].values else: - print (secondary_river_name, ' need to patch') - secondary_flow = patch_gaps(secondary_river_name, secondary_river, dateneeded, config) - - watershed_flux = (primary_flow * - watershed_from_river[water_shed]['primary'] - + secondary_flow - * watershed_from_river[water_shed]['secondary']) + print(secondary_river_name, " need to patch") + secondary_flow = patch_gaps( + secondary_river_name, secondary_river, dateneeded, config + ) + + watershed_flux = ( + primary_flow * watershed_from_river[water_shed]["primary"] + + secondary_flow * watershed_from_river[water_shed]["secondary"] + ) else: - watershed_flux = (primary_flow * - watershed_from_river[water_shed]['primary']) + watershed_flux = primary_flow * watershed_from_river[water_shed]["primary"] - return watershed_flux -def do_fraser(watershed_from_river, dateneeded, - primary_river_name, secondary_river_name, config): - primary_river = read_river(primary_river_name, 'primary', config) - +def do_fraser( + watershed_from_river, dateneeded, primary_river_name, secondary_river_name, config +): + primary_river = read_river(primary_river_name, "primary", config) + if len(primary_river[primary_river.index == str(dateneeded.date())]) == 1: good = True - primary_flow = primary_river[primary_river.index == str(dateneeded.date())].values + primary_flow = primary_river[ + primary_river.index == str(dateneeded.date()) + ].values else: good = False - print 
(primary_river_name, ' need to patch') + print(primary_river_name, " need to patch") lastdata = primary_river.iloc[-1] if lastdata.name > dateneeded.naive: - print ('Not working at end of time series, use MakeDailyNCFiles notebook') + print("Not working at end of time series, use MakeDailyNCFiles notebook") stop else: day = dt.datetime(2020, 1, 2) - dt.datetime(2020, 1, 1) gap_length = int((dateneeded.naive - lastdata.name) / day) - print (gap_length) + print(gap_length) primary_flow = lastdata.values - - secondary_river = read_river(secondary_river_name, 'secondary', config) - + + secondary_river = read_river(secondary_river_name, "secondary", config) + if len(secondary_river[secondary_river.index == str(dateneeded.date())]) == 1: good = True - secondary_flow = secondary_river[secondary_river.index == str(dateneeded.date())].values + secondary_flow = secondary_river[ + secondary_river.index == str(dateneeded.date()) + ].values else: good = False - print (secondary_river_name, ' need to patch') - secondary_flow = patch_gaps(secondary_river_name, secondary_river, dateneeded, config) - - Fraser_flux = (primary_flow * - watershed_from_river['fraser']['primary'] - + secondary_flow - * watershed_from_river['fraser']['secondary'] * - watershed_from_river['fraser']['nico_into_fraser']) - secondary_flux = (secondary_flow * - watershed_from_river['fraser']['secondary'] * - (1 - watershed_from_river['fraser']['nico_into_fraser'])) + print(secondary_river_name, " need to patch") + secondary_flow = patch_gaps( + secondary_river_name, secondary_river, dateneeded, config + ) + + Fraser_flux = ( + primary_flow * watershed_from_river["fraser"]["primary"] + + secondary_flow + * watershed_from_river["fraser"]["secondary"] + * watershed_from_river["fraser"]["nico_into_fraser"] + ) + secondary_flux = ( + secondary_flow + * watershed_from_river["fraser"]["secondary"] + * (1 - watershed_from_river["fraser"]["nico_into_fraser"]) + ) return Fraser_flux, secondary_flux @@ -296,126 +358,146 @@ def calculate_watershed_flows(dateneeded, config): flows = {} for name in names: - print (name) - if rivers_for_watershed[name]['secondary'] == 'False': - print ('no secondary') - flows[name] = do_a_pair(name, watershed_from_river, dateneeded, - rivers_for_watershed[name]['primary'], False, config) - elif name == 'fraser': - flows['Fraser'], flows['nonFraser'] = do_fraser(watershed_from_river, dateneeded, - rivers_for_watershed[name]['primary'], - rivers_for_watershed[name]['secondary'], config) + print(name) + if rivers_for_watershed[name]["secondary"] == "False": + print("no secondary") + flows[name] = do_a_pair( + name, + watershed_from_river, + dateneeded, + rivers_for_watershed[name]["primary"], + False, + config, + ) + elif name == "fraser": + flows["Fraser"], flows["nonFraser"] = do_fraser( + watershed_from_river, + dateneeded, + rivers_for_watershed[name]["primary"], + rivers_for_watershed[name]["secondary"], + config, + ) else: - flows[name] = do_a_pair(name, watershed_from_river, dateneeded, - rivers_for_watershed[name]['primary'], True, config, - rivers_for_watershed[name]['secondary']) - if name == 'fraser': - print (flows['Fraser']) + flows[name] = do_a_pair( + name, + watershed_from_river, + dateneeded, + rivers_for_watershed[name]["primary"], + True, + config, + rivers_for_watershed[name]["secondary"], + ) + if name == "fraser": + print(flows["Fraser"]) else: - print (flows[name]) + print(flows[name]) - print ('files read') + print("files read") return flows def create_runoff_array(flows, horz_area): - - 
fraserratio = rivers.prop_dict['fraser']['Fraser']['prop'] - + + fraserratio = rivers.prop_dict["fraser"]["Fraser"]["prop"] + runoff = np.zeros((horz_area.shape[0], horz_area.shape[1])) run_depth = np.ones_like(runoff) run_temp = np.empty_like(runoff) - + for name in names: - if name == 'fraser': + if name == "fraser": for key in rivers.prop_dict[name]: if "Fraser" in key: - flux = flows['Fraser'].flatten()[0] + flux = flows["Fraser"].flatten()[0] subarea = fraserratio else: - flux = flows['nonFraser'].flatten()[0] + flux = flows["nonFraser"].flatten()[0] subarea = 1 - fraserratio - river = rivers.prop_dict['fraser'][key] - runoff = rivertools.fill_runoff_array(flux*river['prop']/subarea, river['i'], - river['di'], river['j'], river['dj'], river['depth'], runoff, - run_depth, horz_area)[0] + river = rivers.prop_dict["fraser"][key] + runoff = rivertools.fill_runoff_array( + flux * river["prop"] / subarea, + river["i"], + river["di"], + river["j"], + river["dj"], + river["depth"], + runoff, + run_depth, + horz_area, + )[0] else: flowtoday = flows[name].flatten()[0] - runoff, run_depth, run_temp = rivertools.put_watershed_into_runoff('constant', horz_area, - flowtoday, runoff, run_depth, run_temp, - rivers.prop_dict[name]) + runoff, run_depth, run_temp = rivertools.put_watershed_into_runoff( + "constant", + horz_area, + flowtoday, + runoff, + run_depth, + run_temp, + rivers.prop_dict[name], + ) return runoff def write_file(day, runoff, config): "keep it small and simple, runoff only" - notebook = 'ProductionDailyRiverNCfile.ipynb' + notebook = "ProductionDailyRiverNCfile.ipynb" coords = { - 'x' : range(398), - 'y' : range(898), - 'time_counter' : [0], + "x": range(398), + "y": range(898), + "time_counter": [0], } - var_attrs = {'units': 'kg m-2 s-1', - 'long_name': 'runoff_flux'} - - # set up filename - directory = Path('./ncfiles/') + var_attrs = {"units": "kg m-2 s-1", "long_name": "runoff_flux"} + + # set up filename + directory = Path("./ncfiles/") filename_tmpls = config["rivers"]["file templates"][bathy_type] - + filename = directory / filename_tmpls.format(day.date()) - print (filename) - - netcdf_title = f'Rivers for {day.date()}' + print(filename) + + netcdf_title = f"Rivers for {day.date()}" ds_attrs = { - 'acknowledgements': - 'Based on river fit', - 'creator_email': - 'sallen@eoas.ubc.ca', - 'creator_name': - 'Salish Sea MEOPAR Project Contributors', - 'creator_url': - 'https://salishsea-meopar-docs.readthedocs.org/', - 'institution': - 'UBC EOAS', - 'institution_fullname': ( - 'Earth, Ocean & Atmospheric Sciences,' - ' University of British Columbia' + "acknowledgements": "Based on river fit", + "creator_email": "sallen@eoas.ubc.ca", + "creator_name": "Salish Sea MEOPAR Project Contributors", + "creator_url": "https://salishsea-meopar-docs.readthedocs.org/", + "institution": "UBC EOAS", + "institution_fullname": ( + "Earth, Ocean & Atmospheric Sciences," " University of British Columbia" ), - 'title': netcdf_title, - 'notebook': notebook, - 'rivers_base': prop_dict_name, - 'summary': f'Daily Runoff for Bathymetry 202108', - 'history': ( - '[{}] File creation.' 
- .format(dt.datetime.today().strftime('%Y-%m-%d')) - ) - } + "title": netcdf_title, + "notebook": notebook, + "rivers_base": prop_dict_name, + "summary": f"Daily Runoff for Bathymetry 202108", + "history": ( + "[{}] File creation.".format(dt.datetime.today().strftime("%Y-%m-%d")) + ), + } runoffs = np.empty((1, runoff.shape[0], runoff.shape[1])) runoffs[0] = runoff da = xr.DataArray( - data = runoffs, - name='rorunoff', - dims=('time_counter', 'y', 'x'), - coords = coords, - attrs = var_attrs) - - - ds = xr.Dataset( - data_vars={ - 'rorunoff': da}, - coords = coords, - attrs = ds_attrs - ) - - encoding = {var: {'zlib': True} for var in ds.data_vars} + data=runoffs, + name="rorunoff", + dims=("time_counter", "y", "x"), + coords=coords, + attrs=var_attrs, + ) + + ds = xr.Dataset(data_vars={"rorunoff": da}, coords=coords, attrs=ds_attrs) + + encoding = {var: {"zlib": True} for var in ds.data_vars} + + ds.to_netcdf( + filename, + unlimited_dims=("time_counter"), + encoding=encoding, + ) + - ds.to_netcdf(filename, unlimited_dims=('time_counter'), - encoding=encoding,) - - def make_runoff_files(dateneeded, config): flows = calculate_watershed_flows(dateneeded, config) horz_area = get_area(config) diff --git a/I_ForcingFiles/Rivers/make_readme.py b/I_ForcingFiles/Rivers/make_readme.py index 5c3a5e57..7eaf327e 100644 --- a/I_ForcingFiles/Rivers/make_readme.py +++ b/I_ForcingFiles/Rivers/make_readme.py @@ -16,17 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. """ + import datetime import json import os import re -nbviewer = 'https://nbviewer.org/urls' -repo = 'github.com/SalishSeaCast/tools/blob/main' -repo_dir = 'I_ForcingFiles/Rivers' +nbviewer = "https://nbviewer.org/urls" +repo = "github.com/SalishSeaCast/tools/blob/main" +repo_dir = "I_ForcingFiles/Rivers" url = os.path.join(nbviewer, repo, repo_dir) -title_pattern = re.compile('#{1,6} ?') +title_pattern = re.compile("#{1,6} ?") readme = """This is a collection of Jupyter Notebooks for creating, manipulating, and visualizing netCDF files to do with Rivers. @@ -36,27 +37,24 @@ (if that cell contains Markdown or raw text). 
""" -notebooks = (fn for fn in os.listdir('./') if fn.endswith('ipynb')) +notebooks = (fn for fn in os.listdir("./") if fn.endswith("ipynb")) for fn in notebooks: - readme += '* ##[{fn}]({url}/{fn}) \n \n'.format(fn=fn, url=url) - with open(fn, 'rt') as notebook: + readme += "* ##[{fn}]({url}/{fn}) \n \n".format(fn=fn, url=url) + with open(fn, "rt") as notebook: contents = json.load(notebook) - first_cell_type = contents['worksheets'][0]['cells'][0]['cell_type'] - if first_cell_type in 'markdown raw'.split(): - desc_lines = contents['worksheets'][0]['cells'][0]['source'] + first_cell_type = contents["worksheets"][0]["cells"][0]["cell_type"] + if first_cell_type in "markdown raw".split(): + desc_lines = contents["worksheets"][0]["cells"][0]["source"] for line in desc_lines: - suffix = '' + suffix = "" if title_pattern.match(line): - line = title_pattern.sub('**', line) - suffix = '**' - if line.endswith('\n'): - readme += ( - ' {line}{suffix} \n' - .format(line=line[:-1], suffix=suffix)) + line = title_pattern.sub("**", line) + suffix = "**" + if line.endswith("\n"): + readme += " {line}{suffix} \n".format(line=line[:-1], suffix=suffix) else: - readme += ( - ' {line}{suffix} '.format(line=line, suffix=suffix)) - readme += '\n' * 2 + readme += " {line}{suffix} ".format(line=line, suffix=suffix) + readme += "\n" * 2 license = """ ##License @@ -67,7 +65,9 @@ They are licensed under the Apache License, Version 2.0. https://www.apache.org/licenses/LICENSE-2.0 Please see the LICENSE file for details of the license. -""".format(this_year=datetime.date.today().year) -with open('README.md', 'wt') as f: +""".format( + this_year=datetime.date.today().year +) +with open("README.md", "wt") as f: f.writelines(readme) f.writelines(license) diff --git a/I_ForcingFiles/Rivers/todo.list b/I_ForcingFiles/Rivers/todo.list index 3884548b..41d598ce 100644 --- a/I_ForcingFiles/Rivers/todo.list +++ b/I_ForcingFiles/Rivers/todo.list @@ -1 +1 @@ -redo river worker to create daily river files for both bathy2 and bathy6 \ No newline at end of file +redo river worker to create daily river files for both bathy2 and bathy6 diff --git a/I_ForcingFiles/Tides/Prepare Tide Files.ipynb b/I_ForcingFiles/Tides/Prepare Tide Files.ipynb index 30169978..b1d7efaf 100644 --- a/I_ForcingFiles/Tides/Prepare Tide Files.ipynb +++ b/I_ForcingFiles/Tides/Prepare Tide Files.ipynb @@ -577,4 +577,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/I_ForcingFiles/Tides/make_readme.py b/I_ForcingFiles/Tides/make_readme.py index 54272116..2f80f940 100644 --- a/I_ForcingFiles/Tides/make_readme.py +++ b/I_ForcingFiles/Tides/make_readme.py @@ -16,17 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. """ + import datetime import json import os import re -nbviewer = 'https://nbviewer.org/urls' -repo = 'github.com/SalishSeaCast/tools/blob/main' -repo_dir = 'I_ForcingFiles/Tides' +nbviewer = "https://nbviewer.org/urls" +repo = "github.com/SalishSeaCast/tools/blob/main" +repo_dir = "I_ForcingFiles/Tides" url = os.path.join(nbviewer, repo, repo_dir) -title_pattern = re.compile('#{1,6} ?') +title_pattern = re.compile("#{1,6} ?") readme = """This is a collection of Jupyter Notebooks for creating, manipulating, and visualizing tidal forcing netCDF files. @@ -36,27 +37,24 @@ (if that cell contains Markdown or raw text). 
""" -notebooks = (fn for fn in os.listdir('./') if fn.endswith('ipynb')) +notebooks = (fn for fn in os.listdir("./") if fn.endswith("ipynb")) for fn in notebooks: - readme += '* ##[{fn}]({url}/{fn}) \n \n'.format(fn=fn, url=url) - with open(fn, 'rt') as notebook: + readme += "* ##[{fn}]({url}/{fn}) \n \n".format(fn=fn, url=url) + with open(fn, "rt") as notebook: contents = json.load(notebook) - first_cell_type = contents['worksheets'][0]['cells'][0]['cell_type'] - if first_cell_type in 'markdown raw'.split(): - desc_lines = contents['worksheets'][0]['cells'][0]['source'] + first_cell_type = contents["worksheets"][0]["cells"][0]["cell_type"] + if first_cell_type in "markdown raw".split(): + desc_lines = contents["worksheets"][0]["cells"][0]["source"] for line in desc_lines: - suffix = '' + suffix = "" if title_pattern.match(line): - line = title_pattern.sub('**', line) - suffix = '**' - if line.endswith('\n'): - readme += ( - ' {line}{suffix} \n' - .format(line=line[:-1], suffix=suffix)) + line = title_pattern.sub("**", line) + suffix = "**" + if line.endswith("\n"): + readme += " {line}{suffix} \n".format(line=line[:-1], suffix=suffix) else: - readme += ( - ' {line}{suffix} '.format(line=line, suffix=suffix)) - readme += '\n' * 2 + readme += " {line}{suffix} ".format(line=line, suffix=suffix) + readme += "\n" * 2 license = """ ##License @@ -67,7 +65,9 @@ They are licensed under the Apache License, Version 2.0. https://www.apache.org/licenses/LICENSE-2.0 Please see the LICENSE file for details of the license. -""".format(this_year=datetime.date.today().year) -with open('README.md', 'wt') as f: +""".format( + this_year=datetime.date.today().year +) +with open("README.md", "wt") as f: f.writelines(readme) f.writelines(license) diff --git a/I_ForcingFiles/Tides/webtide_tools.py b/I_ForcingFiles/Tides/webtide_tools.py index 797c25ae..a2828408 100644 --- a/I_ForcingFiles/Tides/webtide_tools.py +++ b/I_ForcingFiles/Tides/webtide_tools.py @@ -17,12 +17,19 @@ """ -def get_data_from_csv(tidevar, constituent, depth, CFactor, ibmin, ibmax, - Tfilename='Tidal Elevation Constituents T.csv', - Ufilename='Tidal Current Constituents U.csv', - Vfilename='Tidal Current Constituents V.csv', inorth=0): - """Get the constituent data from the csv file. 
- """ +def get_data_from_csv( + tidevar, + constituent, + depth, + CFactor, + ibmin, + ibmax, + Tfilename="Tidal Elevation Constituents T.csv", + Ufilename="Tidal Current Constituents U.csv", + Vfilename="Tidal Current Constituents V.csv", + inorth=0, +): + """Get the constituent data from the csv file.""" import pandas as pd from math import radians import numpy @@ -32,55 +39,58 @@ def get_data_from_csv(tidevar, constituent, depth, CFactor, ibmin, ibmax, # correction factors base = constituent - corr = 1 # if not otherwise set - corr_shift = 0 # if not otherwise set + corr = 1 # if not otherwise set + corr_shift = 0 # if not otherwise set if constituent == "M2": - corr_pha = CFactor['A2 Phase'] - corr_amp = CFactor['A2 Amp'] - corr = CFactor['A2 Flux'] - corr_shift = CFactor['A2 Shift'] + corr_pha = CFactor["A2 Phase"] + corr_amp = CFactor["A2 Amp"] + corr = CFactor["A2 Flux"] + corr_shift = CFactor["A2 Shift"] elif constituent == "S2": - corr_pha = CFactor['A2 Phase'] + CFactor['S2 Phase'] - corr_amp = CFactor['A2 Amp'] * CFactor['S2 Amp'] - corr = CFactor['A2 Flux'] - corr_shift = CFactor['A2 Shift'] + corr_pha = CFactor["A2 Phase"] + CFactor["S2 Phase"] + corr_amp = CFactor["A2 Amp"] * CFactor["S2 Amp"] + corr = CFactor["A2 Flux"] + corr_shift = CFactor["A2 Shift"] elif constituent == "N2": - corr_pha = CFactor['A2 Phase'] + CFactor['N2 Phase'] - corr_amp = CFactor['A2 Amp'] * CFactor['N2 Amp'] - corr = CFactor['A2 Flux'] - corr_shift = CFactor['A2 Shift'] + corr_pha = CFactor["A2 Phase"] + CFactor["N2 Phase"] + corr_amp = CFactor["A2 Amp"] * CFactor["N2 Amp"] + corr = CFactor["A2 Flux"] + corr_shift = CFactor["A2 Shift"] elif constituent == "K2": # based on S2 base = "S2" - corr_pha = CFactor['A2 Phase'] + CFactor['S2 Phase'] - corr_amp = CFactor['A2 Amp'] * CFactor['S2 Amp'] - corr = CFactor['A2 Flux'] - corr_shift = CFactor['A2 Shift'] + corr_pha = CFactor["A2 Phase"] + CFactor["S2 Phase"] + corr_amp = CFactor["A2 Amp"] * CFactor["S2 Amp"] + corr = CFactor["A2 Flux"] + corr_shift = CFactor["A2 Shift"] elif constituent == "K1": - corr_pha = CFactor['A1 Phase'] - corr_amp = CFactor['A1 Amp'] + corr_pha = CFactor["A1 Phase"] + corr_amp = CFactor["A1 Amp"] elif constituent == "O1": - corr_pha = CFactor['A1 Phase'] + CFactor['O1 Phase'] - corr_amp = CFactor['A1 Amp'] * CFactor['O1 Amp'] + corr_pha = CFactor["A1 Phase"] + CFactor["O1 Phase"] + corr_amp = CFactor["A1 Amp"] * CFactor["O1 Amp"] elif constituent == "P1": # based on K1 base = "K1" - corr_pha = CFactor['A1 Phase'] - corr_amp = CFactor['A1 Amp'] + corr_pha = CFactor["A1 Phase"] + corr_amp = CFactor["A1 Amp"] elif constituent == "Q1": - corr_pha = CFactor['A1 Phase'] + CFactor['Q1 Phase'] - corr_amp = CFactor['A1 Amp'] * CFactor['Q1 Amp'] + corr_pha = CFactor["A1 Phase"] + CFactor["Q1 Phase"] + corr_amp = CFactor["A1 Amp"] * CFactor["Q1 Amp"] # WATER LEVEL ELEVATION - if tidevar == 'T': - webtide = pd.read_csv(Tfilename, - skiprows=2) - webtide = webtide.rename(columns={'Constituent': 'const', - 'Longitude': 'lon', - 'Latitude': 'lat', - 'Amplitude (m)': 'amp', - 'Phase (deg GMT)': 'pha'}) + if tidevar == "T": + webtide = pd.read_csv(Tfilename, skiprows=2) + webtide = webtide.rename( + columns={ + "Constituent": "const", + "Longitude": "lon", + "Latitude": "lat", + "Amplitude (m)": "amp", + "Phase (deg GMT)": "pha", + } + ) # number of points from webtide - nwebtide = int(webtide.shape[0]/8.) + nwebtide = int(webtide.shape[0] / 8.0) # how long is the boundary? 
boundlen = ibmax - ibmin @@ -96,16 +106,20 @@ def get_data_from_csv(tidevar, constituent, depth, CFactor, ibmin, ibmax, # allocate the M2 phase and amplitude from Webtide # to the boundary cells # (CHECK: Are these allocated in the right order?) - amp_W[gap:boundlen-inorth, 0] = webtide[webtide.const == (base + ':')].amp * corr_amp + amp_W[gap : boundlen - inorth, 0] = ( + webtide[webtide.const == (base + ":")].amp * corr_amp + ) amp_W[0:gap, 0] = amp_W[gap, 0] - amp_W[boundlen-inorth:, 0] = amp_W[boundlen-inorth-1,0] + amp_W[boundlen - inorth :, 0] = amp_W[boundlen - inorth - 1, 0] - pha_W[gap:boundlen-inorth, 0] = webtide[webtide.const == (base + ':')].pha + corr_pha + pha_W[gap : boundlen - inorth, 0] = ( + webtide[webtide.const == (base + ":")].pha + corr_pha + ) pha_W[0:gap, 0] = pha_W[gap, 0] - pha_W[boundlen-inorth:, 0] = pha_W[boundlen-inorth-1, 0] + pha_W[boundlen - inorth :, 0] = pha_W[boundlen - inorth - 1, 0] if constituent == "K1" or constituent == "M2": - print (constituent, "eta") + print(constituent, "eta") if constituent == "P1": amp_W = amp_W * 0.310 @@ -118,20 +132,23 @@ def get_data_from_csv(tidevar, constituent, depth, CFactor, ibmin, ibmax, Z1 = amp_W * numpy.cos(numpy.radians(pha_W)) Z2 = amp_W * numpy.sin(numpy.radians(pha_W)) - #U VELOCITY - if tidevar == 'U': - webtide = pd.read_csv(Ufilename, - skiprows=2) - webtide = webtide.rename(columns={'Constituent': 'const', - 'Longitude': 'lon', - 'Latitude': 'lat', - 'U Amplitude (m)': 'ewamp', - 'U Phase (deg GMT)': 'ewpha', - 'V Amplitude (m)': 'nsamp', - 'V Phase (deg GMT)': 'nspha'}) + # U VELOCITY + if tidevar == "U": + webtide = pd.read_csv(Ufilename, skiprows=2) + webtide = webtide.rename( + columns={ + "Constituent": "const", + "Longitude": "lon", + "Latitude": "lat", + "U Amplitude (m)": "ewamp", + "U Phase (deg GMT)": "ewpha", + "V Amplitude (m)": "nsamp", + "V Phase (deg GMT)": "nspha", + } + ) # number of points from webtide - nwebtide = int(webtide.shape[0]/8.) + nwebtide = int(webtide.shape[0] / 8.0) # how long is the boundary? 
boundlen = ibmax - ibmin gap = boundlen - nwebtide - inorth @@ -139,21 +156,29 @@ def get_data_from_csv(tidevar, constituent, depth, CFactor, ibmin, ibmax, # Convert amplitudes from north/south u/v into grid co-ordinates # Convert phase from north/south into grid co-ordinates (see docs/tides/tides_data_acquisition for details) - ua_ugrid = numpy.array(webtide[webtide.const == (base + ':')].ewamp) * corr - va_ugrid = numpy.array(webtide[webtide.const == (base + ':')].nsamp) * corr - uphi_ugrid = numpy.radians(numpy.array(webtide[webtide.const == (base + ':')].ewpha)) - vphi_ugrid = numpy.radians(numpy.array(webtide[webtide.const == (base + ':')].nspha)) - - uZ1 = (ua_ugrid * numpy.cos(theta) * numpy.cos(uphi_ugrid) - - va_ugrid * numpy.sin(theta) * numpy.sin(vphi_ugrid)) - uZ2 = (ua_ugrid * numpy.cos(theta) * numpy.sin(uphi_ugrid) + - va_ugrid * numpy.sin(theta) * numpy.cos(vphi_ugrid)) + ua_ugrid = numpy.array(webtide[webtide.const == (base + ":")].ewamp) * corr + va_ugrid = numpy.array(webtide[webtide.const == (base + ":")].nsamp) * corr + uphi_ugrid = numpy.radians( + numpy.array(webtide[webtide.const == (base + ":")].ewpha) + ) + vphi_ugrid = numpy.radians( + numpy.array(webtide[webtide.const == (base + ":")].nspha) + ) + + uZ1 = ua_ugrid * numpy.cos(theta) * numpy.cos( + uphi_ugrid + ) - va_ugrid * numpy.sin(theta) * numpy.sin(vphi_ugrid) + uZ2 = ua_ugrid * numpy.cos(theta) * numpy.sin( + uphi_ugrid + ) + va_ugrid * numpy.sin(theta) * numpy.cos(vphi_ugrid) # adjustments for phase correction - amp = numpy.sqrt(uZ1[:]**2 + uZ2[:]**2) + amp = numpy.sqrt(uZ1[:] ** 2 + uZ2[:] ** 2) pha = [] for i in range(0, len(amp)): - pha.append(math.atan2(uZ2[i], uZ1[i]) + numpy.radians(corr_pha + corr_shift)) + pha.append( + math.atan2(uZ2[i], uZ1[i]) + numpy.radians(corr_pha + corr_shift) + ) if constituent == "P1": amp = amp * 0.310 @@ -168,48 +193,65 @@ def get_data_from_csv(tidevar, constituent, depth, CFactor, ibmin, ibmax, # find the boundary I = numpy.arange(ibmin, ibmax) - #allocate the z1 and z2 I calculated from Webtide to the boundary cells - #along western boundary, etaZ1 and etaZ2 are 0 in masked cells - #(CHECK: Are these allocated in the right order?) - Z1 = numpy.zeros((boundlen,1)) - Z2 = numpy.zeros((boundlen,1)) - Z1[gap:boundlen-inorth,0] = uZ1 - Z2[gap:boundlen-inorth,0] = uZ2 - - Z1[0:gap,0] = Z1[gap, 0] - Z2[0:gap,0] = Z2[gap, 0] - - Z1[boundlen-inorth:, 0] = Z1[boundlen-inorth-1, 0] - Z2[boundlen-inorth:, 0] = Z2[boundlen-inorth-1, 0] - - #V VELOCITY - if tidevar == 'V': - webtide = pd.read_csv(Vfilename,\ - skiprows = 2) - webtide = webtide.rename(columns={'Constituent': 'const', 'Longitude': 'lon', 'Latitude': 'lat', \ - 'U Amplitude (m)': 'ewamp', 'U Phase (deg GMT)': 'ewpha',\ - 'V Amplitude (m)': 'nsamp', 'V Phase (deg GMT)': 'nspha'}) + # allocate the z1 and z2 I calculated from Webtide to the boundary cells + # along western boundary, etaZ1 and etaZ2 are 0 in masked cells + # (CHECK: Are these allocated in the right order?) 
+ Z1 = numpy.zeros((boundlen, 1)) + Z2 = numpy.zeros((boundlen, 1)) + Z1[gap : boundlen - inorth, 0] = uZ1 + Z2[gap : boundlen - inorth, 0] = uZ2 + + Z1[0:gap, 0] = Z1[gap, 0] + Z2[0:gap, 0] = Z2[gap, 0] + + Z1[boundlen - inorth :, 0] = Z1[boundlen - inorth - 1, 0] + Z2[boundlen - inorth :, 0] = Z2[boundlen - inorth - 1, 0] + + # V VELOCITY + if tidevar == "V": + webtide = pd.read_csv(Vfilename, skiprows=2) + webtide = webtide.rename( + columns={ + "Constituent": "const", + "Longitude": "lon", + "Latitude": "lat", + "U Amplitude (m)": "ewamp", + "U Phase (deg GMT)": "ewpha", + "V Amplitude (m)": "nsamp", + "V Phase (deg GMT)": "nspha", + } + ) # number of points from webtide - nwebtide = int(webtide.shape[0]/8.) + nwebtide = int(webtide.shape[0] / 8.0) # how long is the boundary? boundlen = ibmax - ibmin gap = boundlen - nwebtide - inorth - #Convert phase from north/south into grid co-ordinates (see docs/tides/tides_data_acquisition for details) - ua_vgrid = numpy.array(webtide[webtide.const==(base+':')].ewamp)*corr - va_vgrid = numpy.array(webtide[webtide.const==(base+':')].nsamp)*corr - uphi_vgrid = numpy.radians(numpy.array(webtide[webtide.const==(base+':')].ewpha)) - vphi_vgrid = numpy.radians(numpy.array(webtide[webtide.const==(base+':')].nspha)) - - vZ1 = -ua_vgrid*numpy.sin(theta)*numpy.cos(uphi_vgrid) - va_vgrid*numpy.cos(theta)*numpy.sin(vphi_vgrid) - vZ2 = -ua_vgrid*numpy.sin(theta)*numpy.sin(uphi_vgrid) + va_vgrid*numpy.cos(theta)*numpy.cos(vphi_vgrid) + # Convert phase from north/south into grid co-ordinates (see docs/tides/tides_data_acquisition for details) + ua_vgrid = numpy.array(webtide[webtide.const == (base + ":")].ewamp) * corr + va_vgrid = numpy.array(webtide[webtide.const == (base + ":")].nsamp) * corr + uphi_vgrid = numpy.radians( + numpy.array(webtide[webtide.const == (base + ":")].ewpha) + ) + vphi_vgrid = numpy.radians( + numpy.array(webtide[webtide.const == (base + ":")].nspha) + ) + + vZ1 = -ua_vgrid * numpy.sin(theta) * numpy.cos( + uphi_vgrid + ) - va_vgrid * numpy.cos(theta) * numpy.sin(vphi_vgrid) + vZ2 = -ua_vgrid * numpy.sin(theta) * numpy.sin( + uphi_vgrid + ) + va_vgrid * numpy.cos(theta) * numpy.cos(vphi_vgrid) # adjustments for phase correction - amp = numpy.sqrt(vZ1[:]**2 + vZ2[:]**2); - pha=[] - for i in range(0,len(amp)): - pha.append(math.atan2(vZ2[i],vZ1[i])+numpy.radians(corr_pha+corr_shift)) + amp = numpy.sqrt(vZ1[:] ** 2 + vZ2[:] ** 2) + pha = [] + for i in range(0, len(amp)): + pha.append( + math.atan2(vZ2[i], vZ1[i]) + numpy.radians(corr_pha + corr_shift) + ) if constituent == "P1": amp = amp * 0.310 @@ -218,81 +260,99 @@ def get_data_from_csv(tidevar, constituent, depth, CFactor, ibmin, ibmax, amp = amp * 0.235 pha[:] = [phase - numpy.radians(5.7) for phase in pha] - vZ1 = amp*numpy.cos(pha)*corr_amp - vZ2 = amp*numpy.sin(pha)*corr_amp + vZ1 = amp * numpy.cos(pha) * corr_amp + vZ2 = amp * numpy.sin(pha) * corr_amp - #find the boundary + # find the boundary I = numpy.arange(ibmin, ibmax) - #allocate the z1 and z2 I calculated from Webtide to the boundary cells - #along western boundary, etaZ1 and etaZ2 are 0 in masked cells - #(CHECK: Are these allocated in the right order?) 
- Z1 = numpy.zeros((boundlen,1)) - Z2 = numpy.zeros((boundlen,1)) - Z1[gap:boundlen-inorth, 0] = vZ1 - Z2[gap:boundlen-inorth ,0] = vZ2 - Z1[0:gap,0] = Z1[gap, 0] - Z2[0:gap,0] = Z2[gap, 0] - - Z1[boundlen-inorth:, 0] = Z1[boundlen-inorth-1, 0] - Z2[boundlen-inorth:, 0] = Z2[boundlen-inorth-1, 0] + # allocate the z1 and z2 I calculated from Webtide to the boundary cells + # along western boundary, etaZ1 and etaZ2 are 0 in masked cells + # (CHECK: Are these allocated in the right order?) + Z1 = numpy.zeros((boundlen, 1)) + Z2 = numpy.zeros((boundlen, 1)) + Z1[gap : boundlen - inorth, 0] = vZ1 + Z2[gap : boundlen - inorth, 0] = vZ2 + Z1[0:gap, 0] = Z1[gap, 0] + Z2[0:gap, 0] = Z2[gap, 0] + Z1[boundlen - inorth :, 0] = Z1[boundlen - inorth - 1, 0] + Z2[boundlen - inorth :, 0] = Z2[boundlen - inorth - 1, 0] return Z1, Z2, I, boundlen -#Define a function that creates Netcdf files from the following information + +# Define a function that creates Netcdf files from the following information # - choose variable (elevation ('T'), u ('U') or v ('V')) # - choose constituent ('O1', 'P1', 'Q1', 'K1', 'K2', 'N2', 'M2', 'S2') # - give z1 and z2 data # - depth data -def create_tide_netcdf(tidevar, constituent, depth, number, code, CFactors, - ibmin, ibmax, - Tfilename='Tidal Elevation Constituents T.csv', - Ufilename='Tidal Current Constituents U.csv', - Vfilename='Tidal Current Constituents V.csv', inorth=0 +def create_tide_netcdf( + tidevar, + constituent, + depth, + number, + code, + CFactors, + ibmin, + ibmax, + Tfilename="Tidal Elevation Constituents T.csv", + Ufilename="Tidal Current Constituents U.csv", + Vfilename="Tidal Current Constituents V.csv", + inorth=0, ): import netCDF4 as NC import numpy # get the data from the csv file Z1, Z2, I, boundlen = get_data_from_csv( - tidevar, constituent, depth, CFactors, ibmin, ibmax, - Tfilename, Ufilename, Vfilename, inorth) + tidevar, + constituent, + depth, + CFactors, + ibmin, + ibmax, + Tfilename, + Ufilename, + Vfilename, + inorth, + ) if len(number) == 1: - name = 'SalishSea' + number + name = "SalishSea" + number else: name = number nemo = NC.Dataset( - name+'_'+code+'_west_tide_'+constituent+'_grid_'+tidevar+'.nc', - 'w') - nemo.description = 'Tide data from WebTide' + name + "_" + code + "_west_tide_" + constituent + "_grid_" + tidevar + ".nc", + "w", + ) + nemo.description = "Tide data from WebTide" # give the netcdf some dimensions - nemo.createDimension('xb', boundlen) - nemo.createDimension('yb', 1) + nemo.createDimension("xb", boundlen) + nemo.createDimension("yb", 1) # add in the counter around the boundary # (taken from Susan's code in Prepare Tide Files) - xb = nemo.createVariable('xb', 'int32', ('xb',), zlib=True) - xb.units = 'non dim' - xb.longname = 'counter around boundary' - yb = nemo.createVariable('yb', 'int32', ('yb',), zlib=True) - yb.units = 'non dim' + xb = nemo.createVariable("xb", "int32", ("xb",), zlib=True) + xb.units = "non dim" + xb.longname = "counter around boundary" + yb = nemo.createVariable("yb", "int32", ("yb",), zlib=True) + yb.units = "non dim" xb[:] = I[:] yb[0] = 1 # create i and j grid position - nbidta = nemo.createVariable('nbidta', 'int32', ('yb', 'xb'), zlib=True) - nbidta.units = 'non dim' - nbidta.longname = 'i grid position' - nbjdta = nemo.createVariable('nbjdta', 'int32', ('yb', 'xb'), zlib=True) - nbjdta.units = 'non dim' - nbjdta.longname = 'j grid position' - nbrdta = nemo.createVariable('nbrdta', 'int32', ('yb', 'xb'), zlib=True) - nbrdta.units = 'non dim' + nbidta = nemo.createVariable("nbidta", 
"int32", ("yb", "xb"), zlib=True) + nbidta.units = "non dim" + nbidta.longname = "i grid position" + nbjdta = nemo.createVariable("nbjdta", "int32", ("yb", "xb"), zlib=True) + nbjdta.units = "non dim" + nbjdta.longname = "j grid position" + nbrdta = nemo.createVariable("nbrdta", "int32", ("yb", "xb"), zlib=True) + nbrdta.units = "non dim" # give values for West Boundary (this is where the webtide points go) nbidta[:] = 1 @@ -301,33 +361,33 @@ def create_tide_netcdf(tidevar, constituent, depth, number, code, CFactors, # give values for the corner nbrdta[:] = 1 - if tidevar == 'T': - z1 = nemo.createVariable('z1', 'float32', ('yb', 'xb'), zlib=True) - z1.units = 'm' - z1.longname = 'tidal elevation: cosine' - z2 = nemo.createVariable('z2', 'float32', ('yb', 'xb'), zlib=True) - z2.units = 'm' - z2.longname = 'tidal elevation: sine' + if tidevar == "T": + z1 = nemo.createVariable("z1", "float32", ("yb", "xb"), zlib=True) + z1.units = "m" + z1.longname = "tidal elevation: cosine" + z2 = nemo.createVariable("z2", "float32", ("yb", "xb"), zlib=True) + z2.units = "m" + z2.longname = "tidal elevation: sine" z1[0, 0:boundlen] = Z1[:, 0] z2[0, 0:boundlen] = Z2[:, 0] - if tidevar == 'U': - u1 = nemo.createVariable('u1', 'float32', ('yb', 'xb'), zlib=True) - u1.units = 'm' - u1.longname = 'tidal x-velocity: cosine' - u2 = nemo.createVariable('u2', 'float32', ('yb', 'xb'), zlib=True) - u2.units = 'm' - u2.longname = 'tidal x-velocity: sine' + if tidevar == "U": + u1 = nemo.createVariable("u1", "float32", ("yb", "xb"), zlib=True) + u1.units = "m" + u1.longname = "tidal x-velocity: cosine" + u2 = nemo.createVariable("u2", "float32", ("yb", "xb"), zlib=True) + u2.units = "m" + u2.longname = "tidal x-velocity: sine" u1[0, 0:boundlen] = Z1[:, 0] u2[0, 0:boundlen] = Z2[:, 0] - if tidevar == 'V': - v1 = nemo.createVariable('v1', 'float32', ('yb', 'xb'), zlib=True) - v1.units = 'm' - v1.longname = 'tidal y-velocity: cosine' - v2 = nemo.createVariable('v2', 'float32', ('yb', 'xb'), zlib=True) - v2.units = 'm' - v2.longname = 'tidal y-velocity: sine' + if tidevar == "V": + v1 = nemo.createVariable("v1", "float32", ("yb", "xb"), zlib=True) + v1.units = "m" + v1.longname = "tidal y-velocity: cosine" + v2 = nemo.createVariable("v2", "float32", ("yb", "xb"), zlib=True) + v2.units = "m" + v2.longname = "tidal y-velocity: sine" v1[0, 0:boundlen] = Z1[:, 0] v2[0, 0:boundlen] = Z2[:, 0] @@ -335,180 +395,190 @@ def create_tide_netcdf(tidevar, constituent, depth, number, code, CFactors, nemo.close() -def create_northern_tides(Z1,Z2,tidevar,constituent,code, name='SalishSea2'): +def create_northern_tides(Z1, Z2, tidevar, constituent, code, name="SalishSea2"): import netCDF4 as NC import numpy as np from salishsea_tools import nc_tools - nemo = NC.Dataset(name+'_'+code+'_North_tide_'+constituent+'_grid_'+tidevar+'.nc', 'w', zlib=True) + nemo = NC.Dataset( + name + "_" + code + "_North_tide_" + constituent + "_grid_" + tidevar + ".nc", + "w", + zlib=True, + ) - #start and end points + # start and end points starti = 32 endi = 62 - lengthi = endi-starti + lengthi = endi - starti # dataset attributes nc_tools.init_dataset_attrs( nemo, - title='Tidal Boundary Conditions for Northern Boundary', - notebook_name='johnstone_strait_tides', - nc_filepath='../../../nemo-forcing/open_boundaries/north/SalishSea2_North_tide_'+constituent+'_grid_'+tidevar+'.nc', - comment='Tidal current and amplitude data from Thomson & Huggett 1980') + title="Tidal Boundary Conditions for Northern Boundary", + notebook_name="johnstone_strait_tides", + 
nc_filepath="../../../nemo-forcing/open_boundaries/north/SalishSea2_North_tide_" + + constituent + + "_grid_" + + tidevar + + ".nc", + comment="Tidal current and amplitude data from Thomson & Huggett 1980", + ) # dimensions (only need x and y, don't need depth or time_counter) - nemo.createDimension('xb', lengthi) - nemo.createDimension('yb', 1) + nemo.createDimension("xb", lengthi) + nemo.createDimension("yb", 1) # variables # nbidta, ndjdta, ndrdta - nbidta = nemo.createVariable('nbidta', 'int32' , ('yb','xb')) - nbidta.long_name = 'i grid position' + nbidta = nemo.createVariable("nbidta", "int32", ("yb", "xb")) + nbidta.long_name = "i grid position" nbidta.units = 1 - nbjdta = nemo.createVariable('nbjdta', 'int32' , ('yb','xb')) - nbjdta.long_name = 'j grid position' + nbjdta = nemo.createVariable("nbjdta", "int32", ("yb", "xb")) + nbjdta.long_name = "j grid position" nbjdta.units = 1 - nbrdta = nemo.createVariable('nbrdta', 'int32' , ('yb','xb')) - nbrdta.long_name = 'position from boundary' + nbrdta = nemo.createVariable("nbrdta", "int32", ("yb", "xb")) + nbrdta.long_name = "position from boundary" nbrdta.units = 1 # add in the counter around the boundary (taken from Susan's code in Prepare Tide Files) - xb = nemo.createVariable('xb', 'int32', ('xb',),zlib=True) - xb.units = 'non dim' - xb.long_name = 'counter around boundary' - yb = nemo.createVariable('yb', 'int32', ('yb',),zlib=True) - yb.units = 'non dim' - yb.long_name = 'counter along boundary' + xb = nemo.createVariable("xb", "int32", ("xb",), zlib=True) + xb.units = "non dim" + xb.long_name = "counter around boundary" + yb = nemo.createVariable("yb", "int32", ("yb",), zlib=True) + yb.units = "non dim" + yb.long_name = "counter along boundary" yb[0] = 897 - xb[:] = np.arange(starti,endi) + xb[:] = np.arange(starti, endi) # values # nbidta, nbjdta - nbidta[:] = np.arange(starti,endi) + nbidta[:] = np.arange(starti, endi) nbjdta[:] = 897 nbrdta[:] = 1 - if tidevar=='T': - z1 = nemo.createVariable('z1','float32',('yb','xb'),zlib=True) - z1.units = 'm' - z1.long_name = 'tidal elevation: cosine' - z2 = nemo.createVariable('z2','float32',('yb','xb'),zlib=True) - z2.units = 'm' - z2.long_name = 'tidal elevation: sine' - z1[0,:] = np.array([Z1]*lengthi) - z2[0,:] = np.array([Z2]*lengthi) - - if tidevar=='U': - u1 = nemo.createVariable('u1','float32',('yb','xb'),zlib=True) - u1.units = 'm' - u1.long_name = 'tidal x-velocity: cosine' - u2 = nemo.createVariable('u2','float32',('yb','xb'),zlib=True) - u2.units = 'm' - u2.long_name = 'tidal x-velocity: sine' - u1[0,0:lengthi] = Z1[:,0] - u2[0,0:lengthi] = Z2[:,0] - - if tidevar=='V': - v1 = nemo.createVariable('v1','float32',('yb','xb'),zlib=True) - v1.units = 'm' - v1.long_name = 'tidal y-velocity: cosine' - v2 = nemo.createVariable('v2','float32',('yb','xb'),zlib=True) - v2.units = 'm' - v2.long_name = 'tidal y-velocity: sine' - v1[0,0:lengthi] = Z1[:,0] - v2[0,0:lengthi] = Z2[:,0] + if tidevar == "T": + z1 = nemo.createVariable("z1", "float32", ("yb", "xb"), zlib=True) + z1.units = "m" + z1.long_name = "tidal elevation: cosine" + z2 = nemo.createVariable("z2", "float32", ("yb", "xb"), zlib=True) + z2.units = "m" + z2.long_name = "tidal elevation: sine" + z1[0, :] = np.array([Z1] * lengthi) + z2[0, :] = np.array([Z2] * lengthi) + + if tidevar == "U": + u1 = nemo.createVariable("u1", "float32", ("yb", "xb"), zlib=True) + u1.units = "m" + u1.long_name = "tidal x-velocity: cosine" + u2 = nemo.createVariable("u2", "float32", ("yb", "xb"), zlib=True) + u2.units = "m" + u2.long_name = "tidal 
x-velocity: sine" + u1[0, 0:lengthi] = Z1[:, 0] + u2[0, 0:lengthi] = Z2[:, 0] + + if tidevar == "V": + v1 = nemo.createVariable("v1", "float32", ("yb", "xb"), zlib=True) + v1.units = "m" + v1.long_name = "tidal y-velocity: cosine" + v2 = nemo.createVariable("v2", "float32", ("yb", "xb"), zlib=True) + v2.units = "m" + v2.long_name = "tidal y-velocity: sine" + v1[0, 0:lengthi] = Z1[:, 0] + v2[0, 0:lengthi] = Z2[:, 0] nc_tools.check_dataset_attrs(nemo) nemo.close() -def create_northern_tides_contd(Z1,Z2,tidevar,constituent,code, name='SalishSea2'): + +def create_northern_tides_contd(Z1, Z2, tidevar, constituent, code, name="SalishSea2"): import netCDF4 as NC import numpy as np from salishsea_tools import nc_tools - nemo = NC.Dataset(name+'_'+code+'_North_tide_'+constituent+'_grid_'+tidevar+'.nc', 'w', zlib=True) + nemo = NC.Dataset( + name + "_" + code + "_North_tide_" + constituent + "_grid_" + tidevar + ".nc", + "w", + zlib=True, + ) - #start and end points + # start and end points starti = 32 endi = 62 - lengthi = endi-starti + lengthi = endi - starti # dataset attributes nc_tools.init_dataset_attrs( nemo, - title='Tidal Boundary Conditions for Northern Boundary', - notebook_name='johnstone_tides_contd', - nc_filepath='../../../NEMO-forcing/open_boundaries/north/tides/SalishSea2_North_tide_'+constituent+'_grid_'+tidevar+'.nc', - comment='Tidal current and amplitude data scaled based on differences between K1/M2 and North observations and webtide.') + title="Tidal Boundary Conditions for Northern Boundary", + notebook_name="johnstone_tides_contd", + nc_filepath="../../../NEMO-forcing/open_boundaries/north/tides/SalishSea2_North_tide_" + + constituent + + "_grid_" + + tidevar + + ".nc", + comment="Tidal current and amplitude data scaled based on differences between K1/M2 and North observations and webtide.", + ) # dimensions (only need x and y, don't need depth or time_counter) - nemo.createDimension('xb', lengthi) - nemo.createDimension('yb', 1) + nemo.createDimension("xb", lengthi) + nemo.createDimension("yb", 1) # variables # nbidta, ndjdta, ndrdta - nbidta = nemo.createVariable('nbidta', 'int32' , ('yb','xb')) - nbidta.long_name = 'i grid position' + nbidta = nemo.createVariable("nbidta", "int32", ("yb", "xb")) + nbidta.long_name = "i grid position" nbidta.units = 1 - nbjdta = nemo.createVariable('nbjdta', 'int32' , ('yb','xb')) - nbjdta.long_name = 'j grid position' + nbjdta = nemo.createVariable("nbjdta", "int32", ("yb", "xb")) + nbjdta.long_name = "j grid position" nbjdta.units = 1 - nbrdta = nemo.createVariable('nbrdta', 'int32' , ('yb','xb')) - nbrdta.long_name = 'position from boundary' + nbrdta = nemo.createVariable("nbrdta", "int32", ("yb", "xb")) + nbrdta.long_name = "position from boundary" nbrdta.units = 1 - print (nbidta.shape) + print(nbidta.shape) # add in the counter around the boundary (taken from Susan's code in Prepare Tide Files) - xb = nemo.createVariable('xb', 'int32', ('xb',),zlib=True) - xb.units = 'non dim' - xb.long_name = 'counter around boundary' - yb = nemo.createVariable('yb', 'int32', ('yb',),zlib=True) - yb.units = 'non dim' - yb.long_name = 'counter along boundary' + xb = nemo.createVariable("xb", "int32", ("xb",), zlib=True) + xb.units = "non dim" + xb.long_name = "counter around boundary" + yb = nemo.createVariable("yb", "int32", ("yb",), zlib=True) + yb.units = "non dim" + yb.long_name = "counter along boundary" yb[0] = 897 - xb[:] = np.arange(starti,endi) + xb[:] = np.arange(starti, endi) # values # nbidta, nbjdta - nbidta[:] = np.arange(starti,endi) + 
nbidta[:] = np.arange(starti, endi) nbjdta[:] = 897 nbrdta[:] = 1 - if tidevar=='T': - z1 = nemo.createVariable('z1','float32',('yb','xb'),zlib=True) - z1.units = 'm' - z1.long_name = 'tidal elevation: cosine' - z2 = nemo.createVariable('z2','float32',('yb','xb'),zlib=True) - z2.units = 'm' - z2.long_name = 'tidal elevation: sine' - z1[0,:] = np.array([Z1]*lengthi) - z2[0,:] = np.array([Z2]*lengthi) - - if tidevar=='U': - u1 = nemo.createVariable('u1','float32',('yb','xb'),zlib=True) - u1.units = 'm' - u1.long_name = 'tidal x-velocity: cosine' - u2 = nemo.createVariable('u2','float32',('yb','xb'),zlib=True) - u2.units = 'm' - u2.long_name = 'tidal x-velocity: sine' - u1[0,0:lengthi] = Z1[:,0] - u2[0,0:lengthi] = Z2[:,0] - - if tidevar=='V': - v1 = nemo.createVariable('v1','float32',('yb','xb'),zlib=True) - v1.units = 'm' - v1.long_name = 'tidal y-velocity: cosine' - v2 = nemo.createVariable('v2','float32',('yb','xb'),zlib=True) - v2.units = 'm' - v2.long_name = 'tidal y-velocity: sine' - v1[0,0:lengthi] = Z1[:,0] - v2[0,0:lengthi] = Z2[:,0] + if tidevar == "T": + z1 = nemo.createVariable("z1", "float32", ("yb", "xb"), zlib=True) + z1.units = "m" + z1.long_name = "tidal elevation: cosine" + z2 = nemo.createVariable("z2", "float32", ("yb", "xb"), zlib=True) + z2.units = "m" + z2.long_name = "tidal elevation: sine" + z1[0, :] = np.array([Z1] * lengthi) + z2[0, :] = np.array([Z2] * lengthi) + + if tidevar == "U": + u1 = nemo.createVariable("u1", "float32", ("yb", "xb"), zlib=True) + u1.units = "m" + u1.long_name = "tidal x-velocity: cosine" + u2 = nemo.createVariable("u2", "float32", ("yb", "xb"), zlib=True) + u2.units = "m" + u2.long_name = "tidal x-velocity: sine" + u1[0, 0:lengthi] = Z1[:, 0] + u2[0, 0:lengthi] = Z2[:, 0] + + if tidevar == "V": + v1 = nemo.createVariable("v1", "float32", ("yb", "xb"), zlib=True) + v1.units = "m" + v1.long_name = "tidal y-velocity: cosine" + v2 = nemo.createVariable("v2", "float32", ("yb", "xb"), zlib=True) + v2.units = "m" + v2.long_name = "tidal y-velocity: sine" + v1[0, 0:lengthi] = Z1[:, 0] + v2[0, 0:lengthi] = Z2[:, 0] nc_tools.check_dataset_attrs(nemo) nemo.close() - - - - - - - - - diff --git a/Run_Files/AMM_multi.pbs b/Run_Files/AMM_multi.pbs index 2ab60af1..6fd30302 100644 --- a/Run_Files/AMM_multi.pbs +++ b/Run_Files/AMM_multi.pbs @@ -2,7 +2,7 @@ #PBS -l procs=32 #PBS -l pmem=500mb -#PBS -l walltime=00:15:00 +#PBS -l walltime=00:15:00 module load compiler/intel/12.1 module load library/intelmpi/4.0.3.008 diff --git a/Run_Files/GYRE.pbs b/Run_Files/GYRE.pbs index 524ebcd6..a2a84184 100644 --- a/Run_Files/GYRE.pbs +++ b/Run_Files/GYRE.pbs @@ -2,7 +2,7 @@ #PBS -l procs=1 #PBS -l pmem=500mb -#PBS -l walltime=00:05:00 +#PBS -l walltime=00:05:00 module load compiler/intel/12.1 module load library/intelmpi/4.0.3.008 diff --git a/Run_Files/arch-salish.fcm b/Run_Files/arch-salish.fcm index 12087e4b..4b1ee45b 100644 --- a/Run_Files/arch-salish.fcm +++ b/Run_Files/arch-salish.fcm @@ -28,4 +28,3 @@ %MK make %USER_INC %NCDF_INC %USER_LIB %NCDF_LIB - diff --git a/SOGTools/notebooks/SOG_plotting.ipynb b/SOGTools/notebooks/SOG_plotting.ipynb index faaf4606..a56b4f09 100644 --- a/SOGTools/notebooks/SOG_plotting.ipynb +++ b/SOGTools/notebooks/SOG_plotting.ipynb @@ -1051,4 +1051,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/SOGTools/sog_tools/SOG_loader.py b/SOGTools/sog_tools/SOG_loader.py index 8714f57a..ebbc24a0 100644 --- a/SOGTools/sog_tools/SOG_loader.py +++ b/SOGTools/sog_tools/SOG_loader.py @@ -24,73 +24,81 @@ def load_TS(filename): - 
'''Load the timeseries file from path FILENAME into a dataframe TS_OUT - ''' + """Load the timeseries file from path FILENAME into a dataframe TS_OUT""" # Load timeseries file and extract headers - file_obj = open(filename, 'rt') + file_obj = open(filename, "rt") for index, line in enumerate(file_obj): line = line.strip() - if line.startswith('*FieldNames:'): - field_names = line.split(': ', 1)[1].split(', ') - elif line.startswith('*FieldUnits:'): - field_units = line.split(': ', 1)[1].split(', ') - elif line.startswith('*EndOfHeader'): + if line.startswith("*FieldNames:"): + field_names = line.split(": ", 1)[1].split(", ") + elif line.startswith("*FieldUnits:"): + field_units = line.split(": ", 1)[1].split(", ") + elif line.startswith("*EndOfHeader"): break # Read timeseries data into dataframe and assign header data = pd.read_csv( - filename, delim_whitespace=True, header=0, names=field_names, - skiprows=index+1, + filename, + delim_whitespace=True, + header=0, + names=field_names, + skiprows=index + 1, ) # Extract startdate and convert to MPL time datetime_start = parse( - field_units[0].split('hr since ', 1)[1].split(' LST', 1)[0], + field_units[0].split("hr since ", 1)[1].split(" LST", 1)[0], ) # Create date dataframe and append to DATA - date = pd.DataFrame({ - 'date': [ - datetime_start + timedelta(hours=hour) for hour in data['time'] - ], - }) - TS_out = pd.concat([date, data], axis=1).set_index('date').to_xarray() + date = pd.DataFrame( + { + "date": [datetime_start + timedelta(hours=hour) for hour in data["time"]], + } + ) + TS_out = pd.concat([date, data], axis=1).set_index("date").to_xarray() return TS_out def load_hoff(filename): - '''Load the hoffmueller file from path FILENAME into a panel HOFF_OUT - ''' + """Load the hoffmueller file from path FILENAME into a panel HOFF_OUT""" # Load timeseries file and extract headers - file_obj = open(filename, 'rt') + file_obj = open(filename, "rt") for index, line in enumerate(file_obj): line = line.strip() - if line.startswith('*FieldNames:'): - field_names = line.split(': ', 1)[1].split(', ') - elif line.startswith('*FieldUnits:'): - field_units = line.split(': ', 1)[1].split(', ') - elif line.startswith('*HoffmuellerStartYr:'): - year_start = line.split(': ', 1)[1] - elif line.startswith('*HoffmuellerStartDay:'): - day_start = line.split(': ', 1)[1] - elif line.startswith('*HoffmuellerStartSec:'): - sec_start = line.split(': ', 1)[1] - elif line.startswith('*HoffmuellerInterval:'): - interval = line.split(': ', 1)[1] - elif line.startswith('*EndOfHeader'): + if line.startswith("*FieldNames:"): + field_names = line.split(": ", 1)[1].split(", ") + elif line.startswith("*FieldUnits:"): + field_units = line.split(": ", 1)[1].split(", ") + elif line.startswith("*HoffmuellerStartYr:"): + year_start = line.split(": ", 1)[1] + elif line.startswith("*HoffmuellerStartDay:"): + day_start = line.split(": ", 1)[1] + elif line.startswith("*HoffmuellerStartSec:"): + sec_start = line.split(": ", 1)[1] + elif line.startswith("*HoffmuellerInterval:"): + interval = line.split(": ", 1)[1] + elif line.startswith("*EndOfHeader"): break # Read timeseries data into dataframe and assign header - data = pd.read_csv(filename, delim_whitespace=True, header=0, - names=field_names, skiprows=index, chunksize=82, - index_col=0) + data = pd.read_csv( + filename, + delim_whitespace=True, + header=0, + names=field_names, + skiprows=index, + chunksize=82, + index_col=0, + ) # Timestamp in matplotlib time datetime_start = datetime.strptime( - year_start + day_start, 
'%Y%j', + year_start + day_start, + "%Y%j", ) + timedelta(seconds=int(sec_start)) # Extract dataframe chunks @@ -98,88 +106,107 @@ def load_hoff(filename): data_list = [] for index, chunk in enumerate(data): datetime_index.append( - datetime_start + timedelta(days=index*float(interval)), + datetime_start + timedelta(days=index * float(interval)), ) data_list.append(chunk.to_xarray()) # Concatenate xarray dataset list along time axis hoff_out = xr.concat( - data_list, dim=xr.DataArray(datetime_index, name='time', dims='time'), + data_list, + dim=xr.DataArray(datetime_index, name="time", dims="time"), ) return hoff_out def loadSOG(filepath): - '''Loads SOG timeseries and hoffmueller files from FILEPATH and returns + """Loads SOG timeseries and hoffmueller files from FILEPATH and returns Pandas dataframes PHYS_TS, BIO_TS, CHEM_TS, and panel HOFF - ''' + """ # Load timeseries and hoff files from FILEPATH - phys_TS = load_TS(filepath + 'timeseries/std_phys_SOG.out') - bio_TS = load_TS(filepath + 'timeseries/std_bio_SOG.out') - chem_TS = load_TS(filepath + 'timeseries/std_chem_SOG.out') - hoff = load_hoff(filepath + 'profiles/hoff-SOG.dat') + phys_TS = load_TS(filepath + "timeseries/std_phys_SOG.out") + bio_TS = load_TS(filepath + "timeseries/std_bio_SOG.out") + chem_TS = load_TS(filepath + "timeseries/std_chem_SOG.out") + hoff = load_hoff(filepath + "profiles/hoff-SOG.dat") # Construct depth array for calcs depth_array = hoff.minor_axis.values - date_array = hoff.major_axis.values + date_array = hoff.major_axis.values depth, dummy = np.meshgrid(depth_array, np.ones(date_array.size)) # Calculate surface pH and Omega_A pH_sur, Omega_A_sur = carb.calc_carbonate( - chem_TS['surface alkalinity'], # TAlk [uM] - chem_TS['surface DIC concentration'], # DIC [uM] - calc_sigma(phys_TS['surface temperature'], - phys_TS['surface salinity']), # sigma_t [kg m3] - phys_TS['surface salinity'], # salinity [PSS 78] - phys_TS['surface temperature'], # temperature [deg C] - 0.0, # pressure [dbar] - bio_TS['surface nitrate concentration'] / 16, # phosphate [uM] - bio_TS['surface silicon concentration']) # silicate [uM] + chem_TS["surface alkalinity"], # TAlk [uM] + chem_TS["surface DIC concentration"], # DIC [uM] + calc_sigma( + phys_TS["surface temperature"], phys_TS["surface salinity"] + ), # sigma_t [kg m3] + phys_TS["surface salinity"], # salinity [PSS 78] + phys_TS["surface temperature"], # temperature [deg C] + 0.0, # pressure [dbar] + bio_TS["surface nitrate concentration"] / 16, # phosphate [uM] + bio_TS["surface silicon concentration"], + ) # silicate [uM] # Calculate 3 m avg pH and Omega_A - pH_3m, Omega_A_3m = carb.calc_carbonate( - chem_TS['3 m avg alkalinity'], # TAlk [uM] - chem_TS['3 m avg DIC concentration'], # DIC [uM] - calc_sigma(phys_TS['3 m avg temperature'], - phys_TS['3 m avg salinity']), # sigma_t [kg m3] - phys_TS['3 m avg salinity'], # salinity [PSS 78] - phys_TS['3 m avg temperature'], # temperature [deg C] - 0.0, # pressure [dbar] - bio_TS['3 m avg nitrate concentration'] / 16, # phosphate [uM] - bio_TS['3 m avg silicon concentration']) # silicate [uM] + pH_3m, Omega_A_3m = carb.calc_carbonate( + chem_TS["3 m avg alkalinity"], # TAlk [uM] + chem_TS["3 m avg DIC concentration"], # DIC [uM] + calc_sigma( + phys_TS["3 m avg temperature"], phys_TS["3 m avg salinity"] + ), # sigma_t [kg m3] + phys_TS["3 m avg salinity"], # salinity [PSS 78] + phys_TS["3 m avg temperature"], # temperature [deg C] + 0.0, # pressure [dbar] + bio_TS["3 m avg nitrate concentration"] / 16, # phosphate [uM] + 
bio_TS["3 m avg silicon concentration"], + ) # silicate [uM] # Calculate hoffmueller pH and Omega_A - hoff['pH'], hoff['Omega_A'] = carb.calc_carbonate( - hoff.ix['alkalinity', :, :], # TAlk [uM] - hoff.ix['dissolved inorganic carbon', :, :], # DIC [uM] - hoff.ix['sigma-t', :, :], # sigma_t [kg m3] - hoff.ix['salinity', :, :], # salinity [PSS 78] - hoff.ix['temperature', :, :], # temperature [deg C] - depth, # pressure [dbar] - hoff.ix['nitrate', :, :] / 16, # phosphate [uM] - hoff.ix['silicon', :, :]) # silicate [uM] + hoff["pH"], hoff["Omega_A"] = carb.calc_carbonate( + hoff.ix["alkalinity", :, :], # TAlk [uM] + hoff.ix["dissolved inorganic carbon", :, :], # DIC [uM] + hoff.ix["sigma-t", :, :], # sigma_t [kg m3] + hoff.ix["salinity", :, :], # salinity [PSS 78] + hoff.ix["temperature", :, :], # temperature [deg C] + depth, # pressure [dbar] + hoff.ix["nitrate", :, :] / 16, # phosphate [uM] + hoff.ix["silicon", :, :], + ) # silicate [uM] # Append pH and Omega timeseries to CHEM_TS - chem_TS = pd.concat([chem_TS, pd.DataFrame({ - 'surface pH': pH_sur, - '3 m avg pH': pH_3m, - 'surface Omega_A': Omega_A_sur, - '3 m avg Omega_A': Omega_A_3m})], axis=1) + chem_TS = pd.concat( + [ + chem_TS, + pd.DataFrame( + { + "surface pH": pH_sur, + "3 m avg pH": pH_3m, + "surface Omega_A": Omega_A_sur, + "3 m avg Omega_A": Omega_A_3m, + } + ), + ], + axis=1, + ) return phys_TS, bio_TS, chem_TS, hoff def loadSOG_batch(filesystem, bloomyear, filestr): - '''Loads SOG timeseries and hoffmueller files given parameters FILESYSTEM, + """Loads SOG timeseries and hoffmueller files given parameters FILESYSTEM, BLOOMYEAR, and FILESTR and returns PHYS_TS, BIO_TS, CHEM_TS, and HOFF - ''' + """ # Specify standard timeseries output paths - filepath = '/ocean/bmoorema/research/SOG/{0}/{1}/{2}/{3}/{3}_{4}/'.format( - filesystem['category'], filesystem['test'], filesystem['type'], - bloomyear, filestr) + filepath = "/ocean/bmoorema/research/SOG/{0}/{1}/{2}/{3}/{3}_{4}/".format( + filesystem["category"], + filesystem["test"], + filesystem["type"], + bloomyear, + filestr, + ) # Load timeseries and hoffmueller files from FILEPATH phys_TS, bio_TS, chem_TS, hoff = loadSOG(filepath) @@ -188,9 +215,9 @@ def loadSOG_batch(filesystem, bloomyear, filestr): def calc_sigma(T, S): - '''Calculate and return density anomaly SIGMA_T given temperature T and + """Calculate and return density anomaly SIGMA_T given temperature T and salinity S - ''' + """ # Calculate the square root of the salinities sqrtS = np.sqrt(S) @@ -198,13 +225,16 @@ def calc_sigma(T, S): # Calculate the density profile at the grid point depths # Pure water density at atmospheric pressure # (Bigg P.H., (1967) Br. J. 
Applied Physics 8 pp 521-537) - R1 = ((((6.536332e-9 * T - 1.120083e-6) * T + 1.001685e-4) * T - - 9.095290e-3) * T + 6.793952e-2) * T - 28.263737 + R1 = ( + (((6.536332e-9 * T - 1.120083e-6) * T + 1.001685e-4) * T - 9.095290e-3) * T + + 6.793952e-2 + ) * T - 28.263737 # Seawater density at atmospheric pressure # Coefficients of salinity - R2 = (((5.3875e-9 * T - 8.2467e-7) * T + 7.6438e-5) * T - - 4.0899e-3) * T + 8.24493e-1 + R2 = ( + ((5.3875e-9 * T - 8.2467e-7) * T + 7.6438e-5) * T - 4.0899e-3 + ) * T + 8.24493e-1 R3 = (-1.6546e-6 * T + 1.0227e-4) * T - 5.72466e-3 # International one-atmosphere equation of state of seawater diff --git a/SOGTools/sog_tools/carbonate.py b/SOGTools/sog_tools/carbonate.py index 3e9ab817..d1d9810e 100644 --- a/SOGTools/sog_tools/carbonate.py +++ b/SOGTools/sog_tools/carbonate.py @@ -25,7 +25,7 @@ import numpy as np -def calc_carbonate(values, TP, TSi, T, S, P, params=['TA', 'TC']): +def calc_carbonate(values, TP, TSi, T, S, P, params=["TA", "TC"]): """Calculate carbonate system parameters given two inputs :arg values: List of input values or arrays corresponding to params arg @@ -69,39 +69,37 @@ def calc_carbonate(values, TP, TSi, T, S, P, params=['TA', 'TC']): # Assign inputs for param, val in zip(params, values): - if param is 'TA': + if param is "TA": TA = val * 1.0e-6 - elif param is 'TC': + elif param is "TC": TC = val * 1.0e-6 - elif param is 'pH': + elif param is "pH": pH = val - elif param is 'pCO2': + elif param is "pCO2": pCO2 = val * 1.0e-6 - elif param is 'OmegaA': + elif param is "OmegaA": OmegaA = val CO3 = OmegaA * KAr / Ca else: - raise ValueError('Unknown CO2 parameter: {}'.format(param)) + raise ValueError("Unknown CO2 parameter: {}".format(param)) # Calculate pH - if 'TA' in params and 'TC' in params: - pH = CalculatepHfromTA('TC', TA, TC, TP, TSi) - elif 'TA' in params and 'pCO2' in params: - pH = CalculatepHfromTA('pCO2', TA, pCO2, TP, TSi) - elif 'TA' in params and 'OmegaA' in params: - pH = CalculatepHfromTA('CO3', TA, CO3, TP, TSi) - elif 'TC' in params and 'pCO2' in params: + if "TA" in params and "TC" in params: + pH = CalculatepHfromTA("TC", TA, TC, TP, TSi) + elif "TA" in params and "pCO2" in params: + pH = CalculatepHfromTA("pCO2", TA, pCO2, TP, TSi) + elif "TA" in params and "OmegaA" in params: + pH = CalculatepHfromTA("CO3", TA, CO3, TP, TSi) + elif "TC" in params and "pCO2" in params: RR = K0 * pCO2 / TC Discr = (K1 * RR) * (K1 * RR) + 4 * (1 - RR) * (K1 * K2 * RR) H = 0.5 * (K1 * RR + np.sqrt(Discr)) / (1 - RR) pH = np.log(H) / np.log(0.1) - elif 'TC' in params and 'OmegaA' in params: - HCO3 = K1 * CO3 / (2 * K2) * ( - np.sqrt(4 * K2 / K1 * (TC / CO3 - 1) + 1) - 1 - ) + elif "TC" in params and "OmegaA" in params: + HCO3 = K1 * CO3 / (2 * K2) * (np.sqrt(4 * K2 / K1 * (TC / CO3 - 1) + 1) - 1) H = K2 * HCO3 / CO3 pH = np.log(H) / np.log(0.1) - elif 'pCO2' in params and 'OmegaA' in params: + elif "pCO2" in params and "OmegaA" in params: TC = np.sqrt(K0 * K1 / K2 * pCO2 * CO3) + K0 * pCO2 + CO3 H = K0 * K1 * pCO2 / (TC - K0 * pCO2 - CO3) pH = np.log(H) / np.log(0.1) @@ -109,11 +107,11 @@ def calc_carbonate(values, TP, TSi, T, S, P, params=['TA', 'TC']): # Fill in remaining params H, Beta = CalculateHfrompH(pH) NCAlk = CalculateNCAlkfrompH(H, TP, TSi) - if 'pH' in params and 'pCO2' in params: + if "pH" in params and "pCO2" in params: TC = K0 * pCO2 * (1 + K1 / H + K1 * K2 / (H * H)) - elif 'pH' in params and 'OmegaA' in params: + elif "pH" in params and "OmegaA" in params: TC = CO3 * (H * H / (K1 * K2) + H / K2 + 1) - if 'TA' in 
params: + if "TA" in params: CAlk = TA - NCAlk TC = CAlk * Beta / (K1 * (H + 2 * K2)) else: @@ -125,11 +123,11 @@ def calc_carbonate(values, TP, TSi, T, S, P, params=['TA', 'TC']): # Unit conversions and dict out Calc_values = { - 'TA': TA * 1e6, - 'TC': TC * 1e6, - 'pH': pH, - 'pCO2': pCO2 * 1e6, - 'OmegaA': OmegaA, + "TA": TA * 1e6, + "TC": TC * 1e6, + "pH": pH, + "pCO2": pCO2 * 1e6, + "OmegaA": OmegaA, } return Calc_values @@ -165,21 +163,19 @@ def set_constants(Sal, TempK, Pdbar): sqrIonS = np.sqrt(IonS) # Calculate H ion activity coefficient - fH = 1.2948 - 0.002036 * TempK + ( - 0.0004607 - 0.000001475 * TempK - ) * Sal**2 + fH = 1.2948 - 0.002036 * TempK + (0.0004607 - 0.000001475 * TempK) * Sal**2 # CALCULATE SEAWATER CONSTITUENTS USING EMPIRCAL FITS # Calculate total borate: # Uppstrom, L., Deep-Sea Research 21:161-162, 1974: # this is 0.000416 * Sali / 35 = 0.0000119 * Sali # TB = (0.000232d0 / 10.811d0) * (Sal / 1.80655d0) ! in mol/kg-SW - TB = 0.0004157 * Sal / 35.0 # in mol/kg-SW + TB = 0.0004157 * Sal / 35.0 # in mol/kg-SW # Calculate total sulfate: # Morris, A. W., and Riley, J. P., Deep-Sea Research 13:699-705, 1966: # this is .02824 * Sali / 35 = .0008067 * Sali - TS = (0.14 / 96.062) * (Sal / 1.80655) # in mol/kg-SW + TS = (0.14 / 96.062) * (Sal / 1.80655) # in mol/kg-SW # Calculate total fluoride: (in mol/kg-SW) # Riley, J. P., Deep-Sea Research 12:219-220, 1965: @@ -195,23 +191,24 @@ def set_constants(Sal, TempK, Pdbar): # TYPO on p. 121: the constant e9 should be e8. # This is from eqs 22 and 23 on p. 123, and Table 4 on p 121: lnKS = ( - -4276.1 / TempK + 141.328 - 23.093 * logTempK + - (-13856.0 / TempK + 324.57 - 47.986 * logTempK) * sqrIonS + - (35474.0 / TempK - 771.54 + 114.723 * logTempK) * IonS + - (-2698.0 / TempK) * sqrIonS * IonS + (1776.0 / TempK) * IonS**2 - ) - KS = ( - np.exp(lnKS) # this is on the free pH scale in mol/kg-H2O - * (1.0 - 0.001005 * Sal) # convert to mol/kg-SW + -4276.1 / TempK + + 141.328 + - 23.093 * logTempK + + (-13856.0 / TempK + 324.57 - 47.986 * logTempK) * sqrIonS + + (35474.0 / TempK - 771.54 + 114.723 * logTempK) * IonS + + (-2698.0 / TempK) * sqrIonS * IonS + + (1776.0 / TempK) * IonS**2 ) + KS = np.exp(lnKS) * ( # this is on the free pH scale in mol/kg-H2O + 1.0 - 0.001005 * Sal + ) # convert to mol/kg-SW # Calculate KF: # Dickson, A. G. and Riley, J. P., Marine Chemistry 7:89-99, 1979: lnKF = 1590.2 / TempK - 12.641 + 1.525 * sqrIonS - KF = ( - np.exp(lnKF) # this is on the free pH scale in mol/kg-H2O - * (1.0 - 0.001005 * Sal) # convert to mol/kg-SW - ) + KF = np.exp(lnKF) * ( # this is on the free pH scale in mol/kg-H2O + 1.0 - 0.001005 * Sal + ) # convert to mol/kg-SW # Calculate pH scale conversion factors ( NOT pressure-corrected) SWStoTOT = (1 + TS / KS) / (1 + TS / KS + TF / KF) @@ -219,8 +216,10 @@ def set_constants(Sal, TempK, Pdbar): # Calculate K0: # Weiss, R. F., Marine Chemistry 2:203-215, 1974. 
lnK0 = ( - -60.2409 + 93.4517 / TempK100 + 23.3585 * np.log(TempK100) + - Sal * (0.023517 - 0.023656 * TempK100 + 0.0047036 * TempK100**2) + -60.2409 + + 93.4517 / TempK100 + + 23.3585 * np.log(TempK100) + + Sal * (0.023517 - 0.023656 * TempK100 + 0.0047036 * TempK100**2) ) K0 = np.exp(lnK0) # this is in mol/kg-SW/atm @@ -237,7 +236,7 @@ def set_constants(Sal, TempK, Pdbar): B1 = -530.659 * Sal**0.5 - 5.8210 * Sal C1 = -2.0664 * Sal**0.5 pK1 = pK10 + A1 + B1 / TempK + C1 * np.log(TempK) - K1 = 10**(-pK1) + K1 = 10 ** (-pK1) # This is from page 141 pK20 = -90.18333 + 5143.692 / TempK + 14.613358 * np.log(TempK) # This is from their table 3, page 140. @@ -245,30 +244,40 @@ def set_constants(Sal, TempK, Pdbar): B2 = -788.289 * Sal**0.5 - 19.189 * Sal C2 = -3.374 * Sal**0.5 pK2 = pK20 + A2 + B2 / TempK + C2 * np.log(TempK) - K2 = 10**(-pK2) + K2 = 10 ** (-pK2) # Calculate KW: # Millero, Geochemica et Cosmochemica Acta 59:661-677, 1995. # his check value of 1.6 umol/kg-SW should be 6.2 lnKW = ( - 148.9802 - 13847.26 / TempK - 23.6521 * logTempK + - (-5.977 + 118.67 / TempK + 1.0495 * logTempK) * sqrSal - - 0.01615 * Sal + 148.9802 + - 13847.26 / TempK + - 23.6521 * logTempK + + (-5.977 + 118.67 / TempK + 1.0495 * logTempK) * sqrSal + - 0.01615 * Sal ) - KW = np.exp(lnKW) # this is on the SWS pH scale in (mol/kg-SW)^2 + KW = np.exp(lnKW) # this is on the SWS pH scale in (mol/kg-SW)^2 # Calculate KB: # Dickson, A. G., Deep-Sea Research 37:755-766, 1990: lnKB = ( - (-8966.9 - 2890.53 * sqrSal - 77.942 * Sal + - 1.728 * sqrSal * Sal - 0.0996 * Sal**2) / TempK + - 148.0248 + 137.1942 * sqrSal + 1.62142 * Sal + - (-24.4344 - 25.085 * sqrSal - 0.2474 * Sal) * logTempK + - 0.053105 * sqrSal * TempK + ( + -8966.9 + - 2890.53 * sqrSal + - 77.942 * Sal + + 1.728 * sqrSal * Sal + - 0.0996 * Sal**2 + ) + / TempK + + 148.0248 + + 137.1942 * sqrSal + + 1.62142 * Sal + + (-24.4344 - 25.085 * sqrSal - 0.2474 * Sal) * logTempK + + 0.053105 * sqrSal * TempK ) KB = ( np.exp(lnKB) # this is on the total pH scale in mol/kg-SW - / SWStoTOT # convert to SWS pH scale + / SWStoTOT # convert to SWS pH scale ) # Calculate KP1, KP2, KP3, and KSi: @@ -276,44 +285,49 @@ def set_constants(Sal, TempK, Pdbar): # KP1, KP2, KP3 are on the SWS pH scale in mol/kg-SW. # KSi was given on the SWS pH scale in molal units. 
lnKP1 = ( - -4576.752 / TempK + 115.54 - 18.453 * logTempK + - (-106.736 / TempK + 0.69171) * sqrSal + - (-0.65643 / TempK - 0.01844) * Sal + -4576.752 / TempK + + 115.54 + - 18.453 * logTempK + + (-106.736 / TempK + 0.69171) * sqrSal + + (-0.65643 / TempK - 0.01844) * Sal ) KP1 = np.exp(lnKP1) lnKP2 = ( - -8814.715 / TempK + 172.1033 - 27.927 * logTempK + - (-160.34 / TempK + 1.3566) * sqrSal + - (0.37335 / TempK - 0.05778) * Sal + -8814.715 / TempK + + 172.1033 + - 27.927 * logTempK + + (-160.34 / TempK + 1.3566) * sqrSal + + (0.37335 / TempK - 0.05778) * Sal ) KP2 = np.exp(lnKP2) lnKP3 = ( - -3070.75 / TempK - 18.126 + - (17.27039 / TempK + 2.81197) * sqrSal + - (-44.99486 / TempK - 0.09984) * Sal + -3070.75 / TempK + - 18.126 + + (17.27039 / TempK + 2.81197) * sqrSal + + (-44.99486 / TempK - 0.09984) * Sal ) KP3 = np.exp(lnKP3) lnKSi = ( - -8904.2 / TempK + 117.4 - 19.334 * logTempK + - (-458.79 / TempK + 3.5913) * sqrIonS + - (188.74 / TempK - 1.5998) * IonS + - (-12.1652 / TempK + 0.07871) * IonS**2 - ) - KSi = ( - np.exp(lnKSi) # this is on the SWS pH scale in mol/kg-H2O - * (1.0 - 0.001005 * Sal) # convert to mol/kg-SW + -8904.2 / TempK + + 117.4 + - 19.334 * logTempK + + (-458.79 / TempK + 3.5913) * sqrIonS + + (188.74 / TempK - 1.5998) * IonS + + (-12.1652 / TempK + 0.07871) * IonS**2 ) + KSi = np.exp(lnKSi) * ( # this is on the SWS pH scale in mol/kg-H2O + 1.0 - 0.001005 * Sal + ) # convert to mol/kg-SW # Correct constants for pressure pressure_corrections(TempK, Pbar) def pressure_corrections(TempK, Pbar): - """Calculate pressure corrections for constants defined in set_constants - """ + """Calculate pressure corrections for constants defined in set_constants""" # Declare global constants global R_gas, fH, K1, K2, KW, KB, KF, KS, KP1, KP2, KP3, KSi, TB, TS, TF @@ -324,10 +338,7 @@ def pressure_corrections(TempK, Pbar): # Fugacity Factor Delta = 57.7 - 0.118 * TempK - b = ( - -1636.75 + 12.0408 * TempK - 0.0327957 * TempK**2 + - 3.16528 * 1.0e-5 * TempK**3 - ) + b = -1636.75 + 12.0408 * TempK - 0.0327957 * TempK**2 + 3.16528 * 1.0e-5 * TempK**3 FugFac = np.exp((b + 2.0 * Delta) * 1.01325 / RT) # Pressure effects on K1 & K2: @@ -365,7 +376,7 @@ def pressure_corrections(TempK, Pbar): # deltaV = -29.48 + 0.1622 * TempC + 0.295 * (Sal - 34.8) # Millero, 1992 # deltaV = -29.48 - 0.1622 * TempC - 0.002608 * TempC**2 # Millero, 1995 # deltaV = deltaV + 0.295 * (Sal - 34.8) # Millero, 1979 - Kappa = -2.84 / 1000.0 # Millero, 1979 + Kappa = -2.84 / 1000.0 # Millero, 1979 # Millero, 1992 and Millero, 1995 also have this. # Kappa = Kappa + 0.354 * (Sal - 34.8) / 1000 # Millero, 1979 # Kappa = (-3.0 + 0.0427 * TempC) / 1000 # Millero, 1983 @@ -417,7 +428,7 @@ def pressure_corrections(TempK, Pbar): def CalculatepHfromTA(param, TA, val, TP, TSi): - """ SUB CalculatepHfromTATC, version 04.01, 10-13-96, written by Ernie Lewis. + """SUB CalculatepHfromTATC, version 04.01, 10-13-96, written by Ernie Lewis. Inputs: TA, TC, TP, TSi Output: pH This calculates pH from TA and TC using K1 and K2 by Newton's method. 
@@ -432,11 +443,11 @@ def CalculatepHfromTA(param, TA, val, TP, TSi): global K0, K1, K2, KW, KB # Set iteration parameters - pHGuess = 8.0 # this is the first guess - pHTol = 1.0e-4 # tolerance for iterations end + pHGuess = 8.0 # this is the first guess + pHTol = 1.0e-4 # tolerance for iterations end ln10 = np.log(10.0) # creates a vector holding the first guess for all samples - if hasattr(TA, 'shape'): + if hasattr(TA, "shape"): pH = np.ones(TA.shape) * pHGuess else: pH = pHGuess @@ -447,14 +458,16 @@ def CalculatepHfromTA(param, TA, val, TP, TSi): H, Beta = CalculateHfrompH(pH) NCAlk = CalculateNCAlkfrompH(H, TP, TSi) - if param is 'TC': + if param is "TC": CAlk = val * K1 * (H + 2 * K2) / Beta # find Slope dTA/dpH (not exact, but keeps all important terms) Slope = ln10 * ( - val * K1 * H * (H * H + K1 * K2 + 4.0 * H * K2) - / Beta / Beta + TB * KB * H / (KB + H) / (KB + H) + KW / H + H + val * K1 * H * (H * H + K1 * K2 + 4.0 * H * K2) / Beta / Beta + + TB * KB * H / (KB + H) / (KB + H) + + KW / H + + H ) - elif param is 'pCO2': + elif param is "pCO2": HCO3 = K0 * K1 * val / H CO3 = K0 * K1 * K2 * val / (H * H) CAlk = HCO3 + 2 * CO3 @@ -462,7 +475,7 @@ def CalculatepHfromTA(param, TA, val, TP, TSi): Slope = ln10 * ( HCO3 + 4 * CO3 + TB * KB * H / (KB + H) / (KB + H) + KW / H + H ) - elif param is 'CO3': + elif param is "CO3": HCO3 = H * val / K2 CAlk = HCO3 + 2 * val # find Slope dTA/dpH (not exact, but keeps all important terms) @@ -470,11 +483,11 @@ def CalculatepHfromTA(param, TA, val, TP, TSi): HCO3 + 4 * CO3 + TB * KB * H / (KB + H) / (KB + H) + KW / H + H ) else: - raise ValueError('Unknown carbon param: {}'.format(param)) + raise ValueError("Unknown carbon param: {}".format(param)) TA_calc = CAlk + NCAlk Residual = TA - TA_calc - deltapH = Residual / Slope # this is Newton's method + deltapH = Residual / Slope # this is Newton's method # to keep the jump from being too big while np.any(abs(deltapH) > 1): deltapH = deltapH / 2.0 @@ -501,7 +514,7 @@ def CalculateNCAlkfrompH(H, TP, TSi): PhosBot = H * H * H + KP1 * H * H + KP1 * KP2 * H + KP1 * KP2 * KP3 PAlk = TP * PhosTop / PhosBot SiAlk = TSi * KSi / (KSi + H) - FREEtoTOT = (1 + TS / KS) # pH scale conversion factor + FREEtoTOT = 1 + TS / KS # pH scale conversion factor Hfree = H / FREEtoTOT # for H on the total scale HSO4 = TS / (1 + KS / Hfree) # since KS is on the free scale HF = TF / (1 + KF / Hfree) # since KF is on the free scale @@ -511,12 +524,11 @@ def CalculateNCAlkfrompH(H, TP, TSi): def CalculateHfrompH(pH): - """ - """ + """ """ global K1, K2 - H = 10**(-pH) + H = 10 ** (-pH) Beta = H * H + K1 * H + K1 * K2 return H, Beta @@ -567,20 +579,26 @@ def CaSolubility(S, TempK, P): # Calcite solubility: # Mucci, Alphonso, Amer. J. of Science 283:781-799, 1983. - KCa = 10.0**( - -171.9065 - 0.077993 * TempK + 2839.319 / TempK + KCa = 10.0 ** ( + -171.9065 + - 0.077993 * TempK + + 2839.319 / TempK + 71.595 * logTempK / np.log(10.0) + (-0.77712 + 0.0028426 * TempK + 178.34 / TempK) * sqrtS - - 0.07711 * S + 0.0041249 * sqrtS * S + - 0.07711 * S + + 0.0041249 * sqrtS * S ) # Aragonite solubility: # Mucci, Alphonso, Amer. J. of Science 283:781-799, 1983. 
- KAr = 10.0**( - -171.945 - 0.077993 * TempK + 2903.293 / TempK + KAr = 10.0 ** ( + -171.945 + - 0.077993 * TempK + + 2903.293 / TempK + 71.595 * logTempK / np.log(10.0) + (-0.068393 + 0.0017276 * TempK + 88.135 / TempK) * sqrtS - - 0.10018 * S + 0.0059415 * sqrtS * S + - 0.10018 * S + + 0.0059415 * sqrtS * S ) # Pressure correction for calcite: @@ -589,9 +607,7 @@ def CaSolubility(S, TempK, P): # has typos (-0.5304, -0.3692, and 10^3 for Kappa factor) deltaV_KCa = -48.76 + 0.5304 * TempC Kappa_KCa = (-11.76 + 0.3692 * TempC) / 1000.0 - KCa = KCa * np.exp( - (-deltaV_KCa + 0.5 * Kappa_KCa * P) * P / (R_gas * TempK) - ) + KCa = KCa * np.exp((-deltaV_KCa + 0.5 * Kappa_KCa * P) * P / (R_gas * TempK)) # Pressure correction for aragonite: # Millero, Geochemica et Cosmochemica Acta 43:1651-1661, 1979, @@ -599,9 +615,7 @@ def CaSolubility(S, TempK, P): # and 10^3 for Kappa factor) deltaV_KAr = deltaV_KCa + 2.8 Kappa_KAr = Kappa_KCa - KAr = KAr * np.exp( - (-deltaV_KAr + 0.5 * Kappa_KAr * P) * P / (R_gas * TempK) - ) + KAr = KAr * np.exp((-deltaV_KAr + 0.5 * Kappa_KAr * P) * P / (R_gas * TempK)) # Calculate Omegas: # H = 10.0**(-pH) @@ -612,8 +626,8 @@ def CaSolubility(S, TempK, P): return Ca, KCa, KAr -def FindpHOnAllScales(pH_in, T, S, P, scale='total'): - """ SUB FindpHOnAllScales, version 01.02, 01-08-97, written by Ernie Lewis. +def FindpHOnAllScales(pH_in, T, S, P, scale="total"): + """SUB FindpHOnAllScales, version 01.02, 01-08-97, written by Ernie Lewis. Inputs: pH, scale Outputs: pH dict containing all scales """ @@ -625,28 +639,26 @@ def FindpHOnAllScales(pH_in, T, S, P, scale='total'): set_constants(S, TempK, P) # pH scale conversion factors - FREEtoTOT = (1 + TS / KS) + FREEtoTOT = 1 + TS / KS SWStoTOT = (1 + TS / KS) / (1 + TS / KS + TF / KF) # Determine input pH scale - if scale is 'total': + if scale is "total": factor = 0 - elif scale is 'seawater': + elif scale is "seawater": factor = -np.log(SWStoTOT) / np.log(0.1) - elif scale is 'free': + elif scale is "free": factor = -np.log(FREEtoTOT) / np.log(0.1) - elif scale is 'NBS': + elif scale is "NBS": factor = -np.log(SWStoTOT) / np.log(0.1) + np.log(fH) / np.log(0.1) else: - raise ValueError('Unrecongnized pH scale: {}'.format(scale)) + raise ValueError("Unrecongnized pH scale: {}".format(scale)) # Calculate pH on all scales pH = {} - pH['total'] = pH_in - factor - pH['free'] = pH['total'] - np.log(FREEtoTOT) / np.log(0.1) - pH['seawater'] = pH['total'] - np.log(SWStoTOT) / np.log(0.1) - pH['NBS'] = ( - pH['total'] - np.log(SWStoTOT) / np.log(0.1) + np.log(fH) / np.log(0.1) - ) + pH["total"] = pH_in - factor + pH["free"] = pH["total"] - np.log(FREEtoTOT) / np.log(0.1) + pH["seawater"] = pH["total"] - np.log(SWStoTOT) / np.log(0.1) + pH["NBS"] = pH["total"] - np.log(SWStoTOT) / np.log(0.1) + np.log(fH) / np.log(0.1) return pH diff --git a/SalishSeaTools/__pkg_metadata__.py b/SalishSeaTools/__pkg_metadata__.py index 5ed5a0cc..c4519bb2 100644 --- a/SalishSeaTools/__pkg_metadata__.py +++ b/SalishSeaTools/__pkg_metadata__.py @@ -15,7 +15,7 @@ """SalishSeaTools Package """ -PROJECT = 'SalishSeaTools' -DESCRIPTION = 'SalishSeaTools Package' -VERSION = '24.1.dev0' -DEV_STATUS = '5 - Production' +PROJECT = "SalishSeaTools" +DESCRIPTION = "SalishSeaTools Package" +VERSION = "24.1.dev0" +DEV_STATUS = "5 - Production" diff --git a/SalishSeaTools/docs/installation.rst b/SalishSeaTools/docs/installation.rst index 9d9c6e23..6edb9e28 100644 --- a/SalishSeaTools/docs/installation.rst +++ b/SalishSeaTools/docs/installation.rst @@ -63,4 +63,3 @@ or 
import modules from it: from salishsea_tools import nc_tools nc_tools.check_dataset_attrs(...) - diff --git a/SalishSeaTools/notebooks/visualisations/make_readme.py b/SalishSeaTools/notebooks/visualisations/make_readme.py index f86cb66c..f553b9af 100644 --- a/SalishSeaTools/notebooks/visualisations/make_readme.py +++ b/SalishSeaTools/notebooks/visualisations/make_readme.py @@ -16,17 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. """ + import datetime import json import os import re -nbviewer = 'https://nbviewer.org/urls' -repo = 'github.com/SalishSeaCast/tools/blob/main' -repo_dir = 'SalishSeaTools/notebooks/visualisations' +nbviewer = "https://nbviewer.org/urls" +repo = "github.com/SalishSeaCast/tools/blob/main" +repo_dir = "SalishSeaTools/notebooks/visualisations" url = os.path.join(nbviewer, repo, repo_dir) -title_pattern = re.compile('#{1,6} ?') +title_pattern = re.compile("#{1,6} ?") readme = """The Jupyter Notebooks in this directory are made by for testing functions in visualisations.py. @@ -36,31 +37,28 @@ (if that cell contains Markdown or raw text). """ -notebooks = (fn for fn in os.listdir('./') if fn.endswith('ipynb')) +notebooks = (fn for fn in os.listdir("./") if fn.endswith("ipynb")) for fn in notebooks: - readme += '* ##[{fn}]({url}/{fn}) \n \n'.format(fn=fn, url=url) - with open(fn, 'rt') as notebook: + readme += "* ##[{fn}]({url}/{fn}) \n \n".format(fn=fn, url=url) + with open(fn, "rt") as notebook: contents = json.load(notebook) try: - first_cell = contents['worksheets'][0]['cells'][0] + first_cell = contents["worksheets"][0]["cells"][0] except KeyError: - first_cell = contents['cells'][0] - first_cell_type = first_cell['cell_type'] - if first_cell_type in 'markdown raw'.split(): - desc_lines = first_cell['source'] + first_cell = contents["cells"][0] + first_cell_type = first_cell["cell_type"] + if first_cell_type in "markdown raw".split(): + desc_lines = first_cell["source"] for line in desc_lines: - suffix = '' + suffix = "" if title_pattern.match(line): - line = title_pattern.sub('**', line) - suffix = '**' - if line.endswith('\n'): - readme += ( - ' {line}{suffix} \n' - .format(line=line[:-1], suffix=suffix)) + line = title_pattern.sub("**", line) + suffix = "**" + if line.endswith("\n"): + readme += " {line}{suffix} \n".format(line=line[:-1], suffix=suffix) else: - readme += ( - ' {line}{suffix} '.format(line=line, suffix=suffix)) - readme += '\n' * 2 + readme += " {line}{suffix} ".format(line=line, suffix=suffix) + readme += "\n" * 2 license = """ ##License @@ -71,7 +69,9 @@ They are licensed under the Apache License, Version 2.0. https://www.apache.org/licenses/LICENSE-2.0 Please see the LICENSE file for details of the license. 
-""".format(this_year=datetime.date.today().year) -with open('README.md', 'wt') as f: +""".format( + this_year=datetime.date.today().year +) +with open("README.md", "wt") as f: f.writelines(readme) f.writelines(license) diff --git a/SalishSeaTools/salishsea_tools/DFOOPDB_to_pandas.py b/SalishSeaTools/salishsea_tools/DFOOPDB_to_pandas.py index cbd4aa85..4b8376c0 100644 --- a/SalishSeaTools/salishsea_tools/DFOOPDB_to_pandas.py +++ b/SalishSeaTools/salishsea_tools/DFOOPDB_to_pandas.py @@ -4,13 +4,14 @@ import glob import numpy as np -def loadDFO(basedir,varlist={},savedir='./'): - """ Returns stations dataframe and obs (profiles) dataframe + +def loadDFO(basedir, varlist={}, savedir="./"): + """Returns stations dataframe and obs (profiles) dataframe :arg basedir: path to the directory containing the files to be loaded eg basedir='/ocean/eolson/MEOPAR/obs/temptest/' NOTE: basedir should contain only files to be read (or links to them) :type basedir: str - + :arg varlist: set containing variables to be loaded; see choosevars below :type varlist: set @@ -19,269 +20,417 @@ def loadDFO(basedir,varlist={},savedir='./'): """ # these files log which files have been processed and record certain types of errors that can happen # you may want to take this part out and print the things that are written in these files to the screen instead - fout=open(basedir+'createDBfromDFO_OPDB_log.txt','w') - ferr=open(basedir+'createDBfromDFO_OPDB_errors.txt','w') - fout.write('Files processed:\n') - + fout = open(basedir + "createDBfromDFO_OPDB_log.txt", "w") + ferr = open(basedir + "createDBfromDFO_OPDB_errors.txt", "w") + fout.write("Files processed:\n") + # create full list of filenames - filenames=list() - filenames=[os.path.join(basedir,f) for f in os.listdir(basedir)] + filenames = list() + filenames = [os.path.join(basedir, f) for f in os.listdir(basedir)] filenames.sort() - if len(varlist)==0: + if len(varlist) == 0: # create set of variable names to load if they are present in files - choosevars={'Ammonia', 'Ammonium', 'Flag_Ammonium', 'Carbon_Dissolved_Organic', - 'Flag_Carbon_Dissolved_Organic', 'Carbon_Particulate_Organic', 'Carbon_Particulate_Total', - 'Flag_Carbon_Particulate_Total','Flag_Chlorophyll', 'Chlorophyll_Extracted', - 'Flag_Chlorophyll_Extracted', 'Chlorophyll_Extracted_gt0point7um', - 'Chlorophyll_Extracted_gt5point0um', 'Chlorophyll_plus_PhaeoPigment_Extracted', 'Date', - 'Depth', 'Depth_Nominal', 'Flag_Salinity', 'Flag_Salinity_Bottle', 'Flag_Silicate', - 'Flag_pH', 'Fluorescence_URU', 'Fluorescence_URU_Seapoint', 'Fluorescence_URU_Seatech', - 'Fluorescence_URU_Wetlabs', 'Latitude', 'Longitude', 'Nitrate', 'Flag_Nitrate', - 'Nitrate_plus_Nitrite', 'Flag_Nitrate_plus_Nitrite', 'Nitrate_plus_nitrite_ISUS', - 'Nitrate_plus_nitrite_ISUS_Voltage', 'Nitrite', 'Flag_Nitrite','Nitrogen_Dissolved_Organic', - 'Flag_Nitrogen_Dissolved_Organic', 'Nitrogen_Particulate_Organic','Nitrogen_Particulate_Total', - 'Flag_Nitrogen_Particulate_Total', 'Oxygen', 'Quality_Flag_Oxyg','Oxygen_Dissolved', - 'Flag_Oxygen_Dissolved', 'Oxygen_Dissolved_SBE', 'PAR', 'PAR_Reference', - 'PhaeoPigment_Extracted', 'Flag_PhaeoPigment_Extracted', 'Flag_Phaeophytin', 'Phosphate', - 'Flag_Phosphate','Quality_Flag_Phos', 'Phosphate(inorg)', 'Phytoplankton_Volume', 'Pressure', - 'Pressure_Reversing', 'Production_Primary', 'Quality_Flag_Nitr', 'Quality_Flag_Time', - 'Quality_Flag_Tota', 'Salinity', 'Salinity_Bottle', 'Salinity_T0_C0', 'Salinity_T1_C1', - 'Salinity__Pre1978','Quality_Flag_Sali','Salinity__Unknown', 
'Sample_Method', 'Silicate', - 'Quality_Flag_Sili', 'Station', 'Temperature', 'Quality_Flag_Temp','Temperature_Draw', - 'Temperature_Primary','Temperature_Reversing', 'Temperature_Secondary', 'Time', 'Time_of_Obs', - 'Total_Phosphorus', 'Transmissivity', 'Turbidity_Seapoint'} + choosevars = { + "Ammonia", + "Ammonium", + "Flag_Ammonium", + "Carbon_Dissolved_Organic", + "Flag_Carbon_Dissolved_Organic", + "Carbon_Particulate_Organic", + "Carbon_Particulate_Total", + "Flag_Carbon_Particulate_Total", + "Flag_Chlorophyll", + "Chlorophyll_Extracted", + "Flag_Chlorophyll_Extracted", + "Chlorophyll_Extracted_gt0point7um", + "Chlorophyll_Extracted_gt5point0um", + "Chlorophyll_plus_PhaeoPigment_Extracted", + "Date", + "Depth", + "Depth_Nominal", + "Flag_Salinity", + "Flag_Salinity_Bottle", + "Flag_Silicate", + "Flag_pH", + "Fluorescence_URU", + "Fluorescence_URU_Seapoint", + "Fluorescence_URU_Seatech", + "Fluorescence_URU_Wetlabs", + "Latitude", + "Longitude", + "Nitrate", + "Flag_Nitrate", + "Nitrate_plus_Nitrite", + "Flag_Nitrate_plus_Nitrite", + "Nitrate_plus_nitrite_ISUS", + "Nitrate_plus_nitrite_ISUS_Voltage", + "Nitrite", + "Flag_Nitrite", + "Nitrogen_Dissolved_Organic", + "Flag_Nitrogen_Dissolved_Organic", + "Nitrogen_Particulate_Organic", + "Nitrogen_Particulate_Total", + "Flag_Nitrogen_Particulate_Total", + "Oxygen", + "Quality_Flag_Oxyg", + "Oxygen_Dissolved", + "Flag_Oxygen_Dissolved", + "Oxygen_Dissolved_SBE", + "PAR", + "PAR_Reference", + "PhaeoPigment_Extracted", + "Flag_PhaeoPigment_Extracted", + "Flag_Phaeophytin", + "Phosphate", + "Flag_Phosphate", + "Quality_Flag_Phos", + "Phosphate(inorg)", + "Phytoplankton_Volume", + "Pressure", + "Pressure_Reversing", + "Production_Primary", + "Quality_Flag_Nitr", + "Quality_Flag_Time", + "Quality_Flag_Tota", + "Salinity", + "Salinity_Bottle", + "Salinity_T0_C0", + "Salinity_T1_C1", + "Salinity__Pre1978", + "Quality_Flag_Sali", + "Salinity__Unknown", + "Sample_Method", + "Silicate", + "Quality_Flag_Sili", + "Station", + "Temperature", + "Quality_Flag_Temp", + "Temperature_Draw", + "Temperature_Primary", + "Temperature_Reversing", + "Temperature_Secondary", + "Time", + "Time_of_Obs", + "Total_Phosphorus", + "Transmissivity", + "Turbidity_Seapoint", + } else: - chosevars=varlist - varlistu=choosevars | {x+'_units' for x in choosevars if not re.search('Flag', x)} - + chosevars = varlist + varlistu = choosevars | { + x + "_units" for x in choosevars if not re.search("Flag", x) + } # create function that returns datatype for a given field name def coltype(ikey): typedict = { - 'Date': str, - 'Sample_Method': str, - 'Station': str, - 'Time': str, - 'Time_of_Obs.': str, + "Date": str, + "Sample_Method": str, + "Station": str, + "Time": str, + "Time_of_Obs.": str, } for varn in varlistu: - if (re.search('Flag', varn) or varn in varlistu-choosevars): - typedict[varn]=str - return typedict.get(ikey, float) # 2nd argument is default value returned if ikey not in typedict + if re.search("Flag", varn) or varn in varlistu - choosevars: + typedict[varn] = str + return typedict.get( + ikey, float + ) # 2nd argument is default value returned if ikey not in typedict # define Table Classes: - dfStation=pd.DataFrame(columns=('ID','STATION','EVENT NUMBER','LATITUDE','Lat','LONGITUDE','Lon','WATER DEPTH', - 'WDIR', 'WSPD','START TIME','StartDay','StartMonth','StartYear','StartHour','StartTimeZone', - 'DATA DESCRIPTION','MISSION','AGENCY','COUNTRY','PROJECT','SCIENTIST','PLATFORM','sourceFile')) - tdictSta={'ID':int,'STATION':str,'EVENT 
NUMBER':str,'LATITUDE':str,'Lat':float,'LONGITUDE':str,'Lon':float,'WATER DEPTH':float, - 'WDIR':float, 'WSPD':float,'START TIME':str,'StartDay':int,'StartMonth':int,'StartYear':int,'StartHour':float, - 'StartTimeZone':str,'DATA DESCRIPTION':str,'MISSION':str,'AGENCY':str,'COUNTRY':str,'PROJECT':str, - 'SCIENTIST':str,'PLATFORM':str,'sourceFile':str,'StationTBLID':int} - dfObs=pd.DataFrame(columns=list(('ID','sourceFile','StationTBLID',))+list([cname for cname in varlistu])) - tdictObs={'ID':int,'sourceFile':str,'StationTBLID':int} + dfStation = pd.DataFrame( + columns=( + "ID", + "STATION", + "EVENT NUMBER", + "LATITUDE", + "Lat", + "LONGITUDE", + "Lon", + "WATER DEPTH", + "WDIR", + "WSPD", + "START TIME", + "StartDay", + "StartMonth", + "StartYear", + "StartHour", + "StartTimeZone", + "DATA DESCRIPTION", + "MISSION", + "AGENCY", + "COUNTRY", + "PROJECT", + "SCIENTIST", + "PLATFORM", + "sourceFile", + ) + ) + tdictSta = { + "ID": int, + "STATION": str, + "EVENT NUMBER": str, + "LATITUDE": str, + "Lat": float, + "LONGITUDE": str, + "Lon": float, + "WATER DEPTH": float, + "WDIR": float, + "WSPD": float, + "START TIME": str, + "StartDay": int, + "StartMonth": int, + "StartYear": int, + "StartHour": float, + "StartTimeZone": str, + "DATA DESCRIPTION": str, + "MISSION": str, + "AGENCY": str, + "COUNTRY": str, + "PROJECT": str, + "SCIENTIST": str, + "PLATFORM": str, + "sourceFile": str, + "StationTBLID": int, + } + dfObs = pd.DataFrame( + columns=list( + ( + "ID", + "sourceFile", + "StationTBLID", + ) + ) + + list([cname for cname in varlistu]) + ) + tdictObs = {"ID": int, "sourceFile": str, "StationTBLID": int} for cname in varlistu: - tdictObs[cname]=coltype(cname) + tdictObs[cname] = coltype(cname) - stationNo=0 - obsNo=0 + stationNo = 0 + obsNo = 0 for ifile in filenames: - stationNo+=1 - sourceFile=re.search('\/ocean\/eolson\/MEOPAR\/obs\/(.*)', ifile).group(1) - fout.write(sourceFile+'\n') - varNames={} - varLens={} - varUnits={} - stationData={} - stationData['ID']=stationNo - stationData['sourceFile']=sourceFile - with open(ifile, 'rt', encoding = "ISO-8859-1") as f: - infile=False - invars=False - indetail=False - inadmin=False - inloc=False - indata=False - detformat=False + stationNo += 1 + sourceFile = re.search("\/ocean\/eolson\/MEOPAR\/obs\/(.*)", ifile).group(1) + fout.write(sourceFile + "\n") + varNames = {} + varLens = {} + varUnits = {} + stationData = {} + stationData["ID"] = stationNo + stationData["sourceFile"] = sourceFile + with open(ifile, "rt", encoding="ISO-8859-1") as f: + infile = False + invars = False + indetail = False + inadmin = False + inloc = False + indata = False + detformat = False for line in f: if infile: - if re.match('\s*\$', line) or len(line)==0: - infile=False + if re.match("\s*\$", line) or len(line) == 0: + infile = False else: - splitline=re.split('\s*\:\s*',line.strip(), maxsplit=1) - if re.match('START TIME',splitline[0]): - stationData['START TIME']=splitline[1] - splits=re.split('\s* \s*',splitline[1]) - stationData['StartTimeZone']=splits[0] - date=splits[1] - time=splits[2] - stationData['StartYear']=date[0:4] - stationData['StartMonth']=date[5:7] - stationData['StartDay']=date[8:] - splitTime=re.split('\:',time) - stationData['StartHour']=float(splitTime[0])+float(splitTime[1])/60.0+float(splitTime[2])/3600.0 - elif re.match('DATA DESCRIPTION',splitline[0]): - stationData['DATA DESCRIPTION']=splitline[1] + splitline = re.split("\s*\:\s*", line.strip(), maxsplit=1) + if re.match("START TIME", splitline[0]): + stationData["START TIME"] = 
splitline[1] + splits = re.split("\s* \s*", splitline[1]) + stationData["StartTimeZone"] = splits[0] + date = splits[1] + time = splits[2] + stationData["StartYear"] = date[0:4] + stationData["StartMonth"] = date[5:7] + stationData["StartDay"] = date[8:] + splitTime = re.split("\:", time) + stationData["StartHour"] = ( + float(splitTime[0]) + + float(splitTime[1]) / 60.0 + + float(splitTime[2]) / 3600.0 + ) + elif re.match("DATA DESCRIPTION", splitline[0]): + stationData["DATA DESCRIPTION"] = splitline[1] if invars: - if re.search('\$END', line): - invars=False + if re.search("\$END", line): + invars = False else: - test=re.findall("'.*?'",line) # (.*? matches anything but chooses min len match - not greedy) + test = re.findall( + "'.*?'", line + ) # (.*? matches anything but chooses min len match - not greedy) for expr in test: - line=re.sub(re.escape(expr),re.sub(' ','_',expr),line) # remove spaces from items in quotes - splitline=re.split('\s* \s*',line.strip()) - if re.match('[0-9]', splitline[0]): - varnum=int(splitline[0]) - cvar=splitline[1] - cvar = re.sub('(?<=[0-9])*\.(?=[0-9])','point',cvar) # decimal points -> point - cvar = re.sub('\-','',cvar) # remove - from column names - cvar = re.sub('\:','_',cvar) # replace : with _ - cvar = re.sub('\>','gt',cvar) # replace > with gt - cvar = re.sub('\<','lt',cvar) # replace < with lt - cvar = re.sub('(\'|\.)','',cvar) # remove special characters (' and .) + line = re.sub( + re.escape(expr), re.sub(" ", "_", expr), line + ) # remove spaces from items in quotes + splitline = re.split("\s* \s*", line.strip()) + if re.match("[0-9]", splitline[0]): + varnum = int(splitline[0]) + cvar = splitline[1] + cvar = re.sub( + "(?<=[0-9])*\.(?=[0-9])", "point", cvar + ) # decimal points -> point + cvar = re.sub("\-", "", cvar) # remove - from column names + cvar = re.sub("\:", "_", cvar) # replace : with _ + cvar = re.sub("\>", "gt", cvar) # replace > with gt + cvar = re.sub("\<", "lt", cvar) # replace < with lt + cvar = re.sub( + "('|\.)", "", cvar + ) # remove special characters (' and .) cunits = splitline[2].strip() - varNames[varnum]=cvar - varUnits[varnum]=cunits + varNames[varnum] = cvar + varUnits[varnum] = cunits elif indetail: - detcount+=1 - if re.search('\$END', line): - indetail=False - elif (detcount==1 and re.match('\s*\!\s*No\s*Pad\s*Start\s*Width', line)): - detformat=True + detcount += 1 + if re.search("\$END", line): + indetail = False + elif detcount == 1 and re.match( + "\s*\!\s*No\s*Pad\s*Start\s*Width", line + ): + detformat = True else: - if (detformat and not re.match('\s*\!',line)): - test=re.findall("'.*?'",line) # (.*? matches anything but chooses min len match - not greedy) + if detformat and not re.match("\s*\!", line): + test = re.findall( + "'.*?'", line + ) # (.*? 
matches anything but chooses min len match - not greedy) for expr in test: - line=re.sub(re.escape(expr),re.sub(' ','_',expr),line) # remove spaces from items in quotes - splitline=re.split('\s* \s*',line.strip()) - varnum=int(splitline[0]) + line = re.sub( + re.escape(expr), re.sub(" ", "_", expr), line + ) # remove spaces from items in quotes + splitline = re.split("\s* \s*", line.strip()) + varnum = int(splitline[0]) try: - varwid=int(splitline[3]) + varwid = int(splitline[3]) except: - detformat=False - varLens[varnum]=varwid + detformat = False + varLens[varnum] = varwid elif inadmin: - if len(line)==0: - inadmin=False + if len(line) == 0: + inadmin = False else: - splitline=re.split('\s*\:\s*',line.strip(), maxsplit=1) - if re.match('MISSION',splitline[0]): - stationData['MISSION']=splitline[1] - elif re.match('AGENCY',splitline[0]): - stationData['AGENCY']=splitline[1] - elif re.match('COUNTRY',splitline[0]): - stationData['COUNTRY']=splitline[1] - elif re.match('PROJECT',splitline[0]): - stationData['PROJECT']=splitline[1] - elif re.match('SCIENTIST',splitline[0]): - stationData['SCIENTIST']=splitline[1] - elif re.match('PLATFORM',splitline[0]): - stationData['PLATFORM']=splitline[1] + splitline = re.split("\s*\:\s*", line.strip(), maxsplit=1) + if re.match("MISSION", splitline[0]): + stationData["MISSION"] = splitline[1] + elif re.match("AGENCY", splitline[0]): + stationData["AGENCY"] = splitline[1] + elif re.match("COUNTRY", splitline[0]): + stationData["COUNTRY"] = splitline[1] + elif re.match("PROJECT", splitline[0]): + stationData["PROJECT"] = splitline[1] + elif re.match("SCIENTIST", splitline[0]): + stationData["SCIENTIST"] = splitline[1] + elif re.match("PLATFORM", splitline[0]): + stationData["PLATFORM"] = splitline[1] elif inloc: - if len(line)==0: - inloc=False + if len(line) == 0: + inloc = False else: - splitline=re.split('\s*\:\s*',line.strip(), maxsplit=1) - if re.match('STATION',splitline[0]): + splitline = re.split("\s*\:\s*", line.strip(), maxsplit=1) + if re.match("STATION", splitline[0]): try: - stationData['STATION']=splitline[1] + stationData["STATION"] = splitline[1] except: print(line) - return() - elif re.match('EVENT NUMBER',splitline[0]): - stationData['EVENT NUMBER']=splitline[1] - elif re.match('LATITUDE',splitline[0]): - stationData['LATITUDE']=splitline[1] - latparts=re.split('\s* \s*', splitline[1]) - signdict={'N':1,'E':1,'S':-1,'W':-1} - staLat=signdict[latparts[2]]*(float(latparts[0])+float(latparts[1])/60.0) - stationData['Lat']=staLat - elif re.match('LONGITUDE',splitline[0]): - stationData['LONGITUDE']=splitline[1] - lonparts=re.split('\s* \s*', splitline[1]) - signdict={'N':1,'E':1,'S':-1,'W':-1} - staLon=signdict[lonparts[2]]*(float(lonparts[0])+float(lonparts[1])/60.0) - stationData['Lon']=staLon - elif re.match('WATER DEPTH',splitline[0]): - stationData['WATER DEPTH']=splitline[1] - elif re.match('WDIR',splitline[0]): - stationData['WDIR']=re.split('\s* \s*',splitline[1])[0] - elif re.match('WSPD',splitline[0]): - stationData['WSPD']=re.split('\s* \s*',splitline[1])[0] - elif (indata and len(line)!=0 and not re.match('\s*\!',line)): + return () + elif re.match("EVENT NUMBER", splitline[0]): + stationData["EVENT NUMBER"] = splitline[1] + elif re.match("LATITUDE", splitline[0]): + stationData["LATITUDE"] = splitline[1] + latparts = re.split("\s* \s*", splitline[1]) + signdict = {"N": 1, "E": 1, "S": -1, "W": -1} + staLat = signdict[latparts[2]] * ( + float(latparts[0]) + float(latparts[1]) / 60.0 + ) + stationData["Lat"] = staLat + elif 
re.match("LONGITUDE", splitline[0]): + stationData["LONGITUDE"] = splitline[1] + lonparts = re.split("\s* \s*", splitline[1]) + signdict = {"N": 1, "E": 1, "S": -1, "W": -1} + staLon = signdict[lonparts[2]] * ( + float(lonparts[0]) + float(lonparts[1]) / 60.0 + ) + stationData["Lon"] = staLon + elif re.match("WATER DEPTH", splitline[0]): + stationData["WATER DEPTH"] = splitline[1] + elif re.match("WDIR", splitline[0]): + stationData["WDIR"] = re.split("\s* \s*", splitline[1])[0] + elif re.match("WSPD", splitline[0]): + stationData["WSPD"] = re.split("\s* \s*", splitline[1])[0] + elif indata and len(line) != 0 and not re.match("\s*\!", line): if detformat: - varVals={} - istart=0 - for ii in range(1,1+max(varNames.keys())): - varVal=line[istart:(istart+varLens[ii])] - istart+=varLens[ii] + varVals = {} + istart = 0 + for ii in range(1, 1 + max(varNames.keys())): + varVal = line[istart : (istart + varLens[ii])] + istart += varLens[ii] if varNames[ii] in varlistu: - varVals[varNames[ii]]=varVal.strip() - if varNames[ii]+'_units' in varlistu: - varVals[varNames[ii]+'_units']=varUnits[ii] - varVals['StationTBLID']=stationNo - varVals['sourceFile']=sourceFile - varVals['ID']=obsNo - obsNo=obsNo+1 - #SEND TO DATABASE - #session.execute(ObsTBL.__table__.insert().values(**varVals)) + varVals[varNames[ii]] = varVal.strip() + if varNames[ii] + "_units" in varlistu: + varVals[varNames[ii] + "_units"] = varUnits[ii] + varVals["StationTBLID"] = stationNo + varVals["sourceFile"] = sourceFile + varVals["ID"] = obsNo + obsNo = obsNo + 1 + # SEND TO DATABASE + # session.execute(ObsTBL.__table__.insert().values(**varVals)) for sel in varVals.keys(): - varVals[sel]=tdictObs[sel](varVals[sel]) - for sel in set(dfObs.keys())-set(varVals.keys()): - varVals[sel]=np.nan - dfObs.loc[varVals['ID']]=varVals + varVals[sel] = tdictObs[sel](varVals[sel]) + for sel in set(dfObs.keys()) - set(varVals.keys()): + varVals[sel] = np.nan + dfObs.loc[varVals["ID"]] = varVals else: - varVals={} - splitline=re.split('\s*\ \s*',line.strip()) - if len(splitline)==max(varNames.keys()): - for ii in range(1,1+max(varNames.keys())): + varVals = {} + splitline = re.split("\s*\ \s*", line.strip()) + if len(splitline) == max(varNames.keys()): + for ii in range(1, 1 + max(varNames.keys())): if varNames[ii] in varlistu: - varVals[varNames[ii]]=splitline[ii-1].strip() - if varNames[ii]+'_units' in varlistu: - varVals[varNames[ii]+'_units']=varUnits[ii] - varVals['StationTBLID']=stationNo - varVals['sourceFile']=sourceFile - varVals['ID']=obsNo - obsNo=obsNo+1 - #SEND TO DATABASE - #session.execute(ObsTBL.__table__.insert().values(**varVals)) + varVals[varNames[ii]] = splitline[ii - 1].strip() + if varNames[ii] + "_units" in varlistu: + varVals[varNames[ii] + "_units"] = varUnits[ii] + varVals["StationTBLID"] = stationNo + varVals["sourceFile"] = sourceFile + varVals["ID"] = obsNo + obsNo = obsNo + 1 + # SEND TO DATABASE + # session.execute(ObsTBL.__table__.insert().values(**varVals)) for sel in varVals.keys(): - varVals[sel]=tdictObs[sel](varVals[sel]) - for sel in set(dfObs.keys())-set(varVals.keys()): - varVals[sel]=np.nan - dfObs.loc[varVals['ID']]=varVals + varVals[sel] = tdictObs[sel](varVals[sel]) + for sel in set(dfObs.keys()) - set(varVals.keys()): + varVals[sel] = np.nan + dfObs.loc[varVals["ID"]] = varVals else: - ferr.write('ERROR: filename:'+sourceFile+' line:'+line) - if re.match('![- ]*$',line): - tem=re.search('(?<=\!)[- ]*$',line) - splitline=re.split(r'\s',tem.group(0)) - for ii in range(1, 1+len(splitline)): - 
varLens[ii]=len(splitline[ii-1])+1 - detformat=True - if re.search('\*FILE', line): - infile=True - if re.search('\$TABLE\: CHANNELS', line): - invars=True - if re.search('\$TABLE\: CHANNEL DETAIL', line): - indetail=True - detcount=0 - if re.search('\*ADMINISTRATION', line): - inadmin=True - if re.search('\*LOCATION', line): - inloc=True - inadmin=False - if re.search('\*END OF HEADER', line): - indata=True - inloc=False - if re.search('\$END',line): - inloc=False + ferr.write( + "ERROR: filename:" + sourceFile + " line:" + line + ) + if re.match("![- ]*$", line): + tem = re.search("(?<=\!)[- ]*$", line) + splitline = re.split(r"\s", tem.group(0)) + for ii in range(1, 1 + len(splitline)): + varLens[ii] = len(splitline[ii - 1]) + 1 + detformat = True + if re.search("\*FILE", line): + infile = True + if re.search("\$TABLE\: CHANNELS", line): + invars = True + if re.search("\$TABLE\: CHANNEL DETAIL", line): + indetail = True + detcount = 0 + if re.search("\*ADMINISTRATION", line): + inadmin = True + if re.search("\*LOCATION", line): + inloc = True + inadmin = False + if re.search("\*END OF HEADER", line): + indata = True + inloc = False + if re.search("\$END", line): + inloc = False # SEND TO DATABASE (at file level) for sel in stationData.keys(): - stationData[sel]=tdictSta[sel](stationData[sel]) - for sel in set(dfStation.keys())-set(stationData.keys()): - stationData[sel]=np.nan - dfStation.loc[stationData['ID']]=stationData - #session.execute(StationTBL.__table__.insert().values(**stationData)) + stationData[sel] = tdictSta[sel](stationData[sel]) + for sel in set(dfStation.keys()) - set(stationData.keys()): + stationData[sel] = np.nan + dfStation.loc[stationData["ID"]] = stationData + # session.execute(StationTBL.__table__.insert().values(**stationData)) fout.close() ferr.close() return dfStation, dfObs diff --git a/SalishSeaTools/salishsea_tools/LiveOcean_BCs.py b/SalishSeaTools/salishsea_tools/LiveOcean_BCs.py index e1f2d28a..4bfc1d47 100644 --- a/SalishSeaTools/salishsea_tools/LiveOcean_BCs.py +++ b/SalishSeaTools/salishsea_tools/LiveOcean_BCs.py @@ -47,9 +47,9 @@ def load_SalishSea_boundary_grid(imin, imax, rim, meshfilename): """ with nc.Dataset(meshfilename) as meshfile: - lonBC = meshfile.variables['nav_lon'][imin:imax, 1:rim + 1] - latBC = meshfile.variables['nav_lat'][imin:imax, 1:rim + 1] - depBC = meshfile.variables['gdept_1d'][0] + lonBC = meshfile.variables["nav_lon"][imin:imax, 1 : rim + 1] + latBC = meshfile.variables["nav_lat"][imin:imax, 1 : rim + 1] + depBC = meshfile.variables["gdept_1d"][0] shape = lonBC.shape @@ -57,9 +57,7 @@ def load_SalishSea_boundary_grid(imin, imax, rim, meshfilename): def load_LiveOcean( - date, - LO_dir='/results/forcing/LiveOcean/downloaded/', - LO_file='low_passed_UBC.nc' + date, LO_dir="/results/forcing/LiveOcean/downloaded/", LO_file="low_passed_UBC.nc" ): """Load a time series of Live Ocean results represented by a date, location and filename @@ -76,8 +74,8 @@ def load_LiveOcean( :returns: xarray dataset of Live Ocean results """ # Choose file and load - sdt = datetime.datetime.strptime(date, '%Y-%m-%d') - file = os.path.join(LO_dir, sdt.strftime('%Y%m%d'), LO_file) + sdt = datetime.datetime.strptime(date, "%Y-%m-%d") + file = os.path.join(LO_dir, sdt.strftime("%Y%m%d"), LO_file) T = grid.get_basic_info(file, only_T=True) # note: grid.py is from Parker d = xr.open_dataset(file) @@ -85,10 +83,8 @@ def load_LiveOcean( return d -def interpolate_to_NEMO_depths( - dataset, depBC, var_names -): - """ Interpolate variables in var_names from a 
Live Ocean dataset to NEMO +def interpolate_to_NEMO_depths(dataset, depBC, var_names): + """Interpolate variables in var_names from a Live Ocean dataset to NEMO depths. LiveOcean land points (including points lower than bathymetry) are set to np.nan and then masked. @@ -106,15 +102,18 @@ def interpolate_to_NEMO_depths( """ interps = {} for var_name in var_names: - var_interp = np.zeros((depBC.shape[0], dataset[var_name][0, 0].shape[0], - dataset[var_name][0, 0].shape[1])) + var_interp = np.zeros( + ( + depBC.shape[0], + dataset[var_name][0, 0].shape[0], + dataset[var_name][0, 0].shape[1], + ) + ) for j in range(var_interp.shape[1]): for i in range(var_interp.shape[2]): LO_depths = dataset.z_rho.values[0, :, j, i] var = dataset[var_name].values[0, :, j, i] - var_interp[:, j, i] = np.interp( - -depBC, LO_depths, var, left=np.nan - ) + var_interp[:, j, i] = np.interp(-depBC, LO_depths, var, left=np.nan) # NEMO depths are positive, LiveOcean are negative interps[var_name] = np.ma.masked_invalid(var_interp) @@ -171,9 +170,9 @@ def fill_box(interps, maxk=35): x1 = xx[~array.mask] y1 = yy[~array.mask] newarr = array[~array.mask] - interps[var][k] = interpolate.griddata((x1, y1), - newarr.ravel(), (xx, yy), - method='nearest') + interps[var][k] = interpolate.griddata( + (x1, y1), newarr.ravel(), (xx, yy), method="nearest" + ) return interps @@ -203,16 +202,18 @@ def convect(sigma, interps): if sigma[k, i, j] > sigma[k + 1, i, j]: good = False for var in var_names: - interps[var][k, i, j], interps[var][ - k + 1, i, j - ] = interps[var][k + 1, i, j], interps[var][k, i, j - ] - sigma[k, i, j], sigma[k + 1, i, j] = sigma[ - k + 1, i, j - ], sigma[k, i, j] + interps[var][k, i, j], interps[var][k + 1, i, j] = ( + interps[var][k + 1, i, j], + interps[var][k, i, j], + ) + sigma[k, i, j], sigma[k + 1, i, j] = ( + sigma[k + 1, i, j], + sigma[k, i, j], + ) return sigma, interps + def stabilize(sigma, interps): """Add a little salt to stabilize marginally stable cells @@ -228,15 +229,15 @@ def stabilize(sigma, interps): """ small = 0.01 # stabilize for delta sigma less than this - kl = 25 # stabilize for low delta sigma higher than this + kl = 25 # stabilize for low delta sigma higher than this add_salt = 0.01 # add this much salt kmax, imax, jmax = sigma.shape for k in range(kl - 1): for i in range(imax): for j in range(jmax): - if sigma[k+1, i, j] - sigma[k, i, j] < small: - interps['salt'][:k+1, i, j] += -add_salt / (k+1) - interps['salt'][k+1:, i, j] += add_salt / (kmax - k+1) + if sigma[k + 1, i, j] - sigma[k, i, j] < small: + interps["salt"][: k + 1, i, j] += -add_salt / (k + 1) + interps["salt"][k + 1 :, i, j] += add_salt / (kmax - k + 1) return interps @@ -295,8 +296,7 @@ def interpolate_to_NEMO_lateral(interps, dataset, NEMOlon, NEMOlat, shape): for k in range(var_new.shape[0]): var_grid = interps[var][k, :, :].ravel() var_new[k, ...] = interpolate.griddata( - (lonsLO, latsLO), var_grid, - (NEMOlon, NEMOlat), method='linear' + (lonsLO, latsLO), var_grid, (NEMOlon, NEMOlat), method="linear" ) interpl[var] = var_new return interpl @@ -328,6 +328,7 @@ def calculate_Si_from_NO3(NO3, SA, a=6.46, b=1.35, c=0, sigma=1, tsa=29): return Si + def correct_high_NO3(NO3, smax=100, nmax=120): """Correct LiveOcean nitrates that are higher than smax, so that the largest nitrate is nmax. Defaults cause no correction. 
@@ -343,10 +344,10 @@ def correct_high_NO3(NO3, smax=100, nmax=120): :returns: a 3-D array of corrected nitrate values""" - #correction = np.array([(nitrate - smax) if nitrate > smax else 0 for + # correction = np.array([(nitrate - smax) if nitrate > smax else 0 for # nitrate in NO3]) correction = NO3 - smax - correction[NO3 < smax] = 0. + correction[NO3 < smax] = 0.0 newnitrate = NO3 - correction * correction / (correction + nmax - smax) return newnitrate @@ -379,69 +380,61 @@ def prepare_dataset(interpl, var_meta, LO_to_NEMO_var_map, depBC, time): # Add some global attributes ds_attrs = { - 'acknowledgements': - 'Live Ocean https://faculty.washington.edu/pmacc/LO/LiveOcean.html', - 'creator_email': - 'sallen@eoas.ubc.ca', - 'creator_name': - 'Salish Sea MEOPAR Project Contributors', - 'creator_url': - 'https://salishsea-meopar-docs.readthedocs.org/', - 'institution': - 'UBC EOAS', - 'institution_fullname': ( - 'Earth, Ocean & Atmospheric Sciences,' - ' University of British Columbia' + "acknowledgements": "Live Ocean https://faculty.washington.edu/pmacc/LO/LiveOcean.html", + "creator_email": "sallen@eoas.ubc.ca", + "creator_name": "Salish Sea MEOPAR Project Contributors", + "creator_url": "https://salishsea-meopar-docs.readthedocs.org/", + "institution": "UBC EOAS", + "institution_fullname": ( + "Earth, Ocean & Atmospheric Sciences," " University of British Columbia" ), - 'summary': ( - 'Temperature, Salinity, Nitrate, Oxygen, DIC and TALK' - 'from the Live Ocean model' - ' interpolated in space onto the Salish Sea NEMO Model' - ' western open boundary. Silicon from Nitrate.' + "summary": ( + "Temperature, Salinity, Nitrate, Oxygen, DIC and TALK" + "from the Live Ocean model" + " interpolated in space onto the Salish Sea NEMO Model" + " western open boundary. Silicon from Nitrate." ), - 'source': ( - 'https://nbviewer.org/urls/bitbucket.org/' - 'salishsea/.../LiveOceanNew' + "source": ( + "https://nbviewer.org/urls/bitbucket.org/" "salishsea/.../LiveOceanNew" + ), + "history": ( + "[{}] File creation.".format(datetime.datetime.today().strftime("%Y-%m-%d")) ), - 'history': ( - '[{}] File creation.' 
- .format(datetime.datetime.today().strftime('%Y-%m-%d')) - ) } da = {} - var_names = (var for var in interpl.keys() if var != 'NH4') + var_names = (var for var in interpl.keys() if var != "NH4") for var in var_names: da[var] = xr.DataArray( data=interpl[var], name=LO_to_NEMO_var_map[var], - dims=('time_counter', 'deptht', 'yb', 'xbT'), + dims=("time_counter", "deptht", "yb", "xbT"), coords={ - 'time_counter': time, - 'deptht': depBC, - 'yb': [1], - 'xbT': np.arange(interpl[var].shape[3]) + "time_counter": time, + "deptht": depBC, + "yb": [1], + "xbT": np.arange(interpl[var].shape[3]), }, - attrs=var_meta[LO_to_NEMO_var_map[var]] + attrs=var_meta[LO_to_NEMO_var_map[var]], ) ds = xr.Dataset( data_vars={ - 'vosaline': da['salt'], - 'votemper': da['temp'], - 'NO3': da['NO3'], - 'Si': da['Si'], - 'OXY': da['oxygen'], - 'DIC': da['TIC'], - 'TA': da['alkalinity'] + "vosaline": da["salt"], + "votemper": da["temp"], + "NO3": da["NO3"], + "Si": da["Si"], + "OXY": da["oxygen"], + "DIC": da["TIC"], + "TA": da["alkalinity"], }, coords={ - 'time_counter': time, - 'deptht': depBC, - 'yb': [1], - 'xbT': np.arange(interpl['salt'].shape[3]) + "time_counter": time, + "deptht": depBC, + "yb": [1], + "xbT": np.arange(interpl["salt"].shape[3]), }, - attrs=ds_attrs + attrs=ds_attrs, ) return ds @@ -464,18 +457,18 @@ def write_out_file(ds, date, file_template, bc_dir): :type bc_dir: str """ - sdt = datetime.datetime.strptime(date, '%Y-%m-%d') + sdt = datetime.datetime.strptime(date, "%Y-%m-%d") filename = file_template.format(sdt) filepath = os.path.join(bc_dir, filename) - encoding = {var: {'zlib': True} for var in ds.data_vars} - encoding['time_counter'] = {'units': 'minutes since 1970-01-01 00:00'} + encoding = {var: {"zlib": True} for var in ds.data_vars} + encoding["time_counter"] = {"units": "minutes since 1970-01-01 00:00"} ds.to_netcdf( path=filepath, - unlimited_dims=('time_counter'), + unlimited_dims=("time_counter"), encoding=encoding, ) - logger.debug('Saved {}'.format(filename)) + logger.debug("Saved {}".format(filename)) return filepath @@ -485,17 +478,17 @@ def write_out_file(ds, date, file_template, bc_dir): def create_LiveOcean_TS_BCs( date, - file_template='LiveOcean_v201712_{:y%Ym%md%d}.nc', - meshfilename='/results/nowcast-sys/grid/mesh_mask201702.nc', - bc_dir='/results/forcing/LiveOcean/boundary_conditions/', - LO_dir='/results/forcing/LiveOcean/downloaded/', - LO_to_SSC_parameters = {'NO3': {'smax' : 100., - 'nmax' : 120.,}, - 'Si' : {'a' : 6.46, - 'b' : 1.35, - 'c' : 0., - 'sigma' : 1., - 'tsa' : 29}} + file_template="LiveOcean_v201712_{:y%Ym%md%d}.nc", + meshfilename="/results/nowcast-sys/grid/mesh_mask201702.nc", + bc_dir="/results/forcing/LiveOcean/boundary_conditions/", + LO_dir="/results/forcing/LiveOcean/downloaded/", + LO_to_SSC_parameters={ + "NO3": { + "smax": 100.0, + "nmax": 120.0, + }, + "Si": {"a": 6.46, "b": 1.35, "c": 0.0, "sigma": 1.0, "tsa": 29}, + }, ): """Create a Live Ocean boundary condition file for date for use in the NEMO model. 
@@ -519,54 +512,37 @@ def create_LiveOcean_TS_BCs( # Create metadeta for temperature and salinity # (Live Ocean variables, NEMO grid) var_meta = { - 'vosaline': { - 'grid': 'SalishSea2', - 'long_name': 'Practical Salinity', - 'units': 'psu' + "vosaline": { + "grid": "SalishSea2", + "long_name": "Practical Salinity", + "units": "psu", }, - 'votemper': { - 'grid': 'SalishSea2', - 'long_name': 'Potential Temperature', - 'units': 'deg C' + "votemper": { + "grid": "SalishSea2", + "long_name": "Potential Temperature", + "units": "deg C", }, - 'NO3': { - 'grid': 'SalishSea2', - 'long_name': 'Nitrate', - 'units': 'muM' + "NO3": {"grid": "SalishSea2", "long_name": "Nitrate", "units": "muM"}, + "Si": {"grid": "SalishSea2", "long_name": "Dissolved Silicon", "units": "muM"}, + "OXY": {"grid": "SalishSea2", "long_name": "Oxygen", "units": "muM"}, + "DIC": { + "grid": "SalishSea2", + "long_name": "Dissolved Inorganic Carbon", + "units": "muM", }, - 'Si': { - 'grid': 'SalishSea2', - 'long_name': 'Dissolved Silicon', - 'units': 'muM' - }, - 'OXY': { - 'grid': 'SalishSea2', - 'long_name': 'Oxygen', - 'units': 'muM' - }, - 'DIC': { - 'grid': 'SalishSea2', - 'long_name': 'Dissolved Inorganic Carbon', - 'units': 'muM' - }, - 'TA': { - 'grid': 'SalishSea2', - 'long_name': 'Total Alkalinity', - 'units': 'muM' - }, - + "TA": {"grid": "SalishSea2", "long_name": "Total Alkalinity", "units": "muM"}, } # Mapping from LiveOcean TS names to NEMO TS names LO_to_NEMO_var_map = { - 'salt': 'vosaline', - 'temp': 'votemper', - 'NO3': 'NO3', - 'NH4': 'NH4', - 'Si': 'Si', - 'oxygen': 'OXY', - 'TIC': 'DIC', - 'alkalinity': 'TA', + "salt": "vosaline", + "temp": "votemper", + "NO3": "NO3", + "NH4": "NH4", + "Si": "Si", + "oxygen": "OXY", + "TIC": "DIC", + "alkalinity": "TA", } # Load BC information @@ -578,11 +554,13 @@ def create_LiveOcean_TS_BCs( d = load_LiveOcean(date, LO_dir) # Depth interpolation - interps = interpolate_to_NEMO_depths(d, depBC, var_names=(var for var in LO_to_NEMO_var_map if var != 'Si')) + interps = interpolate_to_NEMO_depths( + d, depBC, var_names=(var for var in LO_to_NEMO_var_map if var != "Si") + ) # Change to TEOS-10 - var_meta, interps['salt'], interps['temp'] = _convert_TS_to_TEOS10( - var_meta, interps['salt'], interps['temp'] + var_meta, interps["salt"], interps["temp"] = _convert_TS_to_TEOS10( + var_meta, interps["salt"], interps["temp"] ) # Remove South of Tatoosh @@ -592,7 +570,7 @@ def create_LiveOcean_TS_BCs( interps = fill_box(interps) # Calculate the density (sigma) and convect - sigma = gsw.sigma0(interps['salt'][:], interps['temp'][:]) + sigma = gsw.sigma0(interps["salt"][:], interps["temp"][:]) sigma, interps = convect(sigma, interps) # Fill Live Ocean Vertically @@ -602,7 +580,7 @@ def create_LiveOcean_TS_BCs( interpl = interpolate_to_NEMO_lateral(interps, d, lonBC, latBC, shape) # Convect Again - sigmal = gsw.sigma0(interpl['salt'][:], interpl['temp'][:]) + sigmal = gsw.sigma0(interpl["salt"][:], interpl["temp"][:]) sigmal, interpl = convect(sigmal, interpl) interpl = stabilize(sigmal, interpl) @@ -610,31 +588,31 @@ def create_LiveOcean_TS_BCs( for var in interpl.keys(): interpl[var] = np.swapaxes(interpl[var], 1, 2) interpl[var] = interpl[var].reshape( - 1, interpl[var].shape[0], 1, - interpl[var].shape[2] * interpl[var].shape[1] + 1, interpl[var].shape[0], 1, interpl[var].shape[2] * interpl[var].shape[1] ) # Due to change in LiveOcean (May 22, 2023) add NH4 to NO3 to # preserve previous behaviour (note LiveOcean NH4+NO3 evaluates # better than NO3 against NO3 obs) - interpl['NO3'] 
= interpl['NO3'] + interpl['NH4'] + interpl["NO3"] = interpl["NO3"] + interpl["NH4"] # Calculate Si from NO3 using LiveOcean nitrate - interpl['Si'] = calculate_Si_from_NO3( - interpl['NO3'], interpl['salt'], - a=LO_to_SSC_parameters['Si']['a'], - b=LO_to_SSC_parameters['Si']['b'], - c=LO_to_SSC_parameters['Si']['c'], - sigma=LO_to_SSC_parameters['Si']['sigma'], - tsa=LO_to_SSC_parameters['Si']['tsa'] - ) + interpl["Si"] = calculate_Si_from_NO3( + interpl["NO3"], + interpl["salt"], + a=LO_to_SSC_parameters["Si"]["a"], + b=LO_to_SSC_parameters["Si"]["b"], + c=LO_to_SSC_parameters["Si"]["c"], + sigma=LO_to_SSC_parameters["Si"]["sigma"], + tsa=LO_to_SSC_parameters["Si"]["tsa"], + ) # Correct NO3 values - interpl['NO3'] = correct_high_NO3( - interpl['NO3'], - smax=LO_to_SSC_parameters['NO3']['smax'], - nmax=LO_to_SSC_parameters['NO3']['nmax'] - ) + interpl["NO3"] = correct_high_NO3( + interpl["NO3"], + smax=LO_to_SSC_parameters["NO3"]["smax"], + nmax=LO_to_SSC_parameters["NO3"]["nmax"], + ) # Prepare dataset ts = d.ocean_time.data @@ -663,9 +641,9 @@ def _convert_TS_to_TEOS10(var_meta, sal, temp): :returns: updated meta data, salinity and temperature""" # modify metadata new_meta = var_meta.copy() - new_meta['vosaline']['long_name'] = 'Reference Salinity' - new_meta['vosaline']['units'] = 'g/kg' - new_meta['votemper']['long_name'] = 'Conservative Temperature' + new_meta["vosaline"]["long_name"] = "Reference Salinity" + new_meta["vosaline"]["units"] = "g/kg" + new_meta["votemper"]["long_name"] = "Conservative Temperature" # Convert salinity from practical to reference salinity sal_ref = gsw.SR_from_SP(sal[:]) # Convert temperature from potential to conservative diff --git a/SalishSeaTools/salishsea_tools/LiveOcean_SQL.py b/SalishSeaTools/salishsea_tools/LiveOcean_SQL.py index 0d4fe474..348bf60d 100644 --- a/SalishSeaTools/salishsea_tools/LiveOcean_SQL.py +++ b/SalishSeaTools/salishsea_tools/LiveOcean_SQL.py @@ -41,17 +41,24 @@ def create_files_for_nowcast(date, teos_10=True): salinity is Practical Salinity :type teos_10: boolean """ - save_dir = '/results/forcing/LiveOcean/boundary_conditions/' - LO_dir = '/results/forcing/LiveOcean/downloaded/' + save_dir = "/results/forcing/LiveOcean/boundary_conditions/" + LO_dir = "/results/forcing/LiveOcean/downloaded/" create_LiveOcean_TS_BCs( - date, date, '1H', 'daily', nowcast=True, teos_10=teos_10, - bc_dir=save_dir, LO_dir=LO_dir) + date, + date, + "1H", + "daily", + nowcast=True, + teos_10=teos_10, + bc_dir=save_dir, + LO_dir=LO_dir, + ) # ---------------------- Interpolation functions ------------------------ def load_SalishSea_boundary_grid( - fname='/data/nsoontie/MEOPAR/NEMO-forcing/open_boundaries/west/SalishSea_west_TEOS10.nc', + fname="/data/nsoontie/MEOPAR/NEMO-forcing/open_boundaries/west/SalishSea_west_TEOS10.nc", ): """Load the Salish Sea NEMO model boundary depth, latitudes and longitudes. @@ -61,15 +68,15 @@ def load_SalishSea_boundary_grid( """ f = nc.Dataset(fname) - depth = f.variables['deptht'][:] - lon = f.variables['nav_lon'][:] - lat = f.variables['nav_lat'][:] + depth = f.variables["deptht"][:] + lon = f.variables["nav_lon"][:] + lat = f.variables["nav_lat"][:] shape = lon.shape return depth, lon, lat, shape -def load_LiveOcean(files, resample_interval='1H'): +def load_LiveOcean(files, resample_interval="1H"): """Load a time series of Live Ocean results represented by a list of files. Time series is resampled by averaging over resample_interval. Default is 1 hour. 
@@ -88,33 +95,38 @@ def load_LiveOcean(files, resample_interval='1H'): with xr.open_dataset(f) as d1: # drop uncommon variables - subfunction? d, d1 = _remove_uncommon_variables_or_coords(d, d1) - d = xr.concat([d, d1], dim='ocean_time', data_vars='minimal') + d = xr.concat([d, d1], dim="ocean_time", data_vars="minimal") # Determine z-rho (depth) G, S, T = grid.get_basic_info(files[0]) # note: grid.py is from Parker z_rho = np.zeros(d.salt.shape) for t in range(z_rho.shape[0]): zeta = d.zeta.values[t, :, :] - z_rho[t, :, :, :] = grid.get_z(G['h'], zeta, S) + z_rho[t, :, :, :] = grid.get_z(G["h"], zeta, S) # Add z_rho to dataset zrho_DA = xr.DataArray( z_rho, - dims=['ocean_time', 's_rho', 'eta_rho', 'xi_rho'], - coords={'ocean_time': d.ocean_time.values[:], - 's_rho': d.s_rho.values[:], - 'eta_rho': d.eta_rho.values[:], - 'xi_rho': d.xi_rho.values[:]}, - attrs={'units': 'metres', - 'positive': 'up', - 'long_name': 'Depth at s-levels', - 'field': 'z_rho ,scalar'}) + dims=["ocean_time", "s_rho", "eta_rho", "xi_rho"], + coords={ + "ocean_time": d.ocean_time.values[:], + "s_rho": d.s_rho.values[:], + "eta_rho": d.eta_rho.values[:], + "xi_rho": d.xi_rho.values[:], + }, + attrs={ + "units": "metres", + "positive": "up", + "long_name": "Depth at s-levels", + "field": "z_rho ,scalar", + }, + ) d = d.assign(z_rho=zrho_DA) # Resample - d = d.resample(resample_interval, 'ocean_time') + d = d.resample(resample_interval, "ocean_time") return d -def _remove_uncommon_variables_or_coords(d, d1, remove_type='variables'): +def _remove_uncommon_variables_or_coords(d, d1, remove_type="variables"): """Removes uncommon variables or coordinates between two xarray datasets :arg d: First dataset @@ -128,10 +140,10 @@ def _remove_uncommon_variables_or_coords(d, d1, remove_type='variables'): :returns: two new datasets with uncommon variables/coordinates removed """ - if remove_type == 'variables': + if remove_type == "variables": d1list = d1.data_vars dlist = d.data_vars - elif remove_type == 'coords': + elif remove_type == "coords": d1list = d1.coords dlist = d.coords diff = set(dlist) ^ set(d1list) @@ -141,7 +153,7 @@ def _remove_uncommon_variables_or_coords(d, d1, remove_type='variables'): def interpolate_to_NEMO_depths(dataset, NEMO_depths, var_names): - """ Interpolate variables in var_names from a Live Ocean dataset to NEMO + """Interpolate variables in var_names from a Live Ocean dataset to NEMO depths. LiveOcean land points (including points lower than bathymetry) are set to np.nan and then masked. @@ -166,7 +178,8 @@ def interpolate_to_NEMO_depths(dataset, NEMO_depths, var_names): LO_depths = dataset.z_rho.values[t, :, j, i] var = dataset[var_name].values[t, :, j, i] var_interp[t, :, j, i] = np.interp( - -NEMO_depths, LO_depths, var, left=np.nan) + -NEMO_depths, LO_depths, var, left=np.nan + ) # NEMO depths are positive, LiveOcean are negative interps[var_name] = np.ma.masked_invalid(var_interp) @@ -199,8 +212,7 @@ def fill_NaNs_with_nearest_neighbour(data, lons, lats): valid_data = subdata[~mask] try: filled[t, k, mask] = interpolate.griddata( - points, valid_data, (lons[mask], lats[mask]), - method='nearest' + points, valid_data, (lons[mask], lats[mask]), method="nearest" ) except ValueError: # if the whole depth level is NaN, @@ -248,14 +260,14 @@ def interpolate_to_NEMO_lateral(var_arrays, dataset, NEMOlon, NEMOlat, shape): var_grid = var[t, k, :, :] # First, interpolate with bilinear. The result is masked near # and at grid points where var_grid is masked. 
- var_interp = Basemap.interp( - var_grid, lonsLO, latsLO, NEMOlon, NEMOlat) + var_interp = Basemap.interp(var_grid, lonsLO, latsLO, NEMOlon, NEMOlat) # Keep track of mask mask[t, k, ...] = var_interp.mask # Next, interpolate using nearest neighbour so that masked # areas can be filled later. interp_nearest[t, k, ...] = Basemap.interp( - var_grid, lonsLO, latsLO, NEMOlon, NEMOlat, order=0) + var_grid, lonsLO, latsLO, NEMOlon, NEMOlat, order=0 + ) # ave bilinear intepr in var_new var_new[t, k, ...] = var_interp # Fill in masked values with nearest neighbour interpolant @@ -263,11 +275,12 @@ def interpolate_to_NEMO_lateral(var_arrays, dataset, NEMOlon, NEMOlat, shape): var_new[inds_of_mask] = interp_nearest[inds_of_mask] # There are still some nans over pure land areas. # Fill those with nearest lateral neighbour or level above - interps[var_name] = fill_NaNs_with_nearest_neighbour( - var_new, NEMOlon, NEMOlat) + interps[var_name] = fill_NaNs_with_nearest_neighbour(var_new, NEMOlon, NEMOlat) # Make sure salinity is strictly increasing with depth for k in range(1, var_new.shape[1]): - interps['salt'][:, k] = np.fmax(interps['salt'][:, k], interps['salt'][:, k-1]) + interps["salt"][:, k] = np.fmax( + interps["salt"][:, k], interps["salt"][:, k - 1] + ) # Make sure density is strictly increasing with depth interps = _increasing_density(interps) return interps @@ -280,11 +293,11 @@ def _increasing_density(filled): stable = False while not stable: - for t in np.arange(filled['salt'].shape[0]): - approx_rho_stable = ( - beta * (filled['salt'][t, 1:] - filled['salt'][t, :-1]) - alpha * - (filled['temp'][t, 1:] - filled['temp'][t, :-1])) - if (np.min(approx_rho_stable) >= 0): + for t in np.arange(filled["salt"].shape[0]): + approx_rho_stable = beta * ( + filled["salt"][t, 1:] - filled["salt"][t, :-1] + ) - alpha * (filled["temp"][t, 1:] - filled["temp"][t, :-1]) + if np.min(approx_rho_stable) >= 0: stable = True else: inds_of_mask = np.where(approx_rho_stable < 0) @@ -292,7 +305,7 @@ def _increasing_density(filled): ks = np.where(approx_rho_stable[:, i, j] < 0) kmax = max(ks[0]) + 2 kmin = min(ks[0]) - for var_name in ['salt', 'temp']: + for var_name in ["salt", "temp"]: average = np.mean(filled[var_name][t, kmin:kmax, i, j]) filled[var_name][t, kmin:kmax, i, j] = average return filled @@ -301,110 +314,131 @@ def _increasing_density(filled): def _bioFileSetup(TS, new): for dname, the_dim in TS.dimensions.items(): new.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None) - deptht=new.createVariable('deptht','float32',('deptht',)) - deptht.long_name = 'Vertical T Levels' - deptht.units = 'm' - deptht.positive = 'down' - deptht.valid_range = np.array((4., 428.)) - deptht[:] = TS.variables['deptht'][:] - - #nav_lat - nav_lat = new.createVariable('nav_lat','float32',('yb','xbT')) - nav_lat.long_name = TS.variables['nav_lat'].long_name - nav_lat.units = TS.variables['nav_lat'].units - nav_lat[:] = TS.variables['nav_lat'][:] - - #nav_lon - nav_lon = new.createVariable('nav_lon','float32',('yb','xbT')) - nav_lon.long_name = TS.variables['nav_lon'].long_name - nav_lon.units = TS.variables['nav_lon'].units - nav_lon[:]=TS.variables['nav_lon'][:] + deptht = new.createVariable("deptht", "float32", ("deptht",)) + deptht.long_name = "Vertical T Levels" + deptht.units = "m" + deptht.positive = "down" + deptht.valid_range = np.array((4.0, 428.0)) + deptht[:] = TS.variables["deptht"][:] + + # nav_lat + nav_lat = new.createVariable("nav_lat", "float32", ("yb", "xbT")) + nav_lat.long_name = 
TS.variables["nav_lat"].long_name + nav_lat.units = TS.variables["nav_lat"].units + nav_lat[:] = TS.variables["nav_lat"][:] + + # nav_lon + nav_lon = new.createVariable("nav_lon", "float32", ("yb", "xbT")) + nav_lon.long_name = TS.variables["nav_lon"].long_name + nav_lon.units = TS.variables["nav_lon"].units + nav_lon[:] = TS.variables["nav_lon"][:] # time_counter - time_counter = new.createVariable('time_counter', 'float32', ('time_counter')) - time_counter.long_name = 'Time axis' - time_counter.axis = 'T' - time_counter.units = 'weeks since beginning of year' - time_counter[:] = TS.variables['time_counter'][:] + time_counter = new.createVariable("time_counter", "float32", ("time_counter")) + time_counter.long_name = "Time axis" + time_counter.axis = "T" + time_counter.units = "weeks since beginning of year" + time_counter[:] = TS.variables["time_counter"][:] # NO3 - voNO3 = new.createVariable('NO3', 'float32', - ('time_counter','deptht','yb','xbT')) - voNO3.grid = TS.variables['votemper'].grid - voNO3.units = 'muM' - voNO3.long_name = 'Nitrate' + voNO3 = new.createVariable( + "NO3", "float32", ("time_counter", "deptht", "yb", "xbT") + ) + voNO3.grid = TS.variables["votemper"].grid + voNO3.units = "muM" + voNO3.long_name = "Nitrate" # don't yet set values - #Si - voSi = new.createVariable('Si', 'float32', - ('time_counter','deptht','yb','xbT')) - voSi.grid = TS.variables['votemper'].grid - voSi.units = 'muM' - voSi.long_name = 'Silica' + # Si + voSi = new.createVariable("Si", "float32", ("time_counter", "deptht", "yb", "xbT")) + voSi.grid = TS.variables["votemper"].grid + voSi.units = "muM" + voSi.long_name = "Silica" # don't yet set values - return(new) + return new -def _ginterp(xval,xPeriod,yval,L,xlocs): + +def _ginterp(xval, xPeriod, yval, L, xlocs): # if not periodic, xPeriod=0 - fil=np.empty(np.size(xlocs)) - s=L/2.355 - for ii in range(0,xlocs.size): - t=xlocs[ii] - diff=[min(abs(x-t),abs(x-t+xPeriod), abs(x-t-xPeriod)) for x in xval] - weight=[np.exp(-.5*x**2/s**2) if sum(diff75 and NH >.2): + if not (P > 75 and NH > 0.2): qP.append(P) qNH.append(NH) else: remP.append(P) remNH.append(NH) - qP=np.array(qP) - qNH=np.array(qNH) - remP=np.array(remP) - remNH=np.array(remNH) + qP = np.array(qP) + qNH = np.array(qNH) + remP = np.array(remP) + remNH = np.array(remNH) # create depth-weighted mean profile using gaussian filter - zs=np.array(TS.variables['deptht']) - AmmProf=_ginterp(qP,0.0,qNH,10,zs) - AmmProf[AmmProf!=AmmProf]=0.0 - + zs = np.array(TS.variables["deptht"]) + AmmProf = _ginterp(qP, 0.0, qNH, 10, zs) + AmmProf[AmmProf != AmmProf] = 0.0 - for ii in range(0,zs.size): - voNH4[:,ii,0,:]=AmmProf[ii] + for ii in range(0, zs.size): + voNH4[:, ii, 0, :] = AmmProf[ii] # DON # take nearest available data to SJDF - q=session.query(Station.StartYear,Station.StartMonth,Press, Station.Lat, Station.Lon,Obs.Depth, - Obs.Nitrogen_Dissolved_Organic,Obs.Nitrogen_Dissolved_Organic_units,Tem).\ - select_from(Obs).join(Station,Station.ID==Obs.StationTBLID).\ - filter(Obs.Nitrogen_Dissolved_Organic!=None).filter(Obs.Nitrogen_Dissolved_Organic>=0).\ - filter(Station.Lat!=None).filter(Station.Lon!=None).\ - filter(Station.Lat<48.8).filter(Station.Lon<-125).all() - - qDON=[] + q = ( + session.query( + Station.StartYear, + Station.StartMonth, + Press, + Station.Lat, + Station.Lon, + Obs.Depth, + Obs.Nitrogen_Dissolved_Organic, + Obs.Nitrogen_Dissolved_Organic_units, + Tem, + ) + .select_from(Obs) + .join(Station, Station.ID == Obs.StationTBLID) + .filter(Obs.Nitrogen_Dissolved_Organic != None) + 
.filter(Obs.Nitrogen_Dissolved_Organic >= 0) + .filter(Station.Lat != None) + .filter(Station.Lon != None) + .filter(Station.Lat < 48.8) + .filter(Station.Lon < -125) + .all() + ) + + qDON = [] for row in q: qDON.append(row.Nitrogen_Dissolved_Organic) - val_DON=np.mean(qDON) + val_DON = np.mean(qDON) - voDON[:,:,:,:]=val_DON + voDON[:, :, :, :] = val_DON # PON # take nearest available data to SJDF - q=session.query(Station.StartYear,Station.StartMonth,Press, Station.Lat, Station.Lon,Obs.Depth, - Obs.Nitrogen_Particulate_Organic,Obs.Nitrogen_Particulate_Organic_units,Tem).\ - select_from(Obs).join(Station,Station.ID==Obs.StationTBLID).\ - filter(Obs.Nitrogen_Particulate_Organic!=None).filter(Obs.Nitrogen_Particulate_Organic>=0).\ - filter(Station.Lat!=None).filter(Station.Lon!=None).\ - filter(Station.Lat<48.8).filter(Station.Lon<-125).all() - - qPON=[] + q = ( + session.query( + Station.StartYear, + Station.StartMonth, + Press, + Station.Lat, + Station.Lon, + Obs.Depth, + Obs.Nitrogen_Particulate_Organic, + Obs.Nitrogen_Particulate_Organic_units, + Tem, + ) + .select_from(Obs) + .join(Station, Station.ID == Obs.StationTBLID) + .filter(Obs.Nitrogen_Particulate_Organic != None) + .filter(Obs.Nitrogen_Particulate_Organic >= 0) + .filter(Station.Lat != None) + .filter(Station.Lon != None) + .filter(Station.Lat < 48.8) + .filter(Station.Lon < -125) + .all() + ) + + qPON = [] for row in q: qPON.append(row.Nitrogen_Particulate_Organic) - val_PON=np.mean(qPON) + val_PON = np.mean(qPON) - voPON[:,:,:,:]=val_PON + voPON[:, :, :, :] = val_PON newConst.close() TS.close() # set up NO3 and save climatology: # umol/L=mmol/m**3, so all NO units the same - q=session.query(JDFLocs.ObsID, Station.StartYear,Station.StartMonth,Press,NO, - Tem,SA,Station.StartDay).select_from(Obs).\ - join(JDFLocs,JDFLocs.ObsID==Obs.ID).join(Station,Station.ID==Obs.StationTBLID).\ - join(Calcs,Calcs.ObsID==Obs.ID).filter(SA<38).filter(SA>0).filter(NO!=None).\ - filter(Tem!=None).filter(SA!=None).filter(Press!=None).\ - all() - qNO50=[] - qSA50=[] - qP50=[] - qT50=[] + q = ( + session.query( + JDFLocs.ObsID, + Station.StartYear, + Station.StartMonth, + Press, + NO, + Tem, + SA, + Station.StartDay, + ) + .select_from(Obs) + .join(JDFLocs, JDFLocs.ObsID == Obs.ID) + .join(Station, Station.ID == Obs.StationTBLID) + .join(Calcs, Calcs.ObsID == Obs.ID) + .filter(SA < 38) + .filter(SA > 0) + .filter(NO != None) + .filter(Tem != None) + .filter(SA != None) + .filter(Press != None) + .all() + ) + qNO50 = [] + qSA50 = [] + qP50 = [] + qT50 = [] for OID, Yr, Mn, P, NO3, T, S_A, dy in q: - if P>80: + if P > 80: qNO50.append(NO3) qT50.append(T) qSA50.append(S_A) qP50.append(P) - qNO50=np.array(qNO50) - qSA50=np.array(qSA50) - qT50=np.array(qT50) - qP50=np.array(qP50) - qTC50=gsw_calls.generic_gsw_caller('gsw_CT_from_t.m', - [qSA50, qT50, qP50, ]) - qNO50=np.array(qNO50) - - a=np.vstack([qTC50,qSA50,np.ones(len(qTC50))]).T - #a2=np.vstack([qTC,qSA,np.ones(len(qTC))]).T - m = np.linalg.lstsq(a,qNO50)[0] + qNO50 = np.array(qNO50) + qSA50 = np.array(qSA50) + qT50 = np.array(qT50) + qP50 = np.array(qP50) + qTC50 = gsw_calls.generic_gsw_caller( + "gsw_CT_from_t.m", + [ + qSA50, + qT50, + qP50, + ], + ) + qNO50 = np.array(qNO50) + + a = np.vstack([qTC50, qSA50, np.ones(len(qTC50))]).T + # a2=np.vstack([qTC,qSA,np.ones(len(qTC))]).T + m = np.linalg.lstsq(a, qNO50)[0] mT, mS, mC = m - df=pd.DataFrame({'mC':[mC],'mT':[mT],'mS':[mS]}) + df = pd.DataFrame({"mC": [mC], "mT": [mT], "mS": [mS]}) df.to_csv(nFitFilePath) - zupper=np.extract(zs<100, zs) - 
ydays=np.arange(0,365,365/52) + zupper = np.extract(zs < 100, zs) + ydays = np.arange(0, 365, 365 / 52) # umol/L=mmol/m**3, so all NO units the same - q=session.query(JDFLocs.ObsID, Station.StartYear,Station.StartMonth,Press,NO, - Tem,SA,Station.StartDay).select_from(Obs).\ - join(JDFLocs,JDFLocs.ObsID==Obs.ID).join(Station,Station.ID==Obs.StationTBLID).\ - join(Calcs,Calcs.ObsID==Obs.ID).filter(SA<38).filter(SA>0).filter(NO!=None).\ - filter(Tem!=None).filter(SA!=None).filter(Press<120).filter(Press!=None).\ - all() - #for row in q: + q = ( + session.query( + JDFLocs.ObsID, + Station.StartYear, + Station.StartMonth, + Press, + NO, + Tem, + SA, + Station.StartDay, + ) + .select_from(Obs) + .join(JDFLocs, JDFLocs.ObsID == Obs.ID) + .join(Station, Station.ID == Obs.StationTBLID) + .join(Calcs, Calcs.ObsID == Obs.ID) + .filter(SA < 38) + .filter(SA > 0) + .filter(NO != None) + .filter(Tem != None) + .filter(SA != None) + .filter(Press < 120) + .filter(Press != None) + .all() + ) + # for row in q: # print(row) - qYr=[] - qMn=[] - qDy=[] - qP=[] - qNO=[] - date=[] + qYr = [] + qMn = [] + qDy = [] + qP = [] + qNO = [] + date = [] for OID, Yr, Mn, P, NO3, T, S_A, dy in q: qYr.append(Yr) qMn.append(Mn) qDy.append(dy) qP.append(P) qNO.append(NO3) - date.append(datetime.date(int(Yr),int(Mn),int(dy))) + date.append(datetime.date(int(Yr), int(Mn), int(dy))) - qP=np.array(qP) - qNO=np.array(qNO) - date=np.array(date) - YD=0.0*qNO - for i in range(0,len(YD)): - YD[i]=date[i].timetuple().tm_yday + qP = np.array(qP) + qNO = np.array(qNO) + date = np.array(date) + YD = 0.0 * qNO + for i in range(0, len(YD)): + YD[i] = date[i].timetuple().tm_yday - ndict,nmat=_ginterp2d(YD,365,qP,0,qNO,30,10,ydays,zupper) - np.savetxt(nClimFilePath,nmat,delimiter=',') + ndict, nmat = _ginterp2d(YD, 365, qP, 0, qNO, 30, 10, ydays, zupper) + np.savetxt(nClimFilePath, nmat, delimiter=",") # set up Si and save climatology: # umol/L=mmol/m**3, so all NO units the same - q=session.query(JDFLocs.ObsID, Station.StartYear,Station.StartMonth,Press, - Obs.Silicate,Tem,SA,Station.StartDay).select_from(Obs).\ - join(JDFLocs,JDFLocs.ObsID==Obs.ID).join(Station,Station.ID==Obs.StationTBLID).\ - join(Calcs,Calcs.ObsID==Obs.ID).filter(SA<38).filter(SA>0).filter(Obs.Silicate!=None).\ - filter(Tem!=None).filter(SA!=None).filter(Press!=None).\ - all() - qP50=[] - qNO50=[] - qSA50=[] - qT50=[] + q = ( + session.query( + JDFLocs.ObsID, + Station.StartYear, + Station.StartMonth, + Press, + Obs.Silicate, + Tem, + SA, + Station.StartDay, + ) + .select_from(Obs) + .join(JDFLocs, JDFLocs.ObsID == Obs.ID) + .join(Station, Station.ID == Obs.StationTBLID) + .join(Calcs, Calcs.ObsID == Obs.ID) + .filter(SA < 38) + .filter(SA > 0) + .filter(Obs.Silicate != None) + .filter(Tem != None) + .filter(SA != None) + .filter(Press != None) + .all() + ) + qP50 = [] + qNO50 = [] + qSA50 = [] + qT50 = [] for OID, Yr, Mn, P, NO3, T, S_A, dy in q: - if P>80: + if P > 80: qP50.append(P) qNO50.append(NO3) qT50.append(T) qSA50.append(S_A) - qP50 =np.array(qP50) - qSA50=np.array(qSA50) - qT50 =np.array(qT50) - qTC50=gsw_calls.generic_gsw_caller('gsw_CT_from_t.m',[qSA50, qT50, qP50, ]) - qNO50=np.array(qNO50) + qP50 = np.array(qP50) + qSA50 = np.array(qSA50) + qT50 = np.array(qT50) + qTC50 = gsw_calls.generic_gsw_caller( + "gsw_CT_from_t.m", + [ + qSA50, + qT50, + qP50, + ], + ) + qNO50 = np.array(qNO50) - a=np.vstack([qTC50,qSA50,np.ones(len(qTC50))]).T - m = np.linalg.lstsq(a,qNO50)[0] + a = np.vstack([qTC50, qSA50, np.ones(len(qTC50))]).T + m = np.linalg.lstsq(a, 
qNO50)[0] mT, mS, mC = m - df=pd.DataFrame({'mC':[mC],'mT':[mT],'mS':[mS]}) + df = pd.DataFrame({"mC": [mC], "mT": [mT], "mS": [mS]}) df.to_csv(siFitFilePath) # umol/L=mmol/m**3, so all NO units the same - q=session.query(JDFLocs.ObsID, Station.StartYear,Station.StartMonth,Press,Obs.Silicate, - Tem,SA,Station.StartDay).select_from(Obs).\ - join(JDFLocs,JDFLocs.ObsID==Obs.ID).join(Station,Station.ID==Obs.StationTBLID).\ - join(Calcs,Calcs.ObsID==Obs.ID).filter(SA<38).filter(SA>0).filter(Obs.Silicate!=None).\ - filter(Tem!=None).filter(SA!=None).filter(Press<120).filter(Press!=None).\ - all() - qYr=[] - qMn=[] - qDy=[] - qP=[] - qNO=[] - date=[] + q = ( + session.query( + JDFLocs.ObsID, + Station.StartYear, + Station.StartMonth, + Press, + Obs.Silicate, + Tem, + SA, + Station.StartDay, + ) + .select_from(Obs) + .join(JDFLocs, JDFLocs.ObsID == Obs.ID) + .join(Station, Station.ID == Obs.StationTBLID) + .join(Calcs, Calcs.ObsID == Obs.ID) + .filter(SA < 38) + .filter(SA > 0) + .filter(Obs.Silicate != None) + .filter(Tem != None) + .filter(SA != None) + .filter(Press < 120) + .filter(Press != None) + .all() + ) + qYr = [] + qMn = [] + qDy = [] + qP = [] + qNO = [] + date = [] for OID, Yr, Mn, P, NO3, T, S_A, dy in q: qYr.append(Yr) qMn.append(Mn) qDy.append(dy) qP.append(P) qNO.append(NO3) - date.append(datetime.date(int(Yr),int(Mn),int(dy))) - qP=np.array(qP) - qNO=np.array(qNO) - date=np.array(date) - YD=0.0*qP - for i in range(0,len(YD)): - YD[i]=date[i].timetuple().tm_yday - sidict,simat=_ginterp2d(YD,365,qP,0,qNO,30,10,ydays,zupper) - np.savetxt(siClimFilePath,simat,delimiter=',') + date.append(datetime.date(int(Yr), int(Mn), int(dy))) + qP = np.array(qP) + qNO = np.array(qNO) + date = np.array(date) + YD = 0.0 * qP + for i in range(0, len(YD)): + YD[i] = date[i].timetuple().tm_yday + sidict, simat = _ginterp2d(YD, 365, qP, 0, qNO, 30, 10, ydays, zupper) + np.savetxt(siClimFilePath, simat, delimiter=",") return + # ------------------ Creation of files ------------------------------ -def create_LiveOcean_bio_BCs_fromTS(TSfile, strdate=None, - TSdir = '/results/forcing/LiveOcean/boundary_conditions', - outFile='bioLOTS_{:y%Ym%md%d}.nc', - outDir = '/results/forcing/LiveOcean/boundary_conditions/bio', - nFitFilePath = '/results/forcing/LiveOcean/boundary_conditions/bio/fits/bioOBCfit_NTS.csv', - siFitFilePath = '/results/forcing/LiveOcean/boundary_conditions/bio/fits/bioOBCfit_SiTS.csv', - nClimFilePath = '/results/forcing/LiveOcean/boundary_conditions/bio/fits/nmat.csv', - siClimFilePath = '/results/forcing/LiveOcean/boundary_conditions/bio/fits/simat.csv', - recalcFits=False): - """ create BC files from LiveOcean-based TS BC files using linear fit of N and Si to T and S +def create_LiveOcean_bio_BCs_fromTS( + TSfile, + strdate=None, + TSdir="/results/forcing/LiveOcean/boundary_conditions", + outFile="bioLOTS_{:y%Ym%md%d}.nc", + outDir="/results/forcing/LiveOcean/boundary_conditions/bio", + nFitFilePath="/results/forcing/LiveOcean/boundary_conditions/bio/fits/bioOBCfit_NTS.csv", + siFitFilePath="/results/forcing/LiveOcean/boundary_conditions/bio/fits/bioOBCfit_SiTS.csv", + nClimFilePath="/results/forcing/LiveOcean/boundary_conditions/bio/fits/nmat.csv", + siClimFilePath="/results/forcing/LiveOcean/boundary_conditions/bio/fits/simat.csv", + recalcFits=False, +): + """create BC files from LiveOcean-based TS BC files using linear fit of N and Si to T and S :arg str TSfile: name of LiveOcean-based TS Bc file @@ -829,26 +1051,30 @@ def create_LiveOcean_bio_BCs_fromTS(TSfile, strdate=None, """ # if 
requested, recalculate nut-TS fits and nut climatologies from database - if recalcFits==True: - recalcBioTSFits(nFitFilePath = nFitFilePath,siFitFilePath = siFitFilePath, - nClimFilePath = nClimFilePath, siClimFilePath = siClimFilePath) + if recalcFits == True: + recalcBioTSFits( + nFitFilePath=nFitFilePath, + siFitFilePath=siFitFilePath, + nClimFilePath=nClimFilePath, + siClimFilePath=siClimFilePath, + ) # if no date is supplied, try to get it from the TS file name. otherwise, process it # note: None case is fragile - if strdate==None: - TSyear=int(TSfile[-13:-9]) - TSmon=int(TSfile[-8:-6]) - TSday=int(TSfile[-5:-3]) - dtdate=datetime.datetime(TSyear,TSmon,TSday) + if strdate == None: + TSyear = int(TSfile[-13:-9]) + TSmon = int(TSfile[-8:-6]) + TSday = int(TSfile[-5:-3]) + dtdate = datetime.datetime(TSyear, TSmon, TSday) else: - dtdate = datetime.datetime.strptime(strdate, '%Y-%m-%d') + dtdate = datetime.datetime.strptime(strdate, "%Y-%m-%d") TSyear = dtdate.year - YD=(dtdate-datetime.datetime(TSyear-1,12,31)).days + YD = (dtdate - datetime.datetime(TSyear - 1, 12, 31)).days # if necessary, substitue date into file name - if ('{' in outFile): - outFile=outFile.format(dtdate) + if "{" in outFile: + outFile = outFile.format(dtdate) # TS file is name of LO TS OBC file for the date you want bio OBCs for TS = nc.Dataset(os.path.join(TSdir, TSfile)) @@ -857,61 +1083,71 @@ def create_LiveOcean_bio_BCs_fromTS(TSfile, strdate=None, tofile = os.path.join(outDir, outFile) if os.path.exists(tofile): os.remove(tofile) - new = nc.Dataset(tofile, 'w', zlib=True) + new = nc.Dataset(tofile, "w", zlib=True) new = _bioFileSetup(TS, new) # other definitions - zs=np.array(new.variables['deptht']) - zupper=np.extract(zs<100, zs) - ydays=np.arange(0,365,365/52) + zs = np.array(new.variables["deptht"]) + zupper = np.extract(zs < 100, zs) + ydays = np.arange(0, 365, 365 / 52) # load N data - nmat=np.loadtxt(nClimFilePath,delimiter=',') - df = pd.read_csv(nFitFilePath,index_col=0) - mC=df.loc[0,'mC'] - mT=df.loc[0,'mT'] - mS=df.loc[0,'mS'] + nmat = np.loadtxt(nClimFilePath, delimiter=",") + df = pd.read_csv(nFitFilePath, index_col=0) + mC = df.loc[0, "mC"] + mT = df.loc[0, "mT"] + mS = df.loc[0, "mS"] # process N - ztan=[.5*math.tanh((a-70)/20)+1/2 for a in zupper] - zcoeff=np.ones(np.shape(TS.variables['votemper'])) # zcoeff is multiplier of fit function; 1-zcoeff is multiplier of climatology - for i in range(0,zupper.size): - zcoeff[:,i,:,:]=ztan[i] - funfit=mC +mT*TS.variables['votemper'][:,:,:,:]+mS*TS.variables['vosaline'][:,:,:,:] - - nmat0=np.zeros((np.shape(TS.variables['votemper'])[0],np.shape(nmat)[1])) - for ii in range(0,np.shape(nmat0)[1]): - nmat0[:,ii]=np.interp(YD,ydays,nmat[:,ii],period=365) - nmat_2=np.expand_dims(nmat0,axis=2) - nmat_2=np.expand_dims(nmat_2,axis=3) - nmat_3=np.tile(nmat_2,(1,1,1,TS.variables['votemper'].shape[3])) - clim=np.zeros(TS.variables['votemper'].shape) - clim[:,0:27,:,:]=nmat_3 + ztan = [0.5 * math.tanh((a - 70) / 20) + 1 / 2 for a in zupper] + zcoeff = np.ones( + np.shape(TS.variables["votemper"]) + ) # zcoeff is multiplier of fit function; 1-zcoeff is multiplier of climatology + for i in range(0, zupper.size): + zcoeff[:, i, :, :] = ztan[i] + funfit = ( + mC + + mT * TS.variables["votemper"][:, :, :, :] + + mS * TS.variables["vosaline"][:, :, :, :] + ) + + nmat0 = np.zeros((np.shape(TS.variables["votemper"])[0], np.shape(nmat)[1])) + for ii in range(0, np.shape(nmat0)[1]): + nmat0[:, ii] = np.interp(YD, ydays, nmat[:, ii], period=365) + nmat_2 = np.expand_dims(nmat0, 
axis=2) + nmat_2 = np.expand_dims(nmat_2, axis=3) + nmat_3 = np.tile(nmat_2, (1, 1, 1, TS.variables["votemper"].shape[3])) + clim = np.zeros(TS.variables["votemper"].shape) + clim[:, 0:27, :, :] = nmat_3 # set N variable - new.variables['NO3'][:,:,:,:]=zcoeff*funfit+(1-zcoeff)*clim + new.variables["NO3"][:, :, :, :] = zcoeff * funfit + (1 - zcoeff) * clim # load Si data - simat=np.loadtxt(siClimFilePath,delimiter=',') - dfS = pd.read_csv(siFitFilePath,index_col=0) - mC=dfS.loc[0,'mC'] - mT=dfS.loc[0,'mT'] - mS=dfS.loc[0,'mS'] + simat = np.loadtxt(siClimFilePath, delimiter=",") + dfS = pd.read_csv(siFitFilePath, index_col=0) + mC = dfS.loc[0, "mC"] + mT = dfS.loc[0, "mT"] + mS = dfS.loc[0, "mS"] # process Si - funfit=mC +mT*TS.variables['votemper'][:,:,:,:]+mS*TS.variables['vosaline'][:,:,:,:] + funfit = ( + mC + + mT * TS.variables["votemper"][:, :, :, :] + + mS * TS.variables["vosaline"][:, :, :, :] + ) - simat0=np.zeros((np.shape(TS.variables['votemper'])[0],np.shape(simat)[1])) - for ii in range(0,np.shape(simat0)[1]): - simat0[:,ii]=np.interp(YD,ydays,simat[:,ii],period=365) - simat_2=np.expand_dims(simat0,axis=2) - simat_2=np.expand_dims(simat_2,axis=3) - simat_3=np.tile(simat_2,(1,1,1,TS.variables['votemper'].shape[3])) - clim=np.zeros(TS.variables['votemper'].shape) - clim[:,0:27,:,:]=simat_3 + simat0 = np.zeros((np.shape(TS.variables["votemper"])[0], np.shape(simat)[1])) + for ii in range(0, np.shape(simat0)[1]): + simat0[:, ii] = np.interp(YD, ydays, simat[:, ii], period=365) + simat_2 = np.expand_dims(simat0, axis=2) + simat_2 = np.expand_dims(simat_2, axis=3) + simat_3 = np.tile(simat_2, (1, 1, 1, TS.variables["votemper"].shape[3])) + clim = np.zeros(TS.variables["votemper"].shape) + clim[:, 0:27, :, :] = simat_3 # set Si variable - new.variables['Si'][:,:,:,:]=zcoeff*funfit+(1-zcoeff)*clim + new.variables["Si"][:, :, :, :] = zcoeff * funfit + (1 - zcoeff) * clim new.close() TS.close() @@ -920,12 +1156,17 @@ def create_LiveOcean_bio_BCs_fromTS(TSfile, strdate=None, def create_LiveOcean_TS_BCs( - start, end, avg_period, file_frequency, - nowcast=False, teos_10=True, basename='LO', + start, + end, + avg_period, + file_frequency, + nowcast=False, + teos_10=True, + basename="LO", single_nowcast=False, - bc_dir='/results/forcing/LiveOcean/boundary_condtions/', - LO_dir='/results/forcing/LiveOcean/downloaded/', - NEMO_BC='/data/nsoontie/MEOPAR/NEMO-forcing/open_boundaries/west/SalishSea_west_TEOS10.nc' + bc_dir="/results/forcing/LiveOcean/boundary_condtions/", + LO_dir="/results/forcing/LiveOcean/downloaded/", + NEMO_BC="/data/nsoontie/MEOPAR/NEMO-forcing/open_boundaries/west/SalishSea_west_TEOS10.nc", ): """Create a series of Live Ocean boundary condition files in date range [start, end] for use in the NEMO model. 
@@ -984,20 +1225,24 @@ def create_LiveOcean_TS_BCs( :rtype: list """ # Check for incoming consistency - if (nowcast and single_nowcast): - raise ValueError ('Choose either nowcast or single_nowcast, not both') + if nowcast and single_nowcast: + raise ValueError("Choose either nowcast or single_nowcast, not both") # Create metadeta for temperature and salinity - var_meta = {'vosaline': {'grid': 'SalishSea2', - 'long_name': 'Practical Salinity', - 'units': 'psu'}, - 'votemper': {'grid': 'SalishSea2', - 'long_name': 'Potential Temperature', - 'units': 'deg C'} - } + var_meta = { + "vosaline": { + "grid": "SalishSea2", + "long_name": "Practical Salinity", + "units": "psu", + }, + "votemper": { + "grid": "SalishSea2", + "long_name": "Potential Temperature", + "units": "deg C", + }, + } # Mapping from LiveOcean TS names to NEMO TS names - LO_to_NEMO_var_map = {'salt': 'vosaline', - 'temp': 'votemper'} + LO_to_NEMO_var_map = {"salt": "vosaline", "temp": "votemper"} # Initialize var_arrays dict NEMO_var_arrays = {key: [] for key in LO_to_NEMO_var_map.values()} @@ -1008,18 +1253,20 @@ def create_LiveOcean_TS_BCs( # Load and interpolate Live Ocean if nowcast: logger.info( - 'Preparing 48 hours of Live Ocean results. ' - 'Argument end={} is ignored'.format(end)) + "Preparing 48 hours of Live Ocean results. " + "Argument end={} is ignored".format(end) + ) files = _list_LO_files_for_nowcast(start, LO_dir) save_dir = os.path.join(bc_dir, start) if not os.path.isdir(save_dir): os.mkdir(save_dir) elif single_nowcast: logger.info( - 'Preparing one daily average Live Ocean result. ' - 'Argument end={} is ignored'.format(end)) - sdt = datetime.datetime.strptime(start, '%Y-%m-%d') - files = [os.path.join(LO_dir, sdt.strftime('%Y%m%d'), 'low_passed_UBC.nc')] + "Preparing one daily average Live Ocean result. 
" + "Argument end={} is ignored".format(end) + ) + sdt = datetime.datetime.strptime(start, "%Y-%m-%d") + files = [os.path.join(LO_dir, sdt.strftime("%Y%m%d"), "low_passed_UBC.nc")] save_dir = os.path.join(bc_dir, start) if not os.path.isdir(save_dir): os.mkdir(save_dir) @@ -1028,41 +1275,50 @@ def create_LiveOcean_TS_BCs( save_dir = bc_dir LO_dataset = load_LiveOcean(files, resample_interval=avg_period) - depth_interps = interpolate_to_NEMO_depths(LO_dataset, depBC, - ['salt', 'temp']) - lateral_interps = interpolate_to_NEMO_lateral(depth_interps, LO_dataset, - lonBC, latBC, shape) - lateral_interps['ocean_time'] = LO_dataset.ocean_time + depth_interps = interpolate_to_NEMO_depths(LO_dataset, depBC, ["salt", "temp"]) + lateral_interps = interpolate_to_NEMO_lateral( + depth_interps, LO_dataset, lonBC, latBC, shape + ) + lateral_interps["ocean_time"] = LO_dataset.ocean_time # convert to TEOS-10 if necessary if teos_10: - var_meta, lateral_interps['salt'], lateral_interps['temp'] = \ + var_meta, lateral_interps["salt"], lateral_interps["temp"] = ( _convert_TS_to_TEOS10( - var_meta, lateral_interps['salt'], lateral_interps['temp']) + var_meta, lateral_interps["salt"], lateral_interps["temp"] + ) + ) # divide up data and save into separate files _separate_and_save_files( - lateral_interps, avg_period, file_frequency, basename, save_dir, - LO_to_NEMO_var_map, var_meta, NEMO_var_arrays, NEMO_BC) + lateral_interps, + avg_period, + file_frequency, + basename, + save_dir, + LO_to_NEMO_var_map, + var_meta, + NEMO_var_arrays, + NEMO_BC, + ) # make time_counter the record dimension using ncks and compress - files = glob.glob(os.path.join(save_dir, '*.nc')) + files = glob.glob(os.path.join(save_dir, "*.nc")) for f in files: - cmd = ['ncks', '--mk_rec_dmn=time_counter', '-O', f, f] + cmd = ["ncks", "--mk_rec_dmn=time_counter", "-O", f, f] sp.call(cmd) - cmd = ['ncks', '-4', '-L4', '-O', f, f] + cmd = ["ncks", "-4", "-L4", "-O", f, f] sp.call(cmd) # move files around if nowcast: - filepaths = _relocate_files_for_nowcast( - start, save_dir, basename, bc_dir) + filepaths = _relocate_files_for_nowcast(start, save_dir, basename, bc_dir) elif single_nowcast: filepaths = [] d_file = os.path.join( - save_dir, '{}_{}.nc'.format( - basename, sdt.strftime('y%Ym%md%d'))) + save_dir, "{}_{}.nc".format(basename, sdt.strftime("y%Ym%md%d")) + ) filepath = os.path.join( - bc_dir, '{}_{}.nc'.format( - basename, sdt.strftime('y%Ym%md%d'))) + bc_dir, "{}_{}.nc".format(basename, sdt.strftime("y%Ym%md%d")) + ) os.rename(d_file, filepath) filepaths.append(filepath) if not os.listdir(save_dir): @@ -1091,12 +1347,12 @@ def _relocate_files_for_nowcast(start_date, save_dir, basename, bc_dir): :rtype: list """ filepaths = [] - rundate = datetime.datetime.strptime(start_date, '%Y-%m-%d') - for d, subdir in zip([1, 2], ['', 'fcst']): + rundate = datetime.datetime.strptime(start_date, "%Y-%m-%d") + for d, subdir in zip([1, 2], ["", "fcst"]): next_date = rundate + datetime.timedelta(days=d) d_file = os.path.join( - save_dir, '{}_{}.nc'.format( - basename, next_date.strftime('y%Ym%md%d'))) + save_dir, "{}_{}.nc".format(basename, next_date.strftime("y%Ym%md%d")) + ) if os.path.isfile(d_file): filepath = os.path.join(bc_dir, subdir, os.path.basename(d_file)) os.rename(d_file, filepath) @@ -1107,7 +1363,7 @@ def _relocate_files_for_nowcast(start_date, save_dir, basename, bc_dir): def _list_LO_time_series_files(start, end, LO_dir): - """ List the Live Ocean files in a given date range [start, end]. 
+ """List the Live Ocean files in a given date range [start, end]. LO nowcast files that form a time series are used. Note: If start='2016-06-01' and end= '2016-06-02' results will be a list starting with LO_dir/2016-05-31/ocean_his_0025_UBC.nc and ending with @@ -1124,15 +1380,16 @@ def _list_LO_time_series_files(start, end, LO_dir): :returns: list of Live Ocean file names """ - sdt = (datetime.datetime.strptime(start, '%Y-%m-%d') - - datetime.timedelta(days=1)) - edt = datetime.datetime.strptime(end, '%Y-%m-%d') + sdt = datetime.datetime.strptime(start, "%Y-%m-%d") - datetime.timedelta(days=1) + edt = datetime.datetime.strptime(end, "%Y-%m-%d") sstr = os.path.join( - LO_dir, '{}/ocean_his_0025_UBC.nc'.format(sdt.strftime('%Y%m%d'))) + LO_dir, "{}/ocean_his_0025_UBC.nc".format(sdt.strftime("%Y%m%d")) + ) estr = os.path.join( - LO_dir, '{}/ocean_his_0024_UBC.nc'.format(edt.strftime('%Y%m%d'))) + LO_dir, "{}/ocean_his_0024_UBC.nc".format(edt.strftime("%Y%m%d")) + ) - allfiles = glob.glob(os.path.join(LO_dir, '*/*UBC.nc')) + allfiles = glob.glob(os.path.join(LO_dir, "*/*UBC.nc")) files = [] for filename in allfiles: @@ -1140,7 +1397,7 @@ def _list_LO_time_series_files(start, end, LO_dir): files.append(filename) # remove files outside of first 24hours for each day - regex = re.compile(r'_00[3-7][0-9]|_002[6-9]') + regex = re.compile(r"_00[3-7][0-9]|_002[6-9]") keeps = [x for x in files if not regex.search(x)] keeps.sort() @@ -1149,7 +1406,7 @@ def _list_LO_time_series_files(start, end, LO_dir): def _list_LO_files_for_nowcast(rundate, LO_dir): - """ List 48 hours of Live Ocean files that began on rundate. + """List 48 hours of Live Ocean files that began on rundate. Used for creation of nowcast system boundary conditions. Each Live Ocean run date contains 72 hours. This funtcion returns the files that represent hours 23 through 71. @@ -1166,10 +1423,10 @@ def _list_LO_files_for_nowcast(rundate, LO_dir): :returns: list of Live Ocean file names """ - sdt = datetime.datetime.strptime(rundate, '%Y-%m-%d') - allfiles = glob.glob(os.path.join(LO_dir, sdt.strftime('%Y%m%d'), '*.nc')) - start_str = 'ocean_his_0025_UBC.nc' - end_str = 'ocean_his_0072_UBC.nc' + sdt = datetime.datetime.strptime(rundate, "%Y-%m-%d") + allfiles = glob.glob(os.path.join(LO_dir, sdt.strftime("%Y%m%d"), "*.nc")) + start_str = "ocean_his_0025_UBC.nc" + end_str = "ocean_his_0072_UBC.nc" files_return = [] for filename in allfiles: if os.path.basename(filename) >= start_str: @@ -1182,8 +1439,15 @@ def _list_LO_files_for_nowcast(rundate, LO_dir): def _separate_and_save_files( - interpolated_data, avg_period, file_frequency, basename, save_dir, - LO_to_NEMO_var_map, var_meta, NEMO_var_arrays, NEMO_BC_file, + interpolated_data, + avg_period, + file_frequency, + basename, + save_dir, + LO_to_NEMO_var_map, + var_meta, + NEMO_var_arrays, + NEMO_BC_file, ): """Separates and saves variables in interpolated_data into netCDF files given a desired file frequency. @@ -1229,55 +1493,60 @@ def _separate_and_save_files( :arg str NEMO_BC_file: path to an example NEMO boundary condition file for loading boundary info. """ - time_units = {'1H': 'hours', '1D': 'days', '7D': 'weeks', '1M': 'months'} + time_units = {"1H": "hours", "1D": "days", "7D": "weeks", "1M": "months"} index = 0 first = datetime.datetime.strptime( - str(interpolated_data['ocean_time'].values[0])[0:-3], - '%Y-%m-%dT%H:%M:%S.%f' + str(interpolated_data["ocean_time"].values[0])[0:-3], "%Y-%m-%dT%H:%M:%S.%f" ) # I don't really like method of retrieving the date from LO results. 
# Is it necessary? . first = first.replace(second=0, microsecond=0) - for counter, t in enumerate(interpolated_data['ocean_time']): - date = datetime.datetime.strptime(str(t.values)[0:-3], - '%Y-%m-%dT%H:%M:%S.%f') + for counter, t in enumerate(interpolated_data["ocean_time"]): + date = datetime.datetime.strptime(str(t.values)[0:-3], "%Y-%m-%dT%H:%M:%S.%f") conditions = { - 'yearly': date.year != first.year, - 'monthly': date.month != first.month, + "yearly": date.year != first.year, + "monthly": date.month != first.month, # above doesn't work if same months, different year... - 'daily': date.date() != first.date() + "daily": date.date() != first.date(), } filenames = { - 'yearly': os.path.join(save_dir, - '{}_y{}.nc'.format(basename, first.year) - ), - 'monthly': os.path.join(save_dir, - '{}_y{}m{:02d}.nc'.format(basename, - first.year, - first.month) - ), - 'daily': os.path.join(save_dir, - '{}_y{}m{:02d}d{:02d}.nc'.format(basename, - first.year, - first.month, - first.day) - ) + "yearly": os.path.join(save_dir, "{}_y{}.nc".format(basename, first.year)), + "monthly": os.path.join( + save_dir, "{}_y{}m{:02d}.nc".format(basename, first.year, first.month) + ), + "daily": os.path.join( + save_dir, + "{}_y{}m{:02d}d{:02d}.nc".format( + basename, first.year, first.month, first.day + ), + ), } if conditions[file_frequency]: for LO_name, NEMO_name in LO_to_NEMO_var_map.items(): - NEMO_var_arrays[NEMO_name] = \ - interpolated_data[LO_name][index:counter, :, :, :] + NEMO_var_arrays[NEMO_name] = interpolated_data[LO_name][ + index:counter, :, :, : + ] _create_sub_file( - first, time_units[avg_period], NEMO_var_arrays, var_meta, - NEMO_BC_file, filenames[file_frequency]) + first, + time_units[avg_period], + NEMO_var_arrays, + var_meta, + NEMO_BC_file, + filenames[file_frequency], + ) first = date index = counter - elif counter == interpolated_data['ocean_time'].values.shape[0] - 1: + elif counter == interpolated_data["ocean_time"].values.shape[0] - 1: for LO_name, NEMO_name in LO_to_NEMO_var_map.items(): - NEMO_var_arrays[NEMO_name] = \ - interpolated_data[LO_name][index:, :, :, :] - _create_sub_file(first, time_units[avg_period], NEMO_var_arrays, - var_meta, NEMO_BC_file, filenames[file_frequency]) + NEMO_var_arrays[NEMO_name] = interpolated_data[LO_name][index:, :, :, :] + _create_sub_file( + first, + time_units[avg_period], + NEMO_var_arrays, + var_meta, + NEMO_BC_file, + filenames[file_frequency], + ) def _create_sub_file(date, time_unit, var_arrays, var_meta, NEMO_BC, filename): @@ -1306,7 +1575,7 @@ def _create_sub_file(date, time_unit, var_arrays, var_meta, NEMO_BC, filename): # Load BC information f = nc.Dataset(NEMO_BC) - depBC = f.variables['deptht'] + depBC = f.variables["deptht"] # Copy variables and attributes of non-time dependent variables # from a previous BC file @@ -1314,8 +1583,8 @@ def _create_sub_file(date, time_unit, var_arrays, var_meta, NEMO_BC, filename): for var_name in var_arrays: if var_name in keys: # check that var_name can be removed keys.remove(var_name) - keys.remove('time_counter') # Allow xarray to build these arrays - keys.remove('deptht') + keys.remove("time_counter") # Allow xarray to build these arrays + keys.remove("deptht") # Now iterate through remaining variables in old BC file and add to dataset for key in keys: var = f.variables[key] @@ -1323,7 +1592,8 @@ def _create_sub_file(date, time_unit, var_arrays, var_meta, NEMO_BC, filename): var, name=key, dims=list(var.dimensions), - attrs={att: var.getncattr(att) for att in var.ncattrs()}) + attrs={att: 
var.getncattr(att) for att in var.ncattrs()}, + ) ds = xr.merge([ds, temp_array]) # Add better units information nbidta etc # for varname in ['nbidta', 'nbjdta', 'nbrdta']: @@ -1333,44 +1603,49 @@ def _create_sub_file(date, time_unit, var_arrays, var_meta, NEMO_BC, filename): data_array = xr.DataArray( var_array, name=var_name, - dims=['time_counter', 'deptht', 'yb', 'xbT'], + dims=["time_counter", "deptht", "yb", "xbT"], coords={ - 'deptht': (['deptht'], depBC[:]), - 'time_counter': np.arange(var_array.shape[0]) + "deptht": (["deptht"], depBC[:]), + "time_counter": np.arange(var_array.shape[0]), }, - attrs=var_meta[var_name]) + attrs=var_meta[var_name], + ) ds = xr.merge([ds, data_array]) # Fix metadata on time_counter - ds['time_counter'].attrs['units'] = \ - '{} since {}'.format(time_unit, date.strftime('%Y-%m-%d %H:%M:%S')) - ds['time_counter'].attrs['time_origin'] = \ - date.strftime('%Y-%m-%d %H:%M:%S') - ds['time_counter'].attrs['long_name'] = 'Time axis' + ds["time_counter"].attrs["units"] = "{} since {}".format( + time_unit, date.strftime("%Y-%m-%d %H:%M:%S") + ) + ds["time_counter"].attrs["time_origin"] = date.strftime("%Y-%m-%d %H:%M:%S") + ds["time_counter"].attrs["long_name"] = "Time axis" # Add metadata for deptht - ds['deptht'].attrs = {att: depBC.getncattr(att) for att in depBC.ncattrs()} + ds["deptht"].attrs = {att: depBC.getncattr(att) for att in depBC.ncattrs()} # Add some global attributes ds.attrs = { - 'acknowledgements': - 'Live Ocean https://faculty.washington.edu/pmacc/LO/LiveOcean.html', - 'creator_email': 'nsoontie@eos.ubc.ca', - 'creator_name': 'Salish Sea MEOPAR Project Contributors', - 'creator_url': 'https://salishsea-meopar-docs.readthedocs.org/', - 'institution': 'UBC EOAS', - 'institution_fullname': ('Earth, Ocean & Atmospheric Sciences,' - ' University of British Columbia'), - 'summary': ('Temperature and Salinity from the Live Ocean model' - ' interpolated in space onto the Salish Sea NEMO Model' - ' western open boundary.'), - 'source': ('https://nbviewer.org/urls/bitbucket.org/' - 'salishsea/analysis-nancy/blob/master/notebooks/' - 'LiveOcean/Interpolating%20Live%20Ocean%20to%20' - 'our%20boundary.ipynb'), - 'history': - ('[{}] File creation.' - .format(datetime.datetime.today().strftime('%Y-%m-%d'))) + "acknowledgements": "Live Ocean https://faculty.washington.edu/pmacc/LO/LiveOcean.html", + "creator_email": "nsoontie@eos.ubc.ca", + "creator_name": "Salish Sea MEOPAR Project Contributors", + "creator_url": "https://salishsea-meopar-docs.readthedocs.org/", + "institution": "UBC EOAS", + "institution_fullname": ( + "Earth, Ocean & Atmospheric Sciences," " University of British Columbia" + ), + "summary": ( + "Temperature and Salinity from the Live Ocean model" + " interpolated in space onto the Salish Sea NEMO Model" + " western open boundary." 
+ ), + "source": ( + "https://nbviewer.org/urls/bitbucket.org/" + "salishsea/analysis-nancy/blob/master/notebooks/" + "LiveOcean/Interpolating%20Live%20Ocean%20to%20" + "our%20boundary.ipynb" + ), + "history": ( + "[{}] File creation.".format(datetime.datetime.today().strftime("%Y-%m-%d")) + ), } ds.to_netcdf(filename) - logger.debug('Saved {}'.format(filename)) + logger.debug("Saved {}".format(filename)) def _convert_TS_to_TEOS10(var_meta, sal, temp): @@ -1391,15 +1666,24 @@ def _convert_TS_to_TEOS10(var_meta, sal, temp): :returns: updated meta data, salinity and temperature""" # modify metadata new_meta = var_meta.copy() - new_meta['vosaline']['long_name'] = 'Reference Salinity' - new_meta['vosaline']['units'] = 'g/kg' - new_meta['votemper']['long_name'] = 'Conservative Temperature' + new_meta["vosaline"]["long_name"] = "Reference Salinity" + new_meta["vosaline"]["units"] = "g/kg" + new_meta["votemper"]["long_name"] = "Conservative Temperature" # Convert salinity from practical to reference salinity - sal_ref = gsw_calls.generic_gsw_caller('gsw_SR_from_SP.m', - [sal[:], ]) + sal_ref = gsw_calls.generic_gsw_caller( + "gsw_SR_from_SP.m", + [ + sal[:], + ], + ) # Conver temperature from potential to consvervative - temp_cons = gsw_calls.generic_gsw_caller('gsw_CT_from_pt.m', - [sal_ref[:], temp[:], ]) + temp_cons = gsw_calls.generic_gsw_caller( + "gsw_CT_from_pt.m", + [ + sal_ref[:], + temp[:], + ], + ) return new_meta, sal_ref, temp_cons @@ -1408,12 +1692,12 @@ def _convert_TS_to_TEOS10(var_meta, sal, temp): # # See the SalishSeaNowcast.nowcast.workers.make_live_ocean_files worker for # the nowcast automation code that does this job -if __name__ == '__main__': +if __name__ == "__main__": # Configure logging so that information messages appear on stderr logger.setLevel(logging.DEBUG) handler = logging.StreamHandler() handler.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(name)s %(levelname)s: %(message)s') + formatter = logging.Formatter("%(name)s %(levelname)s: %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) diff --git a/SalishSeaTools/salishsea_tools/LiveOcean_grid.py b/SalishSeaTools/salishsea_tools/LiveOcean_grid.py index 48790b20..959553c9 100644 --- a/SalishSeaTools/salishsea_tools/LiveOcean_grid.py +++ b/SalishSeaTools/salishsea_tools/LiveOcean_grid.py @@ -23,41 +23,56 @@ def get_basic_info(fn, only_G=False, only_S=False, only_T=False): """ - ds = nc.Dataset(fn, 'r') + ds = nc.Dataset(fn, "r") def make_G(ds): # get grid and bathymetry info - g_varlist = ['h', 'lon_rho', 'lat_rho', 'lon_u', 'lat_u', 'lon_v', - 'lat_v', 'mask_rho', 'mask_u', 'mask_v', 'pm', 'pn', ] + g_varlist = [ + "h", + "lon_rho", + "lat_rho", + "lon_u", + "lat_u", + "lon_v", + "lat_v", + "mask_rho", + "mask_u", + "mask_v", + "pm", + "pn", + ] G = dict() for vv in g_varlist: G[vv] = ds.variables[vv][:] - G['DX'] = 1/G['pm'] - G['DY'] = 1/G['pn'] - G['M'], G['L'] = np.shape(G['lon_rho']) # M = rows, L = columns + G["DX"] = 1 / G["pm"] + G["DY"] = 1 / G["pn"] + G["M"], G["L"] = np.shape(G["lon_rho"]) # M = rows, L = columns # make the masks boolean (True = water, False = land, opposite of masked arrays!) 
- G['mask_rho'] = G['mask_rho'] == 1 - G['mask_u'] = G['mask_u'] == 1 - G['mask_v'] = G['mask_v'] == 1 + G["mask_rho"] = G["mask_rho"] == 1 + G["mask_u"] = G["mask_u"] == 1 + G["mask_v"] = G["mask_v"] == 1 return G + def make_S(ds): # get vertical sigma-coordinate info (vectors are bottom to top) - s_varlist = ['s_rho', 'hc', 'Cs_r', 'Vtransform'] + s_varlist = ["s_rho", "hc", "Cs_r", "Vtransform"] S = dict() for vv in s_varlist: S[vv] = ds.variables[vv][:] - S['N'] = len(S['s_rho']) # number of vertical levels + S["N"] = len(S["s_rho"]) # number of vertical levels return S + def make_T(ds): # get time info - t_varlist = ['ocean_time'] + t_varlist = ["ocean_time"] T = dict() for vv in t_varlist: T[vv] = ds.variables[vv][:] - T_units = ds.variables['ocean_time'].units - tt = nc.num2date(T['ocean_time'][:], T_units) - T['time'] = tt + T_units = ds.variables["ocean_time"].units + tt = nc.num2date(T["ocean_time"][:], T_units) + T["time"] = tt return T + # return results if only_G: return make_G(ds) @@ -87,14 +102,15 @@ def get_z(h, zeta, S): """ # input error checking - if ( (not isinstance(h, np.ndarray)) - or (not isinstance(zeta, (np.ndarray, np.ma.core.MaskedArray))) ): - print('WARNING from get_z(): Inputs must be numpy arrays') + if (not isinstance(h, np.ndarray)) or ( + not isinstance(zeta, (np.ndarray, np.ma.core.MaskedArray)) + ): + print("WARNING from get_z(): Inputs must be numpy arrays") if not isinstance(S, dict): - print('WARNING from get_z(): S must be a dict') + print("WARNING from get_z(): S must be a dict") # number of vertical levels - N = S['N'] + N = S["N"] # remove singleton dimensions h = h.squeeze() @@ -104,29 +120,29 @@ def get_z(h, zeta, S): zeta = np.atleast_2d(zeta) # check that the dimensions are the same if h.shape != zeta.shape: - print('WARNING from get_z(): h and zeta must be the same shape') + print("WARNING from get_z(): h and zeta must be the same shape") M, L = h.shape # rho # create some useful arrays - csr = S['Cs_r'] + csr = S["Cs_r"] csrr = csr.reshape(N, 1, 1).copy() Cs_r = np.tile(csrr, [1, M, L]) H_r = np.tile(h.reshape(1, M, L).copy(), [N, 1, 1]) Zeta_r = np.tile(zeta.reshape(1, M, L).copy(), [N, 1, 1]) - if S['hc'] == 0: # if hc = 0 the transform is simpler (and faster) - z_rho = H_r*Cs_r + Zeta_r + Zeta_r*Cs_r - elif S['hc'] != 0: # need to calculate a few more useful arrays - sr = S['s_rho'] + if S["hc"] == 0: # if hc = 0 the transform is simpler (and faster) + z_rho = H_r * Cs_r + Zeta_r + Zeta_r * Cs_r + elif S["hc"] != 0: # need to calculate a few more useful arrays + sr = S["s_rho"] srr = sr.reshape(N, 1, 1).copy() S_rho = np.tile(srr, [1, M, L]) - Hc_r = np.tile(S['hc'], [N, M, L]) - if S['Vtransform'] == 1: - zr0 = (S_rho - Cs_r) * Hc_r + Cs_r*H_r - z_rho = zr0 + Zeta_r * (1 + zr0/H_r) - elif S['Vtransform'] == 2: - zr0 = (S_rho*Hc_r + Cs_r*H_r) / (Hc_r + H_r) - z_rho = Zeta_r + (Zeta_r + H_r)*zr0 + Hc_r = np.tile(S["hc"], [N, M, L]) + if S["Vtransform"] == 1: + zr0 = (S_rho - Cs_r) * Hc_r + Cs_r * H_r + z_rho = zr0 + Zeta_r * (1 + zr0 / H_r) + elif S["Vtransform"] == 2: + zr0 = (S_rho * Hc_r + Cs_r * H_r) / (Hc_r + H_r) + z_rho = Zeta_r + (Zeta_r + H_r) * zr0 z_rho = z_rho.squeeze() return z_rho diff --git a/SalishSeaTools/salishsea_tools/UBC_subdomain.py b/SalishSeaTools/salishsea_tools/UBC_subdomain.py index 32871975..71c288f0 100644 --- a/SalishSeaTools/salishsea_tools/UBC_subdomain.py +++ b/SalishSeaTools/salishsea_tools/UBC_subdomain.py @@ -27,63 +27,95 @@ XBS = [55, 80] # x-limits YBS = [295, 325] # y-limits # Variables to copy 
-VAR_LIST = ['salt', 'temp', 'h', 'lon_rho', 'lat_rho', 'mask_rho', 'pn', 'pm', - 's_rho', 'hc', 'Cs_r', 'Vtransform', 'zeta', 'ocean_time', - 'lon_u', 'lat_u', 'mask_u', 'u', - 'lon_v', 'lat_v', 'mask_v', 'v', - 'NO3', 'phytoplankton', 'zooplankton', 'detritus', 'Ldetritus', - 'oxygen', 'TIC', 'alkalinity', 'CaCO3', 'rho'] +VAR_LIST = [ + "salt", + "temp", + "h", + "lon_rho", + "lat_rho", + "mask_rho", + "pn", + "pm", + "s_rho", + "hc", + "Cs_r", + "Vtransform", + "zeta", + "ocean_time", + "lon_u", + "lat_u", + "mask_u", + "u", + "lon_v", + "lat_v", + "mask_v", + "v", + "NO3", + "phytoplankton", + "zooplankton", + "detritus", + "Ldetritus", + "oxygen", + "TIC", + "alkalinity", + "CaCO3", + "rho", +] # Dimensions to copy -DIM_LIST = ['xi_rho', 'eta_rho', 'N', 's_rho', 'ocean_time', - 'xi_u', 'eta_u', 'xi_v', 'eta_v'] +DIM_LIST = [ + "xi_rho", + "eta_rho", + "N", + "s_rho", + "ocean_time", + "xi_u", + "eta_u", + "xi_v", + "eta_v", +] def get_UBC_subdomain(f_list): - """Create subdomain files for all netCDF files in f_list """ + """Create subdomain files for all netCDF files in f_list""" for fname in f_list: - fnew = '{}_UBC.nc'.format(fname.split('.nc', 1)[0]) - with nc.Dataset(fname) as G, nc.Dataset(fnew, 'w') as Gnew: + fnew = "{}_UBC.nc".format(fname.split(".nc", 1)[0]) + with nc.Dataset(fname) as G, nc.Dataset(fnew, "w") as Gnew: _copy_netCDF_subdomain(G, Gnew, XBS, YBS, VAR_LIST, DIM_LIST) -def _copy_netCDF_subdomain(oldfile, newfile, xbounds, ybounds, - var_list, dim_list): +def _copy_netCDF_subdomain(oldfile, newfile, xbounds, ybounds, var_list, dim_list): """Copy variables in var_list in subdomain [xbounds, ybounds] from - oldfile to newfile. Also copies dimensions in dim_list and - all global attributes. + oldfile to newfile. Also copies dimensions in dim_list and + all global attributes. """ _copy_dimensions(oldfile, newfile, dim_list, xbounds, ybounds) _copy_variables(oldfile, newfile, var_list, xbounds, ybounds) # copy global attributes - newfile.setncatts( - {att: oldfile.getncattr(att) for att in oldfile.ncattrs()} - ) + newfile.setncatts({att: oldfile.getncattr(att) for att in oldfile.ncattrs()}) def _copy_dimensions(oldfile, newfile, dim_list, xbounds, ybounds): - """ Copy the dimensions in dims_list from oldfile to newfile. - Dimensions of eta_rho, xi_rho are determined by limits of - ybounds, xbounds. eta_v and xi_u have one extra because of staggering. + """Copy the dimensions in dims_list from oldfile to newfile. + Dimensions of eta_rho, xi_rho are determined by limits of + ybounds, xbounds. eta_v and xi_u have one extra because of staggering. 
""" dim_size_dict = { - 'eta_rho': ybounds[1]-ybounds[0]+1, - 'eta_u': ybounds[1]-ybounds[0]+1, - 'xi_rho': xbounds[1]-xbounds[0]+1, - 'xi_v': xbounds[1]-xbounds[0]+1, - 'xi_u': xbounds[1]-xbounds[0], - 'eta_v': ybounds[1]-ybounds[0], - 'ocean_time': 0, + "eta_rho": ybounds[1] - ybounds[0] + 1, + "eta_u": ybounds[1] - ybounds[0] + 1, + "xi_rho": xbounds[1] - xbounds[0] + 1, + "xi_v": xbounds[1] - xbounds[0] + 1, + "xi_u": xbounds[1] - xbounds[0], + "eta_v": ybounds[1] - ybounds[0], + "ocean_time": 0, } for dimname in dim_list: dim = oldfile.dimensions[dimname] - newfile.createDimension( - dimname, size=dim_size_dict.get(dimname, dim.__len__()) - ) + newfile.createDimension(dimname, size=dim_size_dict.get(dimname, dim.__len__())) def _copy_variables(oldfile, newfile, var_list, xbounds, ybounds): """Copy variables in var_list from oldfile to newfile for subdomain - [xbounds, ybounds]""" + [xbounds, ybounds]""" varnames_in_file = list(oldfile.variables.keys()) for varname in var_list: if varname in varnames_in_file: @@ -91,24 +123,23 @@ def _copy_variables(oldfile, newfile, var_list, xbounds, ybounds): dims = var.dimensions newvar = newfile.createVariable(varname, var.datatype, dims) # copy variable attributes - newvar.setncatts({att: var.getncattr(att) - for att in var.ncattrs()}) + newvar.setncatts({att: var.getncattr(att) for att in var.ncattrs()}) # fill data - if 'eta_rho' in dims or 'xi_rho' in dims: - newvar[:] = var[..., - ybounds[0]:ybounds[1]+1, - xbounds[0]:xbounds[1]+1] - elif 'eta_u' in dims or 'xi_u' in dims: - newvar[:] = var[..., - ybounds[0]:ybounds[1]+1, - xbounds[0]:xbounds[1]] - elif 'eta_v' in dims or 'xi_v' in dims: - newvar[:] = var[..., - ybounds[0]:ybounds[1], - xbounds[0]:xbounds[1]+1] + if "eta_rho" in dims or "xi_rho" in dims: + newvar[:] = var[ + ..., ybounds[0] : ybounds[1] + 1, xbounds[0] : xbounds[1] + 1 + ] + elif "eta_u" in dims or "xi_u" in dims: + newvar[:] = var[ + ..., ybounds[0] : ybounds[1] + 1, xbounds[0] : xbounds[1] + ] + elif "eta_v" in dims or "xi_v" in dims: + newvar[:] = var[ + ..., ybounds[0] : ybounds[1], xbounds[0] : xbounds[1] + 1 + ] else: newvar[:] = var[:] -if __name__ == '__main__': +if __name__ == "__main__": get_UBC_subdomain(sys.argv[1:]) diff --git a/SalishSeaTools/salishsea_tools/bathy_tools.py b/SalishSeaTools/salishsea_tools/bathy_tools.py index 59a03ed5..82ef5646 100644 --- a/SalishSeaTools/salishsea_tools/bathy_tools.py +++ b/SalishSeaTools/salishsea_tools/bathy_tools.py @@ -87,9 +87,9 @@ def plot_colourmesh( title, fig_size=(9, 9), axis_limits=None, - colour_map='winter_r', + colour_map="winter_r", bins=15, - land_colour='#edc9af', + land_colour="#edc9af", ): """Create a colour-mesh plot of a bathymetry dataset on a longitude/latitude axis. 
@@ -121,20 +121,21 @@ def plot_colourmesh( :returns: Figure object containing the plot :rtype: :py:class:`matplotlib.figure.Figure` """ - lats = dataset.variables['nav_lat'] - lons = dataset.variables['nav_lon'] - depths = dataset.variables['Bathymetry'] + lats = dataset.variables["nav_lat"] + lons = dataset.variables["nav_lon"] + depths = dataset.variables["Bathymetry"] fig = plt.figure(figsize=fig_size) set_aspect_ratio(lats) plt.title(title) cmap, norm = prep_colour_map( - depths, limits=(0, np.max(depths)), colour_map=colour_map, bins=bins) + depths, limits=(0, np.max(depths)), colour_map=colour_map, bins=bins + ) cmap.set_bad(land_colour) plt.pcolormesh(lons[:], lats[:], depths[:], cmap=cmap, norm=norm) if axis_limits is not None: plt.axis(axis_limits) cbar = plt.colorbar(shrink=0.8) - cbar.set_label('Depth [m]') + cbar.set_label("Depth [m]") return fig @@ -143,9 +144,9 @@ def plot_colourmesh_zoom( centre, half_width=5, fig_size=(9, 9), - colour_map='copper_r', + colour_map="copper_r", bins=15, - land_colour='white', + land_colour="white", ): """Create a colour-mesh plot of a bathymetry dataset on a grid point axis. @@ -176,32 +177,34 @@ def plot_colourmesh_zoom( dataset's :py:const:`Bathymetry` variable masked array :type land_colour: str """ - lats = dataset.variables['nav_lat'] - depths = dataset.variables['Bathymetry'] + lats = dataset.variables["nav_lat"] + depths = dataset.variables["Bathymetry"] plt.figure(figsize=fig_size) set_aspect_ratio(lats) ictr, jctr = centre region_depths = depths[ - jctr+half_width:jctr-half_width:-1, - ictr-half_width:ictr+half_width + jctr + half_width : jctr - half_width : -1, + ictr - half_width : ictr + half_width, ] cmap, norm = prep_colour_map( - depths, limits=(0, np.max(region_depths)), colour_map=colour_map, bins=bins) + depths, limits=(0, np.max(region_depths)), colour_map=colour_map, bins=bins + ) cmap.set_bad(land_colour) plt.pcolormesh(depths[:], cmap=cmap, norm=norm) cbar = plt.colorbar() - cbar.set_label('Depth [m]') - plt.axis((ictr-half_width, ictr+half_width, - jctr-half_width, jctr+half_width)) + cbar.set_label("Depth [m]") + plt.axis( + (ictr - half_width, ictr + half_width, jctr - half_width, jctr + half_width) + ) def prep_colour_map( - depths, - limits=None, - centre=None, - half_width=5, - colour_map='copper_r', - bins=15, + depths, + limits=None, + centre=None, + half_width=5, + colour_map="copper_r", + bins=15, ): """Returns cmap and norm elements of a colourmap for the netCDF depths variable. 
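prep_colour_map is the shared building block of the two plotting wrappers above: given the depths variable and optional limits and bins, it returns a (cmap, norm) pair that can be passed directly to matplotlib's pcolormesh, mirroring how plot_colourmesh calls it. A minimal sketch of direct use, again assuming a hypothetical bathymetry file name:

# Sketch of using prep_colour_map outside the plotting wrappers;
# the file name is an assumption.
import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np
from salishsea_tools import bathy_tools

bathy = nc.Dataset("bathymetry_201702.nc")
depths = bathy.variables["Bathymetry"]
cmap, norm = bathy_tools.prep_colour_map(depths, limits=(0, np.max(depths)), bins=15)
cmap.set_bad("white")  # colour for masked (land) cells
plt.pcolormesh(depths[:], cmap=cmap, norm=norm)
plt.colorbar(label="Depth [m]")
plt.savefig("bathymetry_direct.png")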
@@ -231,8 +234,8 @@ def prep_colour_map( if limits is None: ictr, jctr = centre region = depths[ - jctr+half_width:jctr-half_width:-1, - ictr-half_width:ictr+half_width + jctr + half_width : jctr - half_width : -1, + ictr - half_width : ictr + half_width, ] limits = (0, np.max(region)) levels = MaxNLocator(nbins=bins).tick_values(*limits) @@ -266,9 +269,12 @@ def show_region_depths(depths, centre, half_width=5): :type half_width: int """ ictr, jctr = centre - print(depths[ - jctr+half_width:jctr-half_width:-1, - ictr-half_width:ictr+half_width]) + print( + depths[ + jctr + half_width : jctr - half_width : -1, + ictr - half_width : ictr + half_width, + ] + ) def smooth(depths, max_norm_depth_diff=0.8, smooth_factor=0.2): @@ -295,12 +301,14 @@ def smooth(depths, max_norm_depth_diff=0.8, smooth_factor=0.2): while max_diff > max_norm_depth_diff: if diffs_lat[lat_ij] > diffs_lon[lon_ij]: i, j = lat_ij - depths[lat_ij], depths[i+1, j] = smooth_neighbours( - smooth_factor, depths[lat_ij], depths[i+1, j]) + depths[lat_ij], depths[i + 1, j] = smooth_neighbours( + smooth_factor, depths[lat_ij], depths[i + 1, j] + ) else: i, j = lon_ij - depths[lon_ij], depths[i, j+1] = smooth_neighbours( - smooth_factor, depths[lon_ij], depths[i, j+1]) + depths[lon_ij], depths[i, j + 1] = smooth_neighbours( + smooth_factor, depths[lon_ij], depths[i, j + 1] + ) diffs_lat, lat_ij, diffs_lon, lon_ij = choose_steepest_cells(depths) max_diff = np.maximum(diffs_lat[lat_ij], diffs_lon[lon_ij]) return depths @@ -370,7 +378,7 @@ def calc_norm_depth_diffs(depths, delta_lat, delta_lon): :rtype: :py:class:`netCDF4.Variable` """ jmax, imax = depths.shape - offset_depths = depths[:jmax-delta_lat, :imax-delta_lon] + offset_depths = depths[: jmax - delta_lat, : imax - delta_lon] avg_depths = (depths[delta_lat:, delta_lon:] + offset_depths) / 2 delta_depths = depths[delta_lat:, delta_lon:] - offset_depths return np.abs(delta_depths / avg_depths) @@ -399,8 +407,8 @@ def zero_jervis_end(depths): :returns: netcdf variable object containing the depths :rtype: :py:class:`netCDF4.Variable` """ - depths[650:651+1, 310:320] = 0. - depths[647:649+1, 312:320] = 0. + depths[650 : 651 + 1, 310:320] = 0.0 + depths[647 : 649 + 1, 312:320] = 0.0 return depths @@ -414,15 +422,15 @@ def zero_toba_region(depths): :returns: netcdf variable object containing the depths :rtype: :py:class:`netCDF4.Variable` """ - depths[746, 243:] = 0. - depths[747:756+1, 240:] = 0. - depths[757:763+1, 235:] = 0. - depths[763:766+1, 220:] = 0. - depths[766:771, 213:] = 0. - depths[771, 189:] = 0. - depths[772, 188:] = 0. - depths[773:774+1, 189:] = 0. - depths[775:784+1, 190:] = 0. - depths[785:788+1, 198:] = 0. - depths[789:791+1, 199:] = 0. 
+ depths[746, 243:] = 0.0 + depths[747 : 756 + 1, 240:] = 0.0 + depths[757 : 763 + 1, 235:] = 0.0 + depths[763 : 766 + 1, 220:] = 0.0 + depths[766:771, 213:] = 0.0 + depths[771, 189:] = 0.0 + depths[772, 188:] = 0.0 + depths[773 : 774 + 1, 189:] = 0.0 + depths[775 : 784 + 1, 190:] = 0.0 + depths[785 : 788 + 1, 198:] = 0.0 + depths[789 : 791 + 1, 199:] = 0.0 return depths diff --git a/SalishSeaTools/salishsea_tools/bio_tools.py b/SalishSeaTools/salishsea_tools/bio_tools.py index 5c68daea..ffd4b633 100644 --- a/SalishSeaTools/salishsea_tools/bio_tools.py +++ b/SalishSeaTools/salishsea_tools/bio_tools.py @@ -19,80 +19,124 @@ import f90nml import os -def load_nml_bio(resDir,nmlname,bioRefName='namelist_smelt_ref',bioCfgName='namelist_smelt_cfg',namRefDir=None): - """ extract parameter values from smelt namelists for nampisbio + +def load_nml_bio( + resDir, + nmlname, + bioRefName="namelist_smelt_ref", + bioCfgName="namelist_smelt_cfg", + namRefDir=None, +): + """extract parameter values from smelt namelists for nampisbio :arg str resDir: directory containing namelists associated with run; usually results diri :arg str nmlname name of namelist to load: eg, 'nampisprod' :arg str bioRefName: name of bio reference namelist (optional) :arg str bioCfgName: name of bio config namelist (optional) :arg str namRefDir: dir to get ref namelist from if not in results dir (optional) """ - if namRefDir==None: - namRefDir=resDir - nmlRef=f90nml.read(os.path.join(namRefDir,bioRefName)) - nmlCfg=f90nml.read(os.path.join(resDir,bioCfgName)) - nml=nmlRef[nmlname] + if namRefDir == None: + namRefDir = resDir + nmlRef = f90nml.read(os.path.join(namRefDir, bioRefName)) + nmlCfg = f90nml.read(os.path.join(resDir, bioCfgName)) + nml = nmlRef[nmlname] for key in nmlCfg[nmlname]: - nml[key]=nmlCfg[nmlname][key] + nml[key] = nmlCfg[nmlname][key] return nml -def each_limiter(zz_I_par,zz_NO,zz_NH,zz_Si,tmask, - zz_rate_Iopt,zz_rate_gamma,zz_rate_K_Si,zz_rate_kapa,zz_rate_k): + +def each_limiter( + zz_I_par, + zz_NO, + zz_NH, + zz_Si, + tmask, + zz_rate_Iopt, + zz_rate_gamma, + zz_rate_K_Si, + zz_rate_kapa, + zz_rate_k, +): # Light - zz_plank_growth_light = (1.0 - np.exp(-zz_I_par / (0.33 * zz_rate_Iopt)) ) * \ - (np.exp(-zz_I_par / (30. 
* zz_rate_Iopt))) * 1.06 + zz_plank_growth_light = ( + (1.0 - np.exp(-zz_I_par / (0.33 * zz_rate_Iopt))) + * (np.exp(-zz_I_par / (30.0 * zz_rate_Iopt))) + * 1.06 + ) zz_Uc = (1.0 - zz_rate_gamma) * zz_plank_growth_light - ILim=zz_Uc + ILim = zz_Uc # Si - zz_Sc = np.where(np.logical_and(zz_Si>0.0,tmask>0), - zz_Si / (zz_rate_K_Si + zz_Si),0.0) - SiLim=zz_Sc + zz_Sc = np.where( + np.logical_and(zz_Si > 0.0, tmask > 0), zz_Si / (zz_rate_K_Si + zz_Si), 0.0 + ) + SiLim = zz_Sc # Nitrate and Ammonium - zz_Oup_cell = np.where(np.logical_and(zz_NO > 0.0,tmask>0), - zz_NO * zz_rate_kapa / (zz_rate_k + zz_NO * zz_rate_kapa + zz_NH),0.0) - zz_Hup_cell = np.where(np.logical_and(zz_NH > 0.0,tmask>0), - zz_NH / (zz_rate_k + zz_NO * zz_rate_kapa + zz_NH),0.0) - if (np.any(zz_Oup_cell < 0.)): - raise ValueError('zz_Oup_cell<0') - if (np.any(zz_Hup_cell < 0.)): - raise ValueError('zz_Hup_cell<0') - NLim=zz_Oup_cell+zz_Hup_cell + zz_Oup_cell = np.where( + np.logical_and(zz_NO > 0.0, tmask > 0), + zz_NO * zz_rate_kapa / (zz_rate_k + zz_NO * zz_rate_kapa + zz_NH), + 0.0, + ) + zz_Hup_cell = np.where( + np.logical_and(zz_NH > 0.0, tmask > 0), + zz_NH / (zz_rate_k + zz_NO * zz_rate_kapa + zz_NH), + 0.0, + ) + if np.any(zz_Oup_cell < 0.0): + raise ValueError("zz_Oup_cell<0") + if np.any(zz_Hup_cell < 0.0): + raise ValueError("zz_Hup_cell<0") + NLim = zz_Oup_cell + zz_Hup_cell # set flags - limiter=-1*np.ones(zz_Si.shape) - limiter=np.where(np.logical_and(ILim<=NLim,ILim<=SiLim),0, - np.where(NLim<=SiLim,2,np.where(SiLim0.0, - zz_Si / (zz_rate_K_Si + zz_Si),0.0) - SiLim=zz_Sc + zz_Sc = np.where(zz_Si > 0.0, zz_Si / (zz_rate_K_Si + zz_Si), 0.0) + SiLim = zz_Sc # Nitrate and Ammonium - zz_Oup_cell = np.where(zz_NO > 0.0, - zz_NO * zz_rate_kapa / (zz_rate_k + zz_NO * zz_rate_kapa + zz_NH),0.0) - zz_Hup_cell = np.where(zz_NH > 0.0, - zz_NH / (zz_rate_k + zz_NO * zz_rate_kapa + zz_NH),0.0) - if (np.any(zz_Oup_cell < 0.)): - raise ValueError('zz_Oup_cell<0') - if (np.any(zz_Hup_cell < 0.)): - raise ValueError('zz_Hup_cell<0') - NLim=zz_Oup_cell+zz_Hup_cell - nutLim=np.minimum(NLim,SiLim) - return np.power(nutLim,0.2) - -def calc_diat_sink(zz_w_sink_Pmicro_min,zz_w_sink_Pmicro_max,diatNutLim): + zz_Oup_cell = np.where( + zz_NO > 0.0, + zz_NO * zz_rate_kapa / (zz_rate_k + zz_NO * zz_rate_kapa + zz_NH), + 0.0, + ) + zz_Hup_cell = np.where( + zz_NH > 0.0, zz_NH / (zz_rate_k + zz_NO * zz_rate_kapa + zz_NH), 0.0 + ) + if np.any(zz_Oup_cell < 0.0): + raise ValueError("zz_Oup_cell<0") + if np.any(zz_Hup_cell < 0.0): + raise ValueError("zz_Hup_cell<0") + NLim = zz_Oup_cell + zz_Hup_cell + nutLim = np.minimum(NLim, SiLim) + return np.power(nutLim, 0.2) + + +def calc_diat_sink(zz_w_sink_Pmicro_min, zz_w_sink_Pmicro_max, diatNutLim): # enter min and max rates in m/day as in namelist # use calc_nutLim_2 to estimate diatNutLim, which is to power 0.2 - wsink= zz_w_sink_Pmicro_min*diatNutLim+zz_w_sink_Pmicro_max*(1.0-diatNutLim) - return wsink/(24*3600) # diatom sinking rates are converted to m/s during namelist read + wsink = zz_w_sink_Pmicro_min * diatNutLim + zz_w_sink_Pmicro_max * ( + 1.0 - diatNutLim + ) + return wsink / ( + 24 * 3600 + ) # diatom sinking rates are converted to m/s during namelist read -def calc_p_limiters(I,NO,NH,Si,tmask,nampisprod): + +def calc_p_limiters(I, NO, NH, Si, tmask, nampisprod): """Calculate limiting factor: I, Si, or N based on SMELT output :arg I: np.array slice of PAR from dia file @@ -103,38 +147,94 @@ def calc_p_limiters(I,NO,NH,Si,tmask,nampisprod): :arg nampisprod: namelist dict loaded using 
load_nml_bio with argument nampisprod """ - ILimDiat, NLimDiat, SiLimDiat, limiterDiat, limvalDiat=each_limiter(I,NO,NH,Si,tmask,nampisprod['zz_rate_Iopt_diat'], - nampisprod['zz_rate_gamma_diat'],nampisprod['zz_rate_k_Si_diat'], - nampisprod['zz_rate_kapa_diat'],nampisprod['zz_rate_k_diat']) - - ILimMyri, NLimMyri, SiLimMyri, limiterMyri, limvalMyri=each_limiter(I,NO,NH,Si,tmask,nampisprod['zz_rate_Iopt_myri'], - nampisprod['zz_rate_gamma_myri'],nampisprod['zz_rate_k_Si_myri'], - nampisprod['zz_rate_kapa_myri'],nampisprod['zz_rate_k_myri']) - - ILimNano, NLimNano, SiLimNano, limiterNano, limvalNano=each_limiter(I,NO,NH,Si,tmask,nampisprod['zz_rate_Iopt_nano'], - nampisprod['zz_rate_gamma_nano'],nampisprod['zz_rate_k_Si_nano'], - nampisprod['zz_rate_kapa_nano'],nampisprod['zz_rate_k_nano']) - Diat={'ILim':ILimDiat,'NLim':NLimDiat,'SiLim':SiLimDiat,'limiter':limiterDiat,'limval':limvalDiat} - Myri={'ILim':ILimMyri,'NLim':NLimMyri,'SiLim':SiLimMyri,'limiter':limiterMyri,'limval':limvalMyri} - Nano={'ILim':ILimNano,'NLim':NLimNano,'SiLim':SiLimNano,'limiter':limiterNano,'limval':limvalNano} + ILimDiat, NLimDiat, SiLimDiat, limiterDiat, limvalDiat = each_limiter( + I, + NO, + NH, + Si, + tmask, + nampisprod["zz_rate_Iopt_diat"], + nampisprod["zz_rate_gamma_diat"], + nampisprod["zz_rate_k_Si_diat"], + nampisprod["zz_rate_kapa_diat"], + nampisprod["zz_rate_k_diat"], + ) + + ILimMyri, NLimMyri, SiLimMyri, limiterMyri, limvalMyri = each_limiter( + I, + NO, + NH, + Si, + tmask, + nampisprod["zz_rate_Iopt_myri"], + nampisprod["zz_rate_gamma_myri"], + nampisprod["zz_rate_k_Si_myri"], + nampisprod["zz_rate_kapa_myri"], + nampisprod["zz_rate_k_myri"], + ) + + ILimNano, NLimNano, SiLimNano, limiterNano, limvalNano = each_limiter( + I, + NO, + NH, + Si, + tmask, + nampisprod["zz_rate_Iopt_nano"], + nampisprod["zz_rate_gamma_nano"], + nampisprod["zz_rate_k_Si_nano"], + nampisprod["zz_rate_kapa_nano"], + nampisprod["zz_rate_k_nano"], + ) + Diat = { + "ILim": ILimDiat, + "NLim": NLimDiat, + "SiLim": SiLimDiat, + "limiter": limiterDiat, + "limval": limvalDiat, + } + Myri = { + "ILim": ILimMyri, + "NLim": NLimMyri, + "SiLim": SiLimMyri, + "limiter": limiterMyri, + "limval": limvalMyri, + } + Nano = { + "ILim": ILimNano, + "NLim": NLimNano, + "SiLim": SiLimNano, + "limiter": limiterNano, + "limval": limvalNano, + } return Diat, Myri, Nano + def phyto_Tdep_Factor(TT, zz_rate_maxtemp, zz_rate_temprange): - #if hasattr(TT,'__len__'): # assume 1-d array or similar and return array + # if hasattr(TT,'__len__'): # assume 1-d array or similar and return array # return np.array([phyto_Tdep_Factor(el,zz_rate_maxtemp, zz_rate_temprange) for el in TT]) - #else: + # else: # return np.exp(0.07 * (TT - 20)) * min(max((zz_rate_maxtemp - TT), 0.0),zz_rate_temprange) / (zz_rate_temprange + 1e-10) - return np.exp(0.07 * (TT - 20)) * np.minimum(np.maximum((zz_rate_maxtemp - TT), 0.0),zz_rate_temprange) / (zz_rate_temprange + 1e-10) + return ( + np.exp(0.07 * (TT - 20)) + * np.minimum(np.maximum((zz_rate_maxtemp - TT), 0.0), zz_rate_temprange) + / (zz_rate_temprange + 1e-10) + ) -def calc_T_Factors(TT,nampisprod): - Tdep_Diat=phyto_Tdep_Factor(TT,nampisprod['zz_rate_maxtemp_diat'],nampisprod['zz_rate_temprange_diat']) - Tdep_Myri=phyto_Tdep_Factor(TT,nampisprod['zz_rate_maxtemp_myri'],nampisprod['zz_rate_temprange_myri']) - Tdep_Nano=phyto_Tdep_Factor(TT,nampisprod['zz_rate_maxtemp_nano'],nampisprod['zz_rate_temprange_nano']) +def calc_T_Factors(TT, nampisprod): + Tdep_Diat = phyto_Tdep_Factor( + TT, 
nampisprod["zz_rate_maxtemp_diat"], nampisprod["zz_rate_temprange_diat"] + ) + Tdep_Myri = phyto_Tdep_Factor( + TT, nampisprod["zz_rate_maxtemp_myri"], nampisprod["zz_rate_temprange_myri"] + ) + Tdep_Nano = phyto_Tdep_Factor( + TT, nampisprod["zz_rate_maxtemp_nano"], nampisprod["zz_rate_temprange_nano"] + ) return Tdep_Diat, Tdep_Myri, Tdep_Nano -#def calc_limiter(resDir,fnameDia=None,fnamePtrc=None): +# def calc_limiter(resDir,fnameDia=None,fnamePtrc=None): # :arg str resDir: path to results directory where output and namelist files are stored # :arg str fnameDia: (optional) diagnostic file to get output from; # if none suplied assumes there is only one possibility in resDir @@ -146,13 +246,13 @@ def calc_T_Factors(TT,nampisprod): # fPtrc=nc.Dataset(os.path.join(resDir,fname # -#def find_closest_model_point( +# def find_closest_model_point( # lon, lat, model_lons, model_lats, grid='NEMO', land_mask=None, # tols={ # 'NEMO': {'tol_lon': 0.0104, 'tol_lat': 0.00388}, # 'GEM2.5': {'tol_lon': 0.016, 'tol_lat': 0.012}, # } -#): +# ): # """Returns the grid coordinates of the closest model point # to a specified lon/lat. If land_mask is provided, returns the closest # water point. @@ -190,4 +290,3 @@ def calc_T_Factors(TT,nampisprod): # # :returns: yind, xind # """ - diff --git a/SalishSeaTools/salishsea_tools/bloomdrivers.py b/SalishSeaTools/salishsea_tools/bloomdrivers.py index f1a3d031..cfe8a8f2 100644 --- a/SalishSeaTools/salishsea_tools/bloomdrivers.py +++ b/SalishSeaTools/salishsea_tools/bloomdrivers.py @@ -4,498 +4,539 @@ import matplotlib as mpl import netCDF4 as nc import datetime as dt -from salishsea_tools import evaltools as et, places, viz_tools, visualisations, geo_tools +from salishsea_tools import ( + evaltools as et, + places, + viz_tools, + visualisations, + geo_tools, +) import xarray as xr import pandas as pd import pickle import os import gsw + # Extracting winds from the correct path -def getWindVarsYear(year,loc): - ''' Given a year, returns the correct directory and nam_fmt for wind forcing as well as the - location of S3 on the corresponding grid. - Parameters: - year: a year value in integer form - loc: the location name as a string. Eg. loc='S3' - Returns: - jW: y-coordinate for the location - iW: x-coordinate for the location - opsdir: path to directory where wind forcing file is stored - nam_fmt: naming convention of the appropriate files - ''' - if year>2014: - opsdir='/results/forcing/atmospheric/GEM2.5/operational/' - nam_fmt='ops' - jW,iW=places.PLACES[loc]['GEM2.5 grid ji'] +def getWindVarsYear(year, loc): + """Given a year, returns the correct directory and nam_fmt for wind forcing as well as the + location of S3 on the corresponding grid. + Parameters: + year: a year value in integer form + loc: the location name as a string. Eg. 
loc='S3' + Returns: + jW: y-coordinate for the location + iW: x-coordinate for the location + opsdir: path to directory where wind forcing file is stored + nam_fmt: naming convention of the appropriate files + """ + if year > 2014: + opsdir = "/results/forcing/atmospheric/GEM2.5/operational/" + nam_fmt = "ops" + jW, iW = places.PLACES[loc]["GEM2.5 grid ji"] else: - opsdir='/data/eolson/results/MEOPAR/GEMLAM/' - nam_fmt='gemlam' - with xr.open_dataset('/results/forcing/atmospheric/GEM2.5/gemlam/gemlam_y2012m03d01.nc') as gridrefWind: + opsdir = "/data/eolson/results/MEOPAR/GEMLAM/" + nam_fmt = "gemlam" + with xr.open_dataset( + "/results/forcing/atmospheric/GEM2.5/gemlam/gemlam_y2012m03d01.nc" + ) as gridrefWind: # always use a post-2011 file here to identify station grid location - lon,lat=places.PLACES[loc]['lon lat'] - jW,iW=geo_tools.find_closest_model_point(lon,lat, - gridrefWind.variables['nav_lon'][:,:]-360,gridrefWind.variables['nav_lat'][:,:], - grid='GEM2.5') - # the -360 is needed because longitudes in this case are reported in postive degrees East + lon, lat = places.PLACES[loc]["lon lat"] + jW, iW = geo_tools.find_closest_model_point( + lon, + lat, + gridrefWind.variables["nav_lon"][:, :] - 360, + gridrefWind.variables["nav_lat"][:, :], + grid="GEM2.5", + ) + # the -360 is needed because longitudes in this case are reported in postive degrees East + + return jW, iW, opsdir, nam_fmt - return jW,iW,opsdir,nam_fmt # Metric 1: -def metric1_bloomtime(phyto_alld,no3_alld,bio_time): - ''' Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time - and depth, returns a datetime value of the spring phytoplankton bloom date according to the - following definition (now called 'metric 1'): - - 'The spring bloom date is the peak phytoplankton concentration (averaged from the surface to - 3 m depth) within four days of the average upper 3 m nitrate concentration going below 0.5 uM - (the half-saturation concentration) for two consecutive days' - EDIT: 0.5 uM was changed to 2.0 uM to yield more accurate results - - Parameters: - phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time - range of 'bio_time' - no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time - range of 'bio_time' - bio_time: 1D datetime array of the same time frame as phyto_alld and no3_alld - Returns: - bloomtime1: the spring bloom date as a single datetime value - - ''' +def metric1_bloomtime(phyto_alld, no3_alld, bio_time): + """Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time + and depth, returns a datetime value of the spring phytoplankton bloom date according to the + following definition (now called 'metric 1'): + + 'The spring bloom date is the peak phytoplankton concentration (averaged from the surface to + 3 m depth) within four days of the average upper 3 m nitrate concentration going below 0.5 uM + (the half-saturation concentration) for two consecutive days' + EDIT: 0.5 uM was changed to 2.0 uM to yield more accurate results + + Parameters: + phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time + range of 'bio_time' + no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time + range of 'bio_time' + bio_time: 1D datetime array of the same time frame as phyto_alld and no3_alld + Returns: + bloomtime1: the spring bloom date as a single datetime value + + """ # a) get avg phytplankton in upper 3m - 
phyto_alld_df=pd.DataFrame(phyto_alld) - upper_3m_phyto=pd.DataFrame(phyto_alld_df[[0,1,2,3]].mean(axis=1)) - upper_3m_phyto.columns=['upper_3m_phyto'] - #upper_3m_phyto + phyto_alld_df = pd.DataFrame(phyto_alld) + upper_3m_phyto = pd.DataFrame(phyto_alld_df[[0, 1, 2, 3]].mean(axis=1)) + upper_3m_phyto.columns = ["upper_3m_phyto"] + # upper_3m_phyto # b) get average no3 in upper 3m - no3_alld_df=pd.DataFrame(no3_alld) - upper_3m_no3=pd.DataFrame(no3_alld_df[[0,1,2,3]].mean(axis=1)) - upper_3m_no3.columns=['upper_3m_no3'] - #upper_3m_no3 + no3_alld_df = pd.DataFrame(no3_alld) + upper_3m_no3 = pd.DataFrame(no3_alld_df[[0, 1, 2, 3]].mean(axis=1)) + upper_3m_no3.columns = ["upper_3m_no3"] + # upper_3m_no3 # make bio_time into a dataframe - bio_time_df=pd.DataFrame(bio_time) - bio_time_df.columns=['bio_time'] - metric1_df=pd.concat((bio_time_df,upper_3m_phyto,upper_3m_no3), axis=1) - - # c) Find first location where nitrate crosses below 0.5 micromolar and - # stays there for 2 days + bio_time_df = pd.DataFrame(bio_time) + bio_time_df.columns = ["bio_time"] + metric1_df = pd.concat((bio_time_df, upper_3m_phyto, upper_3m_no3), axis=1) + + # c) Find first location where nitrate crosses below 0.5 micromolar and + # stays there for 2 days # NOTE: changed the value to 2 micromolar - location1=np.nan + location1 = np.nan for i, row in metric1_df.iterrows(): try: - if metric1_df['upper_3m_no3'].iloc[i]<2 and metric1_df['upper_3m_no3'].iloc[i+1]<2: - location1=i + if ( + metric1_df["upper_3m_no3"].iloc[i] < 2 + and metric1_df["upper_3m_no3"].iloc[i + 1] < 2 + ): + location1 = i break except IndexError: - location1=np.nan - print('bloom not found') + location1 = np.nan + print("bloom not found") # d) Find date with maximum phytoplankton concentration within four days (say 9 day window) of date in c) if np.isnan(location1): - bloomrange=np.nan - bloomtime1=np.nan + bloomrange = np.nan + bloomtime1 = np.nan else: - bloomrange=metric1_df[location1-4:location1+5] - bloomtime1=bloomrange.loc[bloomrange.upper_3m_phyto.idxmax(), 'bio_time'] + bloomrange = metric1_df[location1 - 4 : location1 + 5] + bloomtime1 = bloomrange.loc[bloomrange.upper_3m_phyto.idxmax(), "bio_time"] return bloomtime1 -# Metric 2: -def metric2_bloomtime(phyto_alld,no3_alld,bio_time): - ''' Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time - and depth, returns a datetime value of the spring phytoplankton bloom date according to the - following definition (now called 'metric 2'): - - 'The first peak in which chlorophyll concentrations in upper 3m are above 5 ug/L for more than two days' - - Parameters: - phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time - range of 'bio_time' - no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time - range of 'bio_time' - bio_time: 1D datetime array of the same time frame as sphyto and sno3 - Returns: - bloomtime2: the spring bloom date as a single datetime value - - ''' +# Metric 2: +def metric2_bloomtime(phyto_alld, no3_alld, bio_time): + """Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time + and depth, returns a datetime value of the spring phytoplankton bloom date according to the + following definition (now called 'metric 2'): + + 'The first peak in which chlorophyll concentrations in upper 3m are above 5 ug/L for more than two days' + + Parameters: + phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time + range of 
'bio_time' + no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time + range of 'bio_time' + bio_time: 1D datetime array of the same time frame as sphyto and sno3 + Returns: + bloomtime2: the spring bloom date as a single datetime value + + """ # a) get avg phytplankton in upper 3m - phyto_alld_df=pd.DataFrame(phyto_alld) - upper_3m_phyto=pd.DataFrame(phyto_alld_df[[0,1,2,3]].mean(axis=1)) - upper_3m_phyto.columns=['sphyto'] - #upper_3m_phyto + phyto_alld_df = pd.DataFrame(phyto_alld) + upper_3m_phyto = pd.DataFrame(phyto_alld_df[[0, 1, 2, 3]].mean(axis=1)) + upper_3m_phyto.columns = ["sphyto"] + # upper_3m_phyto # b) get average no3 in upper 3m - no3_alld_df=pd.DataFrame(no3_alld) - upper_3m_no3=pd.DataFrame(no3_alld_df[[0,1,2,3]].mean(axis=1)) - upper_3m_no3.columns=['sno3'] - #upper_3m_no3 + no3_alld_df = pd.DataFrame(no3_alld) + upper_3m_no3 = pd.DataFrame(no3_alld_df[[0, 1, 2, 3]].mean(axis=1)) + upper_3m_no3.columns = ["sno3"] + # upper_3m_no3 # make bio_time into a dataframe - bio_time_df=pd.DataFrame(bio_time) - bio_time_df.columns=['bio_time'] - df=pd.concat((bio_time_df,upper_3m_phyto,upper_3m_no3), axis=1) + bio_time_df = pd.DataFrame(bio_time) + bio_time_df.columns = ["bio_time"] + df = pd.concat((bio_time_df, upper_3m_phyto, upper_3m_no3), axis=1) # to find all the peaks: - df['phytopeaks'] = df.sphyto[(df.sphyto.shift(1) < df.sphyto) & (df.sphyto.shift(-1) < df.sphyto)] + df["phytopeaks"] = df.sphyto[ + (df.sphyto.shift(1) < df.sphyto) & (df.sphyto.shift(-1) < df.sphyto) + ] # need to covert the value of interest from ug/L to uM N (conversion factor: 1.8 ug Chl per umol N) - chlvalue=5/1.8 + chlvalue = 5 / 1.8 - # extract the bloom time date + # extract the bloom time date for i, row in df.iterrows(): try: - if df['sphyto'].iloc[i-1]>chlvalue and df['sphyto'].iloc[i-2]>chlvalue and pd.notna(df['phytopeaks'].iloc[i]): - bloomtime2=df.bio_time[i] + if ( + df["sphyto"].iloc[i - 1] > chlvalue + and df["sphyto"].iloc[i - 2] > chlvalue + and pd.notna(df["phytopeaks"].iloc[i]) + ): + bloomtime2 = df.bio_time[i] break - elif df['sphyto'].iloc[i+1]>chlvalue and df['sphyto'].iloc[i+2]>chlvalue and pd.notna(df['phytopeaks'].iloc[i]): - bloomtime2=df.bio_time[i] + elif ( + df["sphyto"].iloc[i + 1] > chlvalue + and df["sphyto"].iloc[i + 2] > chlvalue + and pd.notna(df["phytopeaks"].iloc[i]) + ): + bloomtime2 = df.bio_time[i] break except IndexError: - bloomtime2=np.nan - print('bloom not found') + bloomtime2 = np.nan + print("bloom not found") return bloomtime2 -# Metric 3: -def metric3_bloomtime(sphyto,sno3,bio_time): - ''' Given datetime array and two 1D arrays of surface phytplankton and nitrate concentrations - over time, returns a datetime value of the spring phytoplankton bloom date according to the - following definition (now called 'metric 3'): - - 'The median + 5% of the annual Chl concentration is deemed “threshold value” for each year. 
- For a given year, bloom initiation is determined to be the week that first reaches the - threshold value (by looking at weekly averages) as long as one of the two following weeks - was >70% of the threshold value' - - Parameters: - sphyto: 1D array of phytoplankton concentrations (in uM N) over time - range of 'bio_time' - sno3: 1D array of nitrate concentrations (in uM N) over time - range of 'bio_time' - bio_time: 1D datetime array of the same time frame as sphyto and sno3 - Returns: - bloomtime3: the spring bloom date as a single datetime value - - ''' - # 1) determine threshold value - df = pd.DataFrame({'bio_time':bio_time, 'sphyto':sphyto, 'sno3':sno3}) - + +# Metric 3: +def metric3_bloomtime(sphyto, sno3, bio_time): + """Given datetime array and two 1D arrays of surface phytplankton and nitrate concentrations + over time, returns a datetime value of the spring phytoplankton bloom date according to the + following definition (now called 'metric 3'): + + 'The median + 5% of the annual Chl concentration is deemed “threshold value” for each year. + For a given year, bloom initiation is determined to be the week that first reaches the + threshold value (by looking at weekly averages) as long as one of the two following weeks + was >70% of the threshold value' + + Parameters: + sphyto: 1D array of phytoplankton concentrations (in uM N) over time + range of 'bio_time' + sno3: 1D array of nitrate concentrations (in uM N) over time + range of 'bio_time' + bio_time: 1D datetime array of the same time frame as sphyto and sno3 + Returns: + bloomtime3: the spring bloom date as a single datetime value + + """ + # 1) determine threshold value + df = pd.DataFrame({"bio_time": bio_time, "sphyto": sphyto, "sno3": sno3}) + # a) find median chl value of that year, add 5% (this is only feb-june, should we do the whole year?) - threshold=df['sphyto'].median()*1.05 + threshold = df["sphyto"].median() * 1.05 # b) secondthresh = find 70% of threshold value - secondthresh=threshold*0.7 + secondthresh = threshold * 0.7 # 2) Take the average of each week and make a dataframe with start date of week and weekly average - weeklychl = pd.DataFrame(df.resample('W', on='bio_time').sphyto.mean()) + weeklychl = pd.DataFrame(df.resample("W", on="bio_time").sphyto.mean()) weeklychl.reset_index(inplace=True) - # 3) Loop through the weeks and find the first week that reaches the threshold. - # Is one of the two week values after this week > secondthresh? + # 3) Loop through the weeks and find the first week that reaches the threshold. + # Is one of the two week values after this week > secondthresh? 
for i, row in weeklychl.iterrows(): try: - if weeklychl['sphyto'].iloc[i]>threshold and weeklychl['sphyto'].iloc[i+1]>secondthresh: - bloomtime3=weeklychl.bio_time[i] + if ( + weeklychl["sphyto"].iloc[i] > threshold + and weeklychl["sphyto"].iloc[i + 1] > secondthresh + ): + bloomtime3 = weeklychl.bio_time[i] break - elif weeklychl['sphyto'].iloc[i]>threshold and weeklychl['sphyto'].iloc[i+2]>secondthresh: - bloomtime3=weeklychl.bio_time[i] + elif ( + weeklychl["sphyto"].iloc[i] > threshold + and weeklychl["sphyto"].iloc[i + 2] > secondthresh + ): + bloomtime3 = weeklychl.bio_time[i] break except IndexError: - bloomtime2=np.nan - print('bloom not found') + bloomtime2 = np.nan + print("bloom not found") return bloomtime3 + # Surface monthly average calculation given 2D array with depth and time: -def D2_3monthly_avg(time,x): - - ''' Given datetime array of 3 months and a 2D array of variable x, over time - and depth, returns an array containing the 3 monthly averages of the - surface values of variable x - - Parameters: - time: datetime array of each day starting from the 1st day - of the first month, ending on the last day of the third month - x: 2-dimensional numpy array containing daily averages of the - same length and time frame as 'time', and depth profile - Returns: - jan_x, feb_x, mar_x: monthly averages of variable x at surface - ''' - - depthx=pd.DataFrame(x) - surfacex=np.array(depthx[[0]]).flatten() - df=pd.DataFrame({'time':time, 'x':surfacex}) - monthlyx=pd.DataFrame(df.resample('M', on='time').x.mean()) +def D2_3monthly_avg(time, x): + """Given datetime array of 3 months and a 2D array of variable x, over time + and depth, returns an array containing the 3 monthly averages of the + surface values of variable x + + Parameters: + time: datetime array of each day starting from the 1st day + of the first month, ending on the last day of the third month + x: 2-dimensional numpy array containing daily averages of the + same length and time frame as 'time', and depth profile + Returns: + jan_x, feb_x, mar_x: monthly averages of variable x at surface + """ + + depthx = pd.DataFrame(x) + surfacex = np.array(depthx[[0]]).flatten() + df = pd.DataFrame({"time": time, "x": surfacex}) + monthlyx = pd.DataFrame(df.resample("M", on="time").x.mean()) monthlyx.reset_index(inplace=True) - jan_x=monthlyx.iloc[0]['x'] - feb_x=monthlyx.iloc[1]['x'] - mar_x=monthlyx.iloc[2]['x'] + jan_x = monthlyx.iloc[0]["x"] + feb_x = monthlyx.iloc[1]["x"] + mar_x = monthlyx.iloc[2]["x"] return jan_x, feb_x, mar_x - # mid depth nitrate (30-90m): -def D1_3monthly_avg(time,x): - - ''' Given datetime array of 3 months and a 1D array of variable x with time, - returns an array containing the 3 monthly averages of the variable x - - Parameters: - time: datetime array of each day starting from the 1st day - of the first month, ending on the last day of the third month - x: 1-dimensional numpy array containing daily averages of the - same length and time frame as 'time' - Returns: - jan_x, feb_x, mar_x: monthly averages of variable x - ''' - - df=pd.DataFrame({'time':time, 'x':x}) - monthlyx=pd.DataFrame(df.resample('M', on='time').x.mean()) +def D1_3monthly_avg(time, x): + """Given datetime array of 3 months and a 1D array of variable x with time, + returns an array containing the 3 monthly averages of the variable x + + Parameters: + time: datetime array of each day starting from the 1st day + of the first month, ending on the last day of the third month + x: 1-dimensional numpy array containing daily averages of the 
+ same length and time frame as 'time' + Returns: + jan_x, feb_x, mar_x: monthly averages of variable x + """ + + df = pd.DataFrame({"time": time, "x": x}) + monthlyx = pd.DataFrame(df.resample("M", on="time").x.mean()) monthlyx.reset_index(inplace=True) - jan_x=monthlyx.iloc[0]['x'] - feb_x=monthlyx.iloc[1]['x'] - mar_x=monthlyx.iloc[2]['x'] + jan_x = monthlyx.iloc[0]["x"] + feb_x = monthlyx.iloc[1]["x"] + mar_x = monthlyx.iloc[2]["x"] return jan_x, feb_x, mar_x + # Monthly average calculation given 1D array and non-datetime : -def D1_3monthly_avg2(time,x): - - ''' Given non-datetime array of 3 months and a 1D array of variable x with time, - returns an array containing the 3 monthly averages of the variable x - - Parameters: - time: non-datetime array of each day starting from the 1st day - of the first month, ending on the last day of the third month - x: 1-dimensional numpy array containing daily averages of the - same length and time frame as 'time' - Returns: - jan_x, feb_x, mar_x: monthly averages of variable x - ''' - - - df=pd.DataFrame({'time':time, 'x':x}) +def D1_3monthly_avg2(time, x): + """Given non-datetime array of 3 months and a 1D array of variable x with time, + returns an array containing the 3 monthly averages of the variable x + + Parameters: + time: non-datetime array of each day starting from the 1st day + of the first month, ending on the last day of the third month + x: 1-dimensional numpy array containing daily averages of the + same length and time frame as 'time' + Returns: + jan_x, feb_x, mar_x: monthly averages of variable x + """ + + df = pd.DataFrame({"time": time, "x": x}) df["time"] = pd.to_datetime(df["time"]) - monthlyx=pd.DataFrame(df.resample('M',on='time').x.mean()) + monthlyx = pd.DataFrame(df.resample("M", on="time").x.mean()) monthlyx.reset_index(inplace=True) - jan_x=monthlyx.iloc[0]['x'] - feb_x=monthlyx.iloc[1]['x'] - mar_x=monthlyx.iloc[2]['x'] + jan_x = monthlyx.iloc[0]["x"] + feb_x = monthlyx.iloc[1]["x"] + mar_x = monthlyx.iloc[2]["x"] return jan_x, feb_x, mar_x -def halo_de(ncname,ts_x,ts_y): - - ''' Given a path to a SalishSeaCast netcdf file and an x, y pair, - returns halocline depth, where halocline depth is defined a midway between - two cells that have the largest salinity gradient - ie max abs((sal1-sal2)/(depth1-depth2)) - - Parameters: - ncname (str): path to a netcdf file containing - a valid salinity variable (vosaline) - ts_x (int): x-coordinate at which halocline is calculated - tx_y (int): y-coordinate at which halocline is calculated - Returns: - halocline_depth: depth in meters of maximum salinity gradient - ''' - - # o - + +def halo_de(ncname, ts_x, ts_y): + """Given a path to a SalishSeaCast netcdf file and an x, y pair, + returns halocline depth, where halocline depth is defined a midway between + two cells that have the largest salinity gradient + ie max abs((sal1-sal2)/(depth1-depth2)) + + Parameters: + ncname (str): path to a netcdf file containing + a valid salinity variable (vosaline) + ts_x (int): x-coordinate at which halocline is calculated + tx_y (int): y-coordinate at which halocline is calculated + Returns: + halocline_depth: depth in meters of maximum salinity gradient + """ + + # o + halocline = 0 - grid = nc.Dataset('/data/vdo/MEOPAR/NEMO-forcing/grid/mesh_mask201702.nc') + grid = nc.Dataset("/data/vdo/MEOPAR/NEMO-forcing/grid/mesh_mask201702.nc") nemo = nc.Dataset(ncname) - - #get the land mask - col_mask = grid['tmask'][0,:,ts_y,ts_x] - - #get the depths of the watercolumn and filter only cells that have 
water - col_depths = grid['gdept_0'][0,:,ts_y,ts_x] - col_depths = col_depths[col_mask==1] - -### if there is no water, no halocline - if (len(col_depths) == 0): + + # get the land mask + col_mask = grid["tmask"][0, :, ts_y, ts_x] + + # get the depths of the watercolumn and filter only cells that have water + col_depths = grid["gdept_0"][0, :, ts_y, ts_x] + col_depths = col_depths[col_mask == 1] + + ### if there is no water, no halocline + if len(col_depths) == 0: halocline = np.nan - - else: - #get the salinity of the point, again filtering for where water exists - col_sal = nemo['vosaline'][0,:,ts_y,ts_x] - col_sal = col_sal[col_mask==1] - #get the gradient in salinity + else: + # get the salinity of the point, again filtering for where water exists + col_sal = nemo["vosaline"][0, :, ts_y, ts_x] + col_sal = col_sal[col_mask == 1] + + # get the gradient in salinity sal_grad = np.zeros_like(col_sal) - for i in range(0, (len(col_sal)-1)): - sal_grad[i] = np.abs((col_sal[i]-col_sal[i+1])/(col_depths[i]-col_depths[i+1])) + for i in range(0, (len(col_sal) - 1)): + sal_grad[i] = np.abs( + (col_sal[i] - col_sal[i + 1]) / (col_depths[i] - col_depths[i + 1]) + ) - #print(sal_grad) + # print(sal_grad) loc_max = np.where(sal_grad == np.nanmax(sal_grad)) - loc_max = (loc_max[0][0]) + loc_max = loc_max[0][0] - #halocline is halfway between the two cells - halocline = col_depths[loc_max] + 0.5*(col_depths[loc_max+1]-col_depths[loc_max]) + # halocline is halfway between the two cells + halocline = col_depths[loc_max] + 0.5 * ( + col_depths[loc_max + 1] - col_depths[loc_max] + ) - return halocline # regression line and r2 value for plots -def reg_r2(driver,bloomdate): - - '''Given two arrays of the same length, returns linear regression best - fit line and r-squared value. - - Parameters: - driver: 1D array of the independent (predictor) variable - bloomdate: 1D array of the dependent (response) variable, the - same length as "driver" - Returns: - y: y-coordinates of best fit line - r2: r-squared value of regression fit - m: slope of line - c: y-intercepth of line - ''' - +def reg_r2(driver, bloomdate): + """Given two arrays of the same length, returns linear regression best + fit line and r-squared value. + + Parameters: + driver: 1D array of the independent (predictor) variable + bloomdate: 1D array of the dependent (response) variable, the + same length as "driver" + Returns: + y: y-coordinates of best fit line + r2: r-squared value of regression fit + m: slope of line + c: y-intercepth of line + """ + A = np.vstack([driver, np.ones(len(driver))]).T - m, c = np.linalg.lstsq(A, bloomdate,rcond=None)[0] - m=round(m,3) - c=round(c,2) - y = m*driver + c - model, resid = np.linalg.lstsq(A, bloomdate,rcond=None)[:2] + m, c = np.linalg.lstsq(A, bloomdate, rcond=None)[0] + m = round(m, 3) + c = round(c, 2) + y = m * driver + c + model, resid = np.linalg.lstsq(A, bloomdate, rcond=None)[:2] r2 = 1 - resid / (len(bloomdate) * np.var(bloomdate)) return y, r2, m, c + # depth of turbocline -def turbo(eddy,time,depth): - '''Given a datetime array of 3 months, a depth array, and 2D array of eddy - diffusivity over time and depth, returns the average turbocline depth - for each of the three months. 
Turbocline depth is defined here as the depth - before the depth at which eddy diffusivity reaches a value of 0.001 m^2/s - - Parameters: - eddy: 2-dimensional numpy array containing daily averaged eddy diffusivity - of the same time frame as 'time', and over depth - time: datetime array of each day starting from the 1st day - of the first month, ending on the last day of the third month - depth: depth array from grid_T - Returns: - jan_turbo: average turbocline depth of the first month (single value) - feb_turbo: average turbocline depth of the second month (single value) - mar_turbo: average turbocline depth of the third month (single value) - ''' - turbo=list() - for day in eddy: - dfed=pd.DataFrame({'depth':depth[:-1], 'eddy':day[1:]}) - dfed=dfed.iloc[1:] # dropping surface values - dfed[:21] #keep top 21 (25m depth) +def turbo(eddy, time, depth): + """Given a datetime array of 3 months, a depth array, and 2D array of eddy + diffusivity over time and depth, returns the average turbocline depth + for each of the three months. Turbocline depth is defined here as the depth + before the depth at which eddy diffusivity reaches a value of 0.001 m^2/s + + Parameters: + eddy: 2-dimensional numpy array containing daily averaged eddy diffusivity + of the same time frame as 'time', and over depth + time: datetime array of each day starting from the 1st day + of the first month, ending on the last day of the third month + depth: depth array from grid_T + Returns: + jan_turbo: average turbocline depth of the first month (single value) + feb_turbo: average turbocline depth of the second month (single value) + mar_turbo: average turbocline depth of the third month (single value) + """ + turbo = list() + for day in eddy: + dfed = pd.DataFrame({"depth": depth[:-1], "eddy": day[1:]}) + dfed = dfed.iloc[1:] # dropping surface values + dfed[:21] # keep top 21 (25m depth) for i, row in dfed.iterrows(): try: - if row['eddy']<0.001: - turbo.append(dfed.at[i,'depth']) + if row["eddy"] < 0.001: + turbo.append(dfed.at[i, "depth"]) break except IndexError: turbo.append(np.nan) - print('turbocline depth not found') - dfturbo=pd.DataFrame({'time':time, 'turbo':turbo}) - monthlyturbo=pd.DataFrame(dfturbo.resample('M', on='time').turbo.mean()) + print("turbocline depth not found") + dfturbo = pd.DataFrame({"time": time, "turbo": turbo}) + monthlyturbo = pd.DataFrame(dfturbo.resample("M", on="time").turbo.mean()) monthlyturbo.reset_index(inplace=True) - jan_turbo=monthlyturbo.iloc[0]['turbo'] - feb_turbo=monthlyturbo.iloc[1]['turbo'] - mar_turbo=monthlyturbo.iloc[2]['turbo'] + jan_turbo = monthlyturbo.iloc[0]["turbo"] + feb_turbo = monthlyturbo.iloc[1]["turbo"] + mar_turbo = monthlyturbo.iloc[2]["turbo"] return jan_turbo, feb_turbo, mar_turbo -def density_diff(sal,temp,time): - - '''Given a datetime array of 3 months, a 2D array of salinity over time and depth, - a 2D array of temperature over time and depth, returns the difference in density - from the surface to a series of depths averaged over each month for 3 months - - Parameters: - sal: 2-dimensional numpy array containing daily averaged salinity - of the same time frame as 'time', and over depth - temp: 2-dimensional numpy array containing daily averaged temperature - of the same time frame as 'time', and over depth - time: datetime array of each day starting from the 1st day - of the first month, ending on the last day of the third month - - Returns: - density_diffs: a dictionary containing a description as a string and the - density difference from the 
surface to some depth (the depth - range is 5m to 30m, in increments of 5m) - Eg. 'Jan 5m': somevalue - describes that the numerical value on the right (somevalue) - is the density difference from the surface to 5m depth, averaged - over the first month - ''' - p=0 - depthrange={5:5,10:10,15:15,19:20,20:25,21:30} - density_diffs=dict() - for ind,depth in depthrange.items(): - dsal=pd.DataFrame(sal) - dtemp=pd.DataFrame(temp) - - surfacedens=gsw.rho(dsal.iloc[:,0],dtemp.iloc[:,0],p) # get the surface density - idens=gsw.rho(dsal.iloc[:,ind],dtemp.iloc[:,ind],p) # get the density at that depth - densdiff=idens-surfacedens # get the daily density difference - - df=pd.DataFrame({'time':time, 'densdiff':densdiff}) - monthlydiff=pd.DataFrame(df.resample('M', on='time').densdiff.mean()) # average over months + +def density_diff(sal, temp, time): + """Given a datetime array of 3 months, a 2D array of salinity over time and depth, + a 2D array of temperature over time and depth, returns the difference in density + from the surface to a series of depths averaged over each month for 3 months + + Parameters: + sal: 2-dimensional numpy array containing daily averaged salinity + of the same time frame as 'time', and over depth + temp: 2-dimensional numpy array containing daily averaged temperature + of the same time frame as 'time', and over depth + time: datetime array of each day starting from the 1st day + of the first month, ending on the last day of the third month + + Returns: + density_diffs: a dictionary containing a description as a string and the + density difference from the surface to some depth (the depth + range is 5m to 30m, in increments of 5m) + Eg. 'Jan 5m': somevalue + describes that the numerical value on the right (somevalue) + is the density difference from the surface to 5m depth, averaged + over the first month + """ + p = 0 + depthrange = {5: 5, 10: 10, 15: 15, 19: 20, 20: 25, 21: 30} + density_diffs = dict() + for ind, depth in depthrange.items(): + dsal = pd.DataFrame(sal) + dtemp = pd.DataFrame(temp) + + surfacedens = gsw.rho( + dsal.iloc[:, 0], dtemp.iloc[:, 0], p + ) # get the surface density + idens = gsw.rho( + dsal.iloc[:, ind], dtemp.iloc[:, ind], p + ) # get the density at that depth + densdiff = idens - surfacedens # get the daily density difference + + df = pd.DataFrame({"time": time, "densdiff": densdiff}) + monthlydiff = pd.DataFrame( + df.resample("M", on="time").densdiff.mean() + ) # average over months monthlydiff.reset_index(inplace=True) - density_diffs[f'Jan {depth}m']=monthlydiff.iloc[0]['densdiff'] - density_diffs[f'Feb {depth}m']=monthlydiff.iloc[1]['densdiff'] - density_diffs[f'Mar {depth}m']=monthlydiff.iloc[2]['densdiff'] + density_diffs[f"Jan {depth}m"] = monthlydiff.iloc[0]["densdiff"] + density_diffs[f"Feb {depth}m"] = monthlydiff.iloc[1]["densdiff"] + density_diffs[f"Mar {depth}m"] = monthlydiff.iloc[2]["densdiff"] return density_diffs -def avg_eddy(eddy,time,ij,ii): - - '''Given a 2D array of eddy diffusivity over time and depth, a datetime array of 3 months, - the x and y coordinates of the location of interest, returns the average eddy diffusivity - over the upper 15 and 30, each averaged over every month - - Parameters: - eddy: 2-dimensional numpy array containing daily averaged eddy diffusivity - of the same time frame as 'time', and over depth - time: datetime array of each day starting from the 1st day - of the first month, ending on the last day of the third month - ij: y-coordinate for location - ii: x-coordinate for location - Returns: - 
jan_eddyk1: average eddy diffusivity over upper 15m, averaged over the first month - feb_eddyk1: average eddy diffusivity over upper 15m, averaged over the second month - mar_eddyk1: average eddy diffusivity over upper 15m, averaged over the third month - jan_eddyk2: average eddy diffusivity over upper 30m, averaged over the first month - feb_eddyk2: average eddy diffusivity over upper 30m, averaged over the second month - mar_eddyk2: average eddy diffusivity over upper 30m, averaged over the third month - ''' - - k1=15 # 15m depth is index 15 (actual value is 15.096255) - k2=22 # 30m depth is index 22 (actual value is 31.101034) - with xr.open_dataset('/data/vdo/MEOPAR/NEMO-forcing/grid/mesh_mask201702.nc') as mesh: - tmask=np.array(mesh.tmask[0,:,ij,ii]) - e3t_0=np.array(mesh.e3t_0[0,:,ij,ii]) - e3t_k1=np.array(mesh.e3t_0[:,k1,ij,ii]) - e3t_k2=np.array(mesh.e3t_0[:,k1,ij,ii]) +def avg_eddy(eddy, time, ij, ii): + """Given a 2D array of eddy diffusivity over time and depth, a datetime array of 3 months, + the x and y coordinates of the location of interest, returns the average eddy diffusivity + over the upper 15 and 30, each averaged over every month + + Parameters: + eddy: 2-dimensional numpy array containing daily averaged eddy diffusivity + of the same time frame as 'time', and over depth + time: datetime array of each day starting from the 1st day + of the first month, ending on the last day of the third month + ij: y-coordinate for location + ii: x-coordinate for location + Returns: + jan_eddyk1: average eddy diffusivity over upper 15m, averaged over the first month + feb_eddyk1: average eddy diffusivity over upper 15m, averaged over the second month + mar_eddyk1: average eddy diffusivity over upper 15m, averaged over the third month + jan_eddyk2: average eddy diffusivity over upper 30m, averaged over the first month + feb_eddyk2: average eddy diffusivity over upper 30m, averaged over the second month + mar_eddyk2: average eddy diffusivity over upper 30m, averaged over the third month + """ + + k1 = 15 # 15m depth is index 15 (actual value is 15.096255) + k2 = 22 # 30m depth is index 22 (actual value is 31.101034) + with xr.open_dataset( + "/data/vdo/MEOPAR/NEMO-forcing/grid/mesh_mask201702.nc" + ) as mesh: + tmask = np.array(mesh.tmask[0, :, ij, ii]) + e3t_0 = np.array(mesh.e3t_0[0, :, ij, ii]) + e3t_k1 = np.array(mesh.e3t_0[:, k1, ij, ii]) + e3t_k2 = np.array(mesh.e3t_0[:, k1, ij, ii]) # vertical sum of microzo in mmol/m3 * vertical grid thickness in m: - inteddy=list() - avgeddyk1=list() - avgeddyk2=list() + inteddy = list() + avgeddyk1 = list() + avgeddyk2 = list() for dailyeddy in eddy: - eddy_tgrid=(dailyeddy[1:]+dailyeddy[:-1]) - eddy_e3t=eddy_tgrid*e3t_0[:-1] - avgeddyk1.append(np.sum(eddy_e3t[:k1]*tmask[:k1])/np.sum(e3t_0[:k1])) - avgeddyk2.append(np.sum(eddy_e3t[:k2]*tmask[:k2])/np.sum(e3t_0[:k2])) - - df=pd.DataFrame({'time':time, 'eddyk1':avgeddyk1,'eddyk2':avgeddyk2}) - monthlyeddyk1=pd.DataFrame(df.resample('M', on='time').eddyk1.mean()) - monthlyeddyk2=pd.DataFrame(df.resample('M', on='time').eddyk2.mean()) + eddy_tgrid = dailyeddy[1:] + dailyeddy[:-1] + eddy_e3t = eddy_tgrid * e3t_0[:-1] + avgeddyk1.append(np.sum(eddy_e3t[:k1] * tmask[:k1]) / np.sum(e3t_0[:k1])) + avgeddyk2.append(np.sum(eddy_e3t[:k2] * tmask[:k2]) / np.sum(e3t_0[:k2])) + + df = pd.DataFrame({"time": time, "eddyk1": avgeddyk1, "eddyk2": avgeddyk2}) + monthlyeddyk1 = pd.DataFrame(df.resample("M", on="time").eddyk1.mean()) + monthlyeddyk2 = pd.DataFrame(df.resample("M", on="time").eddyk2.mean()) 
monthlyeddyk1.reset_index(inplace=True) monthlyeddyk2.reset_index(inplace=True) - jan_eddyk1=monthlyeddyk1.iloc[0]['eddyk1'] - feb_eddyk1=monthlyeddyk1.iloc[1]['eddyk1'] - mar_eddyk1=monthlyeddyk1.iloc[2]['eddyk1'] - jan_eddyk2=monthlyeddyk2.iloc[0]['eddyk2'] - feb_eddyk2=monthlyeddyk2.iloc[1]['eddyk2'] - mar_eddyk2=monthlyeddyk2.iloc[2]['eddyk2'] - return jan_eddyk1, feb_eddyk1, mar_eddyk1,jan_eddyk2,feb_eddyk2,mar_eddyk2 + jan_eddyk1 = monthlyeddyk1.iloc[0]["eddyk1"] + feb_eddyk1 = monthlyeddyk1.iloc[1]["eddyk1"] + mar_eddyk1 = monthlyeddyk1.iloc[2]["eddyk1"] + jan_eddyk2 = monthlyeddyk2.iloc[0]["eddyk2"] + feb_eddyk2 = monthlyeddyk2.iloc[1]["eddyk2"] + mar_eddyk2 = monthlyeddyk2.iloc[2]["eddyk2"] + return jan_eddyk1, feb_eddyk1, mar_eddyk1, jan_eddyk2, feb_eddyk2, mar_eddyk2 diff --git a/SalishSeaTools/salishsea_tools/data_tools.py b/SalishSeaTools/salishsea_tools/data_tools.py index 32e50c5f..56e87153 100644 --- a/SalishSeaTools/salishsea_tools/data_tools.py +++ b/SalishSeaTools/salishsea_tools/data_tools.py @@ -379,9 +379,7 @@ def onc_json_to_dataset(onc_json, teos=True): "actualSamples": sensor["actualSamples"], }, ) - dataset_attrs = { - "station": onc_json["parameters"]["locationCode"] - } + dataset_attrs = {"station": onc_json["parameters"]["locationCode"]} return xarray.Dataset(data_vars, attrs=dataset_attrs) @@ -487,11 +485,13 @@ def get_chs_tides( # IWLS API limits requests to 7 day long periods water_levels, datetimes = [], [] - for (span_start, span_end) in arrow.Arrow.span_range("week", begin, end, exact=True): - query_params.update({ - "from": f"{span_start.format('YYYY-MM-DDTHH:mm:ss')}Z", - "to": f"{span_end.format('YYYY-MM-DDTHH:mm:ss')}Z", - }) + for span_start, span_end in arrow.Arrow.span_range("week", begin, end, exact=True): + query_params.update( + { + "from": f"{span_start.format('YYYY-MM-DDTHH:mm:ss')}Z", + "to": f"{span_end.format('YYYY-MM-DDTHH:mm:ss')}Z", + } + ) response = _do_chs_iwls_api_request(endpoint, query_params, retry_args) water_levels.extend(event["value"] for event in response.json()) datetimes.extend(event["eventDate"] for event in response.json()) @@ -499,7 +499,8 @@ def get_chs_tides( if not water_levels: logging.info( f"no {data_type} water level data available from {stn_id} during " - f"{begin.format('YYYY-MM-DD HH:mm:ss')}Z to {end.format('YYYY-MM-DD HH:mm:ss')}Z") + f"{begin.format('YYYY-MM-DD HH:mm:ss')}Z to {end.format('YYYY-MM-DD HH:mm:ss')}Z" + ) return time_series = pd.Series( data=water_levels, @@ -508,7 +509,7 @@ def get_chs_tides( f"{stn_number} water levels" if int(stn_number) == stn else f"{stn_number} {stn} water levels" - ) + ), ) return time_series @@ -556,9 +557,7 @@ def get_chs_tide_stn_id( endpoint = f"{api_server}/api/{api_version}/stations" stn_code = resolve_chs_tide_stn(stn) if stn_code is None: - logging.warning( - f"can't resolve a valid CHS station code for {stn}" - ) + logging.warning(f"can't resolve a valid CHS station code for {stn}") return query_params = {"code": stn_code} response = _do_chs_iwls_api_request(endpoint, query_params, retry_args) @@ -585,6 +584,7 @@ def _do_chs_iwls_api_request(endpoint, query_params, retry_args): :return: API response :rtype: :py:class:`requests.Response` """ + @retry(**retry_args) def do_api_request(endpoint, quer_params): return requests.get(endpoint, query_params) @@ -614,7 +614,9 @@ def resolve_chs_tide_stn(stn): ) return except TypeError: - logging.warning(f"invalid station number for {stn} station: {PLACES[stn]['stn number']}") + logging.warning( + f"invalid station number 
for {stn} station: {PLACES[stn]['stn number']}" + ) return diff --git a/SalishSeaTools/salishsea_tools/diagnosis_tools.py b/SalishSeaTools/salishsea_tools/diagnosis_tools.py index ace9dfb3..9bda978d 100644 --- a/SalishSeaTools/salishsea_tools/diagnosis_tools.py +++ b/SalishSeaTools/salishsea_tools/diagnosis_tools.py @@ -22,11 +22,10 @@ from salishsea_tools import nc_tools -__all__ = [ - 'pcourantu', 'pcourantv','pcourantw' -] +__all__ = ["pcourantu", "pcourantv", "pcourantw"] -def pcourantu(files,meshmask): + +def pcourantu(files, meshmask): """Given a list of U filenames and a mesh mask, returns an array with the unscaled Courant numbers. :arg files: list of U filenames @@ -38,22 +37,23 @@ def pcourantu(files,meshmask): :rtype: :py:class: `numpy.ma.core.MaskedArray` """ - delta_x = meshmask['e1u'][:] - with nc_tools.scDataset(files) as f: #merging files - nt,nz,ny,nx = f.variables['vozocrtx'].shape - umax = np.zeros((nz,ny,nx)) + delta_x = meshmask["e1u"][:] + with nc_tools.scDataset(files) as f: # merging files + nt, nz, ny, nx = f.variables["vozocrtx"].shape + umax = np.zeros((nz, ny, nx)) for n in range(nt): - utmp = np.abs(f.variables['vozocrtx'][n,:,:,:]) - umax = np.maximum(utmp,umax) #taking maximum over time - ubdxmax = np.zeros((ny,nx)) + utmp = np.abs(f.variables["vozocrtx"][n, :, :, :]) + umax = np.maximum(utmp, umax) # taking maximum over time + ubdxmax = np.zeros((ny, nx)) for m in range(nz): - ubdxtmp = umax[m,...] / delta_x[0,...] - ubdxmax = np.maximum(ubdxtmp,ubdxmax) #taking maximum over depth + ubdxtmp = umax[m, ...] / delta_x[0, ...] + ubdxmax = np.maximum(ubdxtmp, ubdxmax) # taking maximum over depth + + umask = meshmask["umask"][0, 0, ...] + return ma.masked_array(ubdxmax, mask=1 - umask) - umask = meshmask['umask'][0,0,...] - return ma.masked_array(ubdxmax, mask = 1-umask) -def pcourantv(files,meshmask): +def pcourantv(files, meshmask): """Given a list of V filenames and a mesh mask, returns an array with the unscaled Courant numbers. :arg files: list of V filenames @@ -65,22 +65,23 @@ def pcourantv(files,meshmask): :rtype: :py:class: `numpy.ma.core.MaskedArray` """ - delta_y = meshmask['e2v'][:] - with nc_tools.scDataset(files) as f: #merging files - nt,nz,ny,nx = f.variables['vomecrty'].shape - vmax = np.zeros((nz,ny,nx)) + delta_y = meshmask["e2v"][:] + with nc_tools.scDataset(files) as f: # merging files + nt, nz, ny, nx = f.variables["vomecrty"].shape + vmax = np.zeros((nz, ny, nx)) for n in range(nt): - vtmp = np.abs(f.variables['vomecrty'][n,:,:,:]) - vmax = np.maximum(vtmp,vmax) #taking maximum over time - vbdymax = np.zeros((ny,nx)) + vtmp = np.abs(f.variables["vomecrty"][n, :, :, :]) + vmax = np.maximum(vtmp, vmax) # taking maximum over time + vbdymax = np.zeros((ny, nx)) for m in range(nz): - vbdytmp = vmax[m,...] / delta_y[0,...] - vbdymax = np.maximum(vbdytmp,vbdymax) #taking maximum over depth + vbdytmp = vmax[m, ...] / delta_y[0, ...] + vbdymax = np.maximum(vbdytmp, vbdymax) # taking maximum over depth + + vmask = meshmask["vmask"][0, 0, ...] + return ma.masked_array(vbdymax, mask=1 - vmask) - vmask = meshmask['vmask'][0,0,...] - return ma.masked_array(vbdymax, mask = 1-vmask) -def pcourantw(files,meshmask): +def pcourantw(files, meshmask): """Given a list of W filenames and a mesh mask, returns an array with the unscaled Courant numbers. 
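
# Illustrative sketch (not part of the patch): pcourantu/v/w above return an unscaled
# Courant number, the maximum over time and depth of |u| / delta_x; multiplying by the
# model time step gives the usual CFL number C = u * dt / dx.  A minimal numpy version
# of the same double maximum, using a small synthetic velocity field:
import numpy as np

rng = np.random.default_rng(0)
u = rng.random((4, 3, 5, 6))        # synthetic (time, depth, y, x) velocities in m/s
e1u = np.full((5, 6), 440.0)        # synthetic grid spacing in m
umax = np.abs(u).max(axis=0)        # maximum speed over time
ubdxmax = (umax / e1u).max(axis=0)  # then maximum of |u|/dx over depth
cfl = ubdxmax * 40.0                # e.g. scaled by a hypothetical 40 s time step
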
:arg files: list of W filenames @@ -92,21 +93,21 @@ def pcourantw(files,meshmask): :rtype: :py:class: `numpy.ma.core.MaskedArray` """ - with nc_tools.scDataset(files) as f: #merging files - nt,nz,ny,nx = f.variables['vovecrtz'].shape - delta_z = meshmask['e3w_1d'][0,...] - delta_z = delta_z[:,np.newaxis,np.newaxis] + with nc_tools.scDataset(files) as f: # merging files + nt, nz, ny, nx = f.variables["vovecrtz"].shape + delta_z = meshmask["e3w_1d"][0, ...] + delta_z = delta_z[:, np.newaxis, np.newaxis] - wmax = np.zeros((nz,ny,nx)) + wmax = np.zeros((nz, ny, nx)) for n in range(nt): - wtmp = np.abs(f.variables['vovecrtz'][n,:,:,:]) - wmax = np.maximum(wtmp,wmax) #taking maximum over time + wtmp = np.abs(f.variables["vovecrtz"][n, :, :, :]) + wmax = np.maximum(wtmp, wmax) # taking maximum over time wbdz = wmax / delta_z - wbdzmax = np.zeros((ny,nx)) + wbdzmax = np.zeros((ny, nx)) for m in range(nz): - wbdztmp = wbdz[m,...] - wbdzmax = np.maximum(wbdztmp,wbdzmax) #taking maximum over depth + wbdztmp = wbdz[m, ...] + wbdzmax = np.maximum(wbdztmp, wbdzmax) # taking maximum over depth - tmask = meshmask['tmask'][0,0,...] - return ma.masked_array(wbdzmax, mask = 1-tmask) + tmask = meshmask["tmask"][0, 0, ...] + return ma.masked_array(wbdzmax, mask=1 - tmask) diff --git a/SalishSeaTools/salishsea_tools/ellipse.py b/SalishSeaTools/salishsea_tools/ellipse.py index 212591cf..8cd1f804 100644 --- a/SalishSeaTools/salishsea_tools/ellipse.py +++ b/SalishSeaTools/salishsea_tools/ellipse.py @@ -41,37 +41,38 @@ def ellipse_params(uamp, upha, vamp, vpha): :returns: CX, SX, CY, SY, ap, am, ep, em, major, minor, theta, phase The positively and negatively rotating amplitude and phase. As well as the major and minor axis and the axis tilt. - """ - - CX = uamp*np.cos(np.pi*upha/180.) - SX = uamp*np.sin(np.pi*upha/180.) - CY = vamp*np.cos(np.pi*vpha/180.) - SY = vamp*np.sin(np.pi*vpha/180.) - ap = np.sqrt((CX+SY)**2+(CY-SX)**2)/2. - am = np.sqrt((CX-SY)**2+(CY+SX)**2)/2. - ep = np.arctan2(CY-SX, CX+SY) - em = np.arctan2(CY+SX, CX-SY) - major = ap+am - minor = ap-am - theta = (ep+em)/2.*180./np.pi - phase = (em-ep)/2.*180./np.pi + """ + + CX = uamp * np.cos(np.pi * upha / 180.0) + SX = uamp * np.sin(np.pi * upha / 180.0) + CY = vamp * np.cos(np.pi * vpha / 180.0) + SY = vamp * np.sin(np.pi * vpha / 180.0) + ap = np.sqrt((CX + SY) ** 2 + (CY - SX) ** 2) / 2.0 + am = np.sqrt((CX - SY) ** 2 + (CY + SX) ** 2) / 2.0 + ep = np.arctan2(CY - SX, CX + SY) + em = np.arctan2(CY + SX, CX - SY) + major = ap + am + minor = ap - am + theta = (ep + em) / 2.0 * 180.0 / np.pi + phase = (em - ep) / 2.0 * 180.0 / np.pi # Make angles be between [0,360] - phase = (phase+360) % 360 - theta = (theta+360) % 360 + phase = (phase + 360) % 360 + theta = (theta + 360) % 360 ind = np.divide(theta, 180) k = np.floor(ind) - theta = theta - k*180 - phase = phase + k*180 - phase = (phase+360) % 360 + theta = theta - k * 180 + phase = phase + k * 180 + phase = (phase + 360) % 360 return CX, SX, CY, SY, ap, am, ep, em, major, minor, theta, phase -def ellipse_files_nowcast(to, tf, iss, jss, path, depthrange='None', - period='1h', station='None'): - """ This function loads all the data between the start and the end date +def ellipse_files_nowcast( + to, tf, iss, jss, path, depthrange="None", period="1h", station="None" +): + """This function loads all the data between the start and the end date that contains in the netCDF4 nowcast files in the specified depth range. 
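
# Illustrative sketch (not part of the patch): a scalar check of ellipse_params() above.
# The code builds counter-rotating amplitudes ap = sqrt((CX+SY)**2 + (CY-SX)**2) / 2 and
# am = sqrt((CX-SY)**2 + (CY+SX)**2) / 2, with major = ap + am and minor = ap - am, so
# equal u/v amplitudes 90 degrees out of phase (vpha - upha = 90) give a circle whose
# semi-major and semi-minor axes both equal the common amplitude.
import numpy as np

from salishsea_tools.ellipse import ellipse_params

out = ellipse_params(uamp=1.0, upha=0.0, vamp=1.0, vpha=90.0)
major, minor = out[8], out[9]
assert np.isclose(major, 1.0) and np.isclose(minor, 1.0)
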
This will make an area with all the indices indicated, the area must be continuous for unstaggering. @@ -108,21 +109,21 @@ def ellipse_files_nowcast(to, tf, iss, jss, path, depthrange='None', # The unstaggering in prepare_vel.py requiers an extra i and j, we add one # on here to maintain the area, or point chosen. - jss = np.append(jss[0]-1, jss) - iss = np.append(iss[0]-1, iss) + jss = np.append(jss[0] - 1, jss) + iss = np.append(iss[0] - 1, iss) # Makes a list of the filenames that follow the criteria in the indicated # path between the start and end dates. - if period == '15m': + if period == "15m": files = analyze.get_filenames_15(to, tf, station, path) filesu = files filesv = files else: - filesu = analyze.get_filenames(to, tf, period, 'grid_U', path) - filesv = analyze.get_filenames(to, tf, period, 'grid_V', path) + filesu = analyze.get_filenames(to, tf, period, "grid_U", path) + filesv = analyze.get_filenames(to, tf, period, "grid_V", path) # Set up depth array and depth range - depth = nc.Dataset(filesu[-1]).variables['depthu'][:] + depth = nc.Dataset(filesu[-1]).variables["depthu"][:] # Case one: for a single depth. if type(depthrange) == float or type(depthrange) == int: @@ -130,9 +131,7 @@ def ellipse_files_nowcast(to, tf, iss, jss, path, depthrange='None', dep = depth[k] # Case two: for a specific range of depths elif type(depthrange) == list: - k = np.where(np.logical_and( - depth > depthrange[0], - depth < depthrange[1]))[0] + k = np.where(np.logical_and(depth > depthrange[0], depth < depthrange[1]))[0] dep = depth[k] # Case three: For the whole depth range 0 to 441m. else: @@ -140,19 +139,19 @@ def ellipse_files_nowcast(to, tf, iss, jss, path, depthrange='None', dep = depth # Load the files - u, time = analyze.combine_files(filesu, 'vozocrtx', k, jss, iss) - v, time = analyze.combine_files(filesv, 'vomecrty', k, jss, iss) + u, time = analyze.combine_files(filesu, "vozocrtx", k, jss, iss) + v, time = analyze.combine_files(filesv, "vomecrty", k, jss, iss) # For the nowcast the reftime is always Sep10th 2014. Set time of area we # are looking at relative to this time. - reftime = tidetools.CorrTides['reftime'] + reftime = tidetools.CorrTides["reftime"] time = tidetools.convert_to_hours(time, reftime=reftime) return u, v, time, dep -def prepare_vel(u, v, depav=False, depth='None'): - """ Preparing the time series of the orthogonal pair of velocities to get tidal +def prepare_vel(u, v, depav=False, depth="None"): + """Preparing the time series of the orthogonal pair of velocities to get tidal ellipse parameters. This function masks, rotates and unstaggers the time series. The depth averaging does not work over masked values. 
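
# Illustrative usage sketch (not part of the patch): one way ellipse_files_nowcast() and
# prepare_vel() above might be chained before fitting ellipse parameters.  The dates,
# index arrays, and results path are hypothetical placeholders; only the call signatures
# come from the code above.
import datetime

import numpy as np
from salishsea_tools import ellipse

to = datetime.datetime(2015, 1, 1)
tf = datetime.datetime(2015, 1, 31)
u, v, time, dep = ellipse.ellipse_files_nowcast(
    to, tf,
    np.array([250, 251]), np.array([450, 451]),  # placeholder iss/jss index arrays
    "/results/SalishSea/nowcast/",               # placeholder results path
    depthrange=[0.0, 50.0],
)
u_u, v_v = ellipse.prepare_vel(u, v, depav=True, depth=dep)
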
@@ -223,42 +222,45 @@ def get_params(u, v, time, nconst, tidecorr=tidetools.CorrTides): # Cycling through the constituents in the ap-parameter dict given by fittit for const in uapparam: # Applying tidal corrections to u and v phase parameter - uapparam[const]['phase'] = ( - uapparam[const]['phase'] + tidecorr[const]['uvt']) - vapparam[const]['phase'] = ( - vapparam[const]['phase'] + tidecorr[const]['uvt']) + uapparam[const]["phase"] = uapparam[const]["phase"] + tidecorr[const]["uvt"] + vapparam[const]["phase"] = vapparam[const]["phase"] + tidecorr[const]["uvt"] # Applying tidal corrections to u and v amplitude parameter - uapparam[const]['amp'] = uapparam[const]['amp'] / tidecorr[const]['ft'] - vapparam[const]['amp'] = vapparam[const]['amp'] / tidecorr[const]['ft'] + uapparam[const]["amp"] = uapparam[const]["amp"] / tidecorr[const]["ft"] + vapparam[const]["amp"] = vapparam[const]["amp"] / tidecorr[const]["ft"] # Converting from u/v amplitude and phase to ellipe parameters. Inputs # are the amplitude and phase of both velocities, runs once for each # contituent CX, SX, CY, SY, ap, am, ep, em, maj, mi, the, pha = ellipse_params( - uapparam[const]['amp'], - uapparam[const]['phase'], - vapparam[const]['amp'], - vapparam[const]['phase']) + uapparam[const]["amp"], + uapparam[const]["phase"], + vapparam[const]["amp"], + vapparam[const]["phase"], + ) # Filling the dictionary with ep-parameters given by ellipse_param. # Each constituent will be a different key. params[const] = { - 'Semi-Major Axis': maj, - 'Semi-Minor Axis': mi, - 'Inclination': the, - 'Phase': pha - } + "Semi-Major Axis": maj, + "Semi-Minor Axis": mi, + "Inclination": the, + "Phase": pha, + } return params def get_params_nowcast_15( - to, tf, - station, - path, nconst, - depthrange='None', - depav=False, tidecorr=tidetools.CorrTides): + to, + tf, + station, + path, + nconst, + depthrange="None", + depav=False, + tidecorr=tidetools.CorrTides, +): """This function loads all the data between the start and the end date that contains quater houlry velocities in the netCDF4 nowcast files in the depth range. Then masks, rotates and unstaggers the time series. The @@ -299,10 +301,8 @@ def get_params_nowcast_15( """ u, v, time, dep = ellipse_files_nowcast( - to, tf, - [1], [1], - path, - depthrange=depthrange, period='15m', station=station) + to, tf, [1], [1], path, depthrange=depthrange, period="15m", station=station + ) u_u, v_v = prepare_vel(u, v, depav=depav, depth=dep) params = get_params(u_u, v_v, time, nconst, tidecorr=tidecorr) @@ -310,11 +310,16 @@ def get_params_nowcast_15( def get_params_nowcast( - to, tf, - i, j, - path, nconst, - depthrange='None', - depav=False, tidecorr=tidetools.CorrTides): + to, + tf, + i, + j, + path, + nconst, + depthrange="None", + depav=False, + tidecorr=tidetools.CorrTides, +): """This function loads all the data between the start and the end date that contains hourly velocities in the netCDF4 nowcast files in the specified depth range. Then masks, rotates and unstaggers the time series. 
The @@ -359,11 +364,7 @@ def get_params_nowcast( dep is the depths of the ellipse paramters """ - u, v, time, dep = ellipse_files_nowcast( - to, tf, - i, j, - path, - depthrange=depthrange) + u, v, time, dep = ellipse_files_nowcast(to, tf, i, j, path, depthrange=depthrange) u_u, v_v = prepare_vel(u, v, depav=depav, depth=dep) params = get_params(u_u, v_v, time, nconst, tidecorr=tidecorr) diff --git a/SalishSeaTools/salishsea_tools/evaltools.py b/SalishSeaTools/salishsea_tools/evaltools.py index c0ba077b..c7fd953d 100644 --- a/SalishSeaTools/salishsea_tools/evaltools.py +++ b/SalishSeaTools/salishsea_tools/evaltools.py @@ -39,14 +39,19 @@ # Check which Excel reader engine is available, if any, and set variable excelEngine try: import openpyxl - excelEngine='openpyxl' + + excelEngine = "openpyxl" except ImportError as iE: try: import xlrd - excelEngine='xlrd' + + excelEngine = "xlrd" except ImportError as iE: - excelEngine=None - warnings.warn("Neither Python Excel module ('openpyxl','xlrd') found",UserWarning) + excelEngine = None + warnings.warn( + "Neither Python Excel module ('openpyxl','xlrd') found", UserWarning + ) + # :arg dict varmap: dictionary mapping names of data columns to variable names, string to string, model:data def matchData( @@ -55,21 +60,21 @@ def matchData( fdict, mod_start=None, mod_end=None, - mod_nam_fmt='nowcast', - mod_basedir='/results/SalishSea/nowcast-green/', + mod_nam_fmt="nowcast", + mod_basedir="/results/SalishSea/nowcast-green/", mod_flen=1, - method='bin', + method="bin", meshPath=None, - maskName='tmask', + maskName="tmask", wrapSearch=False, fastSearch=False, wrapTol=1, - e3tvar='e3t', + e3tvar="e3t", fid=None, sdim=3, quiet=False, - preIndexed=False - ): + preIndexed=False, +): """Given a discrete sample dataset, find match model output note: only one grid mask is loaded so all model variables must be on same grid; defaults to tmask; @@ -137,596 +142,837 @@ def matchData( """ # define dictionaries of mesh lat and lon variables to use with different grids: - lonvar={'tmask':'nav_lon','umask':'glamu','vmask':'glamv','fmask':'glamf'} - latvar={'tmask':'nav_lat','umask':'gphiu','vmask':'gphiv','fmask':'gphif'} + lonvar = {"tmask": "nav_lon", "umask": "glamu", "vmask": "glamv", "fmask": "glamf"} + latvar = {"tmask": "nav_lat", "umask": "gphiu", "vmask": "gphiv", "fmask": "gphif"} # check that required columns are in dataframe: - if method == 'ferry' or sdim==2: - reqsubset=['dtUTC','Lat','Lon'] + if method == "ferry" or sdim == 2: + reqsubset = ["dtUTC", "Lat", "Lon"] if preIndexed: - reqsubset=['dtUTC','i','j'] - elif method == 'vertNet': - reqsubset=['dtUTC','Lat','Lon','Z_upper','Z_lower'] + reqsubset = ["dtUTC", "i", "j"] + elif method == "vertNet": + reqsubset = ["dtUTC", "Lat", "Lon", "Z_upper", "Z_lower"] if preIndexed: - reqsubset=['dtUTC','i','j','Z_upper','Z_lower'] + reqsubset = ["dtUTC", "i", "j", "Z_upper", "Z_lower"] else: - reqsubset=['dtUTC','Lat','Lon','Z'] + reqsubset = ["dtUTC", "Lat", "Lon", "Z"] if preIndexed: - reqsubset=['dtUTC','i','j','k'] + reqsubset = ["dtUTC", "i", "j", "k"] if not set(reqsubset) <= set(data.keys()): - raise Exception('{} missing from data'.format([el for el in set(reqsubset)-set(data.keys())],'%s')) + raise Exception( + "{} missing from data".format( + [el for el in set(reqsubset) - set(data.keys())], "%s" + ) + ) - fkeysVar=list(filemap.keys()) # list of model variables to return + fkeysVar = list(filemap.keys()) # list of model variables to return # don't load more files than necessary: - 
ftypes=list(fdict.keys()) + ftypes = list(fdict.keys()) for ikey in ftypes: if ikey not in set(filemap.values()): fdict.pop(ikey) - if len(set(filemap.values())-set(fdict.keys()))>0: - print('Error: file(s) missing from fdict:',set(filemap.values())-set(fdict.keys())) - ftypes=list(fdict.keys()) # list of filetypes to containing the desired model variables + if len(set(filemap.values()) - set(fdict.keys())) > 0: + print( + "Error: file(s) missing from fdict:", + set(filemap.values()) - set(fdict.keys()), + ) + ftypes = list( + fdict.keys() + ) # list of filetypes to containing the desired model variables # create inverted version of filemap dict mapping file types to the variables they contain - filemap_r=dict() + filemap_r = dict() for ift in ftypes: - filemap_r[ift]=list() + filemap_r[ift] = list() for ikey in filemap: filemap_r[filemap[ikey]].append(ikey) # if mod_start and mod_end not provided, use min and max of data datetimes if mod_start is None: - mod_start=np.min(data['dtUTC']) + mod_start = np.min(data["dtUTC"]) print(mod_start) if mod_end is None: - mod_end=np.max(data['dtUTC']) + mod_end = np.max(data["dtUTC"]) print(mod_end) # adjustments to data dataframe to avoid unnecessary calculations - data=data.loc[(data.dtUTC>=mod_start)&(data.dtUTC= mod_start) & (data.dtUTC < mod_end)].copy(deep=True) + data = data.dropna( + how="any", subset=reqsubset + ) # .dropna(how='all',subset=[*varmap.keys()]) - if maskName=='ops': + if maskName == "ops": # set default mesh file for ops data (atmos forcing) - if meshPath==None: - meshPath='/results/forcing/atmospheric/GEM2.5/operational/ops_y2015m01d01.nc' + if meshPath == None: + meshPath = ( + "/results/forcing/atmospheric/GEM2.5/operational/ops_y2015m01d01.nc" + ) # load lat, lon, and mask (all ones for ops - no land in sky) with nc.Dataset(meshPath) as fmesh: - navlon=np.squeeze(np.copy(fmesh.variables['nav_lon'][:,:]-360)) - navlat=np.squeeze(np.copy(fmesh.variables['nav_lat'][:,:])) - omask=np.expand_dims(np.ones(np.shape(navlon)),axis=(0,1)) - nemops='GEM2.5' + navlon = np.squeeze(np.copy(fmesh.variables["nav_lon"][:, :] - 360)) + navlat = np.squeeze(np.copy(fmesh.variables["nav_lat"][:, :])) + omask = np.expand_dims(np.ones(np.shape(navlon)), axis=(0, 1)) + nemops = "GEM2.5" else: # set default mesh file for SalishSeaCast data - if meshPath==None: - meshPath='/ocean/eolson/MEOPAR/NEMO-forcing/grid/mesh_mask201702_noLPE.nc' + if meshPath == None: + meshPath = "/ocean/eolson/MEOPAR/NEMO-forcing/grid/mesh_mask201702_noLPE.nc" # load lat lon and ocean mask with nc.Dataset(meshPath) as fmesh: - omask=np.copy(fmesh.variables[maskName]) - navlon=np.squeeze(np.copy(fmesh.variables[lonvar[maskName]][:,:])) - navlat=np.squeeze(np.copy(fmesh.variables[latvar[maskName]][:,:])) - if method == 'vertNet': - e3t0=np.squeeze(np.copy(fmesh.variables['e3t_0'][0,:,:,:])) - if maskName != 'tmask': - print('Warning: Using tmask thickness for variable on different grid') - nemops='NEMO' + omask = np.copy(fmesh.variables[maskName]) + navlon = np.squeeze(np.copy(fmesh.variables[lonvar[maskName]][:, :])) + navlat = np.squeeze(np.copy(fmesh.variables[latvar[maskName]][:, :])) + if method == "vertNet": + e3t0 = np.squeeze(np.copy(fmesh.variables["e3t_0"][0, :, :, :])) + if maskName != "tmask": + print( + "Warning: Using tmask thickness for variable on different grid" + ) + nemops = "NEMO" # handle horizontal gridding as necessary; make sure data is in order of ascending time if not preIndexed: # find location of each obs on model grid and add to data as additional 
columns 'i' and 'j' - data=_gridHoriz(data,omask,navlon,navlat,wrapSearch,wrapTol,fastSearch, quiet=quiet,nemops=nemops) - data=data.sort_values(by=[ix for ix in ['dtUTC','Z','j','i'] if ix in reqsubset]) # preserve list order + data = _gridHoriz( + data, + omask, + navlon, + navlat, + wrapSearch, + wrapTol, + fastSearch, + quiet=quiet, + nemops=nemops, + ) + data = data.sort_values( + by=[ix for ix in ["dtUTC", "Z", "j", "i"] if ix in reqsubset] + ) # preserve list order else: - data=data.sort_values(by=[ix for ix in ['dtUTC','k','j','i'] if ix in reqsubset]) # preserve list order - data.reset_index(drop=True,inplace=True) + data = data.sort_values( + by=[ix for ix in ["dtUTC", "k", "j", "i"] if ix in reqsubset] + ) # preserve list order + data.reset_index(drop=True, inplace=True) # set up columns to accept model values; prepend 'mod' to distinguish from obs names for ivar in filemap.keys(): - data['mod_'+ivar]=np.full(len(data),np.nan) + data["mod_" + ivar] = np.full(len(data), np.nan) # create dictionary of dataframes of filename, start time, and end time for each file type - flist=dict() + flist = dict() for ift in ftypes: - flist[ift]=index_model_files(mod_start,mod_end,mod_basedir,mod_nam_fmt,mod_flen,ift,fdict[ift]) + flist[ift] = index_model_files( + mod_start, mod_end, mod_basedir, mod_nam_fmt, mod_flen, ift, fdict[ift] + ) # call a function to carry out vertical matching based on specified method - if method == 'bin': - data = _binmatch(data,flist,ftypes,filemap_r,omask,maskName,sdim,preIndexed=preIndexed) - elif method == 'ferry': - print('data is matched to shallowest model level') - data = _ferrymatch(data,flist,ftypes,filemap_r,omask,fdict) - elif method == 'vvlZ': - data = _interpvvlZ(data,flist,ftypes,filemap,filemap_r,omask,fdict,e3tvar) - elif method == 'vvlBin': - data= _vvlBin(data,flist,ftypes,filemap,filemap_r,omask,fdict,e3tvar) - elif method == 'vertNet': - data = _vertNetmatch(data,flist,ftypes,filemap_r,omask,e3t0,maskName) + if method == "bin": + data = _binmatch( + data, flist, ftypes, filemap_r, omask, maskName, sdim, preIndexed=preIndexed + ) + elif method == "ferry": + print("data is matched to shallowest model level") + data = _ferrymatch(data, flist, ftypes, filemap_r, omask, fdict) + elif method == "vvlZ": + data = _interpvvlZ( + data, flist, ftypes, filemap, filemap_r, omask, fdict, e3tvar + ) + elif method == "vvlBin": + data = _vvlBin(data, flist, ftypes, filemap, filemap_r, omask, fdict, e3tvar) + elif method == "vertNet": + data = _vertNetmatch(data, flist, ftypes, filemap_r, omask, e3t0, maskName) else: - print('option '+method+' not written yet') + print("option " + method + " not written yet") return - data.reset_index(drop=True,inplace=True) + data.reset_index(drop=True, inplace=True) return data -def _gridHoriz(data,omask,navlon,navlat,wrapSearch,wrapTol,fastSearch=False, resetIndex=False,quiet=False,nemops='NEMO'): - """ this function finds the horizontal grid (i,j) indices for each model point and adds them - to the dataframe 'data' as additional columns - NOTE: points that are matched are dropped from the dataFrame; with quiet=False, the unmatched - lats and lons are printed + +def _gridHoriz( + data, + omask, + navlon, + navlat, + wrapSearch, + wrapTol, + fastSearch=False, + resetIndex=False, + quiet=False, + nemops="NEMO", +): + """this function finds the horizontal grid (i,j) indices for each model point and adds them + to the dataframe 'data' as additional columns + NOTE: points that are matched are dropped from the dataFrame; with 
quiet=False, the unmatched + lats and lons are printed """ - lmask=-1*(omask[0,0,:,:]-1) # NEMO masks have ocean = 1, but the functions called below require land = 1 + lmask = -1 * ( + omask[0, 0, :, :] - 1 + ) # NEMO masks have ocean = 1, but the functions called below require land = 1 if wrapSearch: # this speeds up the matching process for ferry data where there is a high likelihood each point # is close to the point before it - jj,ii = geo_tools.closestPointArray(data['Lon'].values,data['Lat'].values,navlon,navlat, - tol2=wrapTol,land_mask = lmask) - data['j']=[-1 if np.isnan(mm) else int(mm) for mm in jj] - data['i']=[-1 if np.isnan(mm) else int(mm) for mm in ii] + jj, ii = geo_tools.closestPointArray( + data["Lon"].values, + data["Lat"].values, + navlon, + navlat, + tol2=wrapTol, + land_mask=lmask, + ) + data["j"] = [-1 if np.isnan(mm) else int(mm) for mm in jj] + data["i"] = [-1 if np.isnan(mm) else int(mm) for mm in ii] elif fastSearch: - jjii = xr.open_dataset('~/MEOPAR/grid/grid_from_lat_lon_mask999.nc') - print (data['Lat']) - mylats = xr.DataArray(data['Lat']) - mylons = xr.DataArray(data['Lon']) - jj = jjii.jj.sel(lats=mylats, lons=mylons, method='nearest').values - ii = jjii.ii.sel(lats=mylats, lons=mylons, method='nearest').values - print (jj.shape, jj) - data['j'] = [-1 if mm==-999 else mm for mm in jj] - data['i'] = [-1 if mm==-999 else mm for mm in ii] + jjii = xr.open_dataset("~/MEOPAR/grid/grid_from_lat_lon_mask999.nc") + print(data["Lat"]) + mylats = xr.DataArray(data["Lat"]) + mylons = xr.DataArray(data["Lon"]) + jj = jjii.jj.sel(lats=mylats, lons=mylons, method="nearest").values + ii = jjii.ii.sel(lats=mylats, lons=mylons, method="nearest").values + print(jj.shape, jj) + data["j"] = [-1 if mm == -999 else mm for mm in jj] + data["i"] = [-1 if mm == -999 else mm for mm in ii] else: - data['j']=-1*np.ones((len(data))).astype(int) - data['i']=-1*np.ones((len(data))).astype(int) - for la,lo in np.unique(data.loc[:,['Lat','Lon']].values,axis=0): + data["j"] = -1 * np.ones((len(data))).astype(int) + data["i"] = -1 * np.ones((len(data))).astype(int) + for la, lo in np.unique(data.loc[:, ["Lat", "Lon"]].values, axis=0): try: - jj, ii = geo_tools.find_closest_model_point(lo, la, navlon, - navlat, grid=nemops,land_mask = lmask,checkTol=True) + jj, ii = geo_tools.find_closest_model_point( + lo, la, navlon, navlat, grid=nemops, land_mask=lmask, checkTol=True + ) except: - print('lo:',lo,'la:',la) + print("lo:", lo, "la:", la) raise - if isinstance(jj,int): - data.loc[(data.Lat==la)&(data.Lon==lo),['j','i']]=jj,ii + if isinstance(jj, int): + data.loc[(data.Lat == la) & (data.Lon == lo), ["j", "i"]] = jj, ii else: if not quiet: - print('(Lat,Lon)=',la,lo,' not matched to domain') - data.drop(data.loc[(data.i==-1)|(data.j==-1)].index, inplace=True) - if resetIndex==True: - data.reset_index(drop=True,inplace=True) + print("(Lat,Lon)=", la, lo, " not matched to domain") + data.drop(data.loc[(data.i == -1) | (data.j == -1)].index, inplace=True) + if resetIndex == True: + data.reset_index(drop=True, inplace=True) return data -def _vertNetmatch(data,flist,ftypes,filemap_r,gridmask,e3t0,maskName='tmask'): - """ basic vertical matching of model output to data - returns model value from model grid cell that would contain the observation point with - no interpolation; no consideration of the changing of grid thickenss with the tides (vvl) - strategy: loop through data, openening and closing model files as needed and storing model data + +def _vertNetmatch(data, flist, ftypes, filemap_r, 
gridmask, e3t0, maskName="tmask"): + """basic vertical matching of model output to data + returns model value from model grid cell that would contain the observation point with + no interpolation; no consideration of the changing of grid thickenss with the tides (vvl) + strategy: loop through data, openening and closing model files as needed and storing model data """ - if len(data)>5000: - pprint=True - lendat=len(data) + if len(data) > 5000: + pprint = True + lendat = len(data) else: - pprint= False + pprint = False # set up columns to hold indices for upper and lower end of range to average over - data['k_upper']=-1*np.ones((len(data))).astype(int) - data['k_lower']=-1*np.ones((len(data))).astype(int) + data["k_upper"] = -1 * np.ones((len(data))).astype(int) + data["k_lower"] = -1 * np.ones((len(data))).astype(int) for ind, row in data.iterrows(): - if (pprint==True and ind%5000==0): - print('progress: {}%'.format(ind/lendat*100)) - if ind==0: # special case for start of loop; load first files - fid=dict() - fend=dict() - torig=dict() + if pprint == True and ind % 5000 == 0: + print("progress: {}%".format(ind / lendat * 100)) + if ind == 0: # special case for start of loop; load first files + fid = dict() + fend = dict() + torig = dict() for ift in ftypes: - fid,fend=_nextfile_bin(ift,row['dtUTC'],flist[ift],fid,fend,flist) + fid, fend = _nextfile_bin( + ift, row["dtUTC"], flist[ift], fid, fend, flist + ) # handle NEMO files time reference - if 'time_centered' in fid[ftypes[0]].variables.keys(): - torig[ift]=dt.datetime.strptime(fid[ftypes[0]].variables['time_centered'].time_origin,'%Y-%m-%d %H:%M:%S') + if "time_centered" in fid[ftypes[0]].variables.keys(): + torig[ift] = dt.datetime.strptime( + fid[ftypes[0]].variables["time_centered"].time_origin, + "%Y-%m-%d %H:%M:%S", + ) else: - torig[ift]=dt.datetime.strptime(fid[ftypes[0]].variables['time_counter'].time_origin,'%Y-%m-%d %H:%M:%S') + torig[ift] = dt.datetime.strptime( + fid[ftypes[0]].variables["time_counter"].time_origin, + "%Y-%m-%d %H:%M:%S", + ) # loop through each file type to extract data from the appropriate time and location for ift in ftypes: - if row['dtUTC']>=fend[ift]: - fid,fend=_nextfile_bin(ift,row['dtUTC'],flist[ift],fid,fend,flist) + if row["dtUTC"] >= fend[ift]: + fid, fend = _nextfile_bin( + ift, row["dtUTC"], flist[ift], fid, fend, flist + ) # now read data # find time index try: - if 'time_centered_bounds' in fid[ift].variables.keys(): # no problem! - ih=_getTimeInd_bin(row['dtUTC'],fid[ift],torig[ift]) - else: # annoying! - hpf=(flist[ift]['t_n'][0]-flist[ift]['t_0'][0]).total_seconds()/3600 #hours per file - ih=_getTimeInd_bin(row['dtUTC'],fid[ift],torig[ift],hpf=hpf) + if "time_centered_bounds" in fid[ift].variables.keys(): # no problem! + ih = _getTimeInd_bin(row["dtUTC"], fid[ift], torig[ift]) + else: # annoying! 
+ hpf = ( + flist[ift]["t_n"][0] - flist[ift]["t_0"][0] + ).total_seconds() / 3600 # hours per file + ih = _getTimeInd_bin(row["dtUTC"], fid[ift], torig[ift], hpf=hpf) except: - print(row['dtUTC'], ift.torig[ift]) - tlist=fid[ift].variables['time_centered_bounds'][:,:] + print(row["dtUTC"], ift.torig[ift]) + tlist = fid[ift].variables["time_centered_bounds"][:, :] for el in tlist: print(el) - print((row['dtUTC']-torig[ift]).total_seconds()) - print(tlist[-1,1]) + print((row["dtUTC"] - torig[ift]).total_seconds()) + print(tlist[-1, 1]) raise # find depth indices (assume they may be reversed) - z_l=max(row['Z_upper'],row['Z_lower']) - z_u=min(row['Z_upper'],row['Z_lower']) - if len(set(fid[ift].variables.keys()).intersection(set(('deptht_bounds','depthu_bounds','depthv_bounds'))))>0: # no problem! - ik_l=_getZInd_bin(z_l,fid[ift],maskName=maskName) - ik_u=_getZInd_bin(z_u,fid[ift],maskName=maskName) - else: # workaround for missing variable - ik_l=_getZInd_bin(z_l,fid[ift],boundsFlag=True,maskName=maskName) - ik_u=_getZInd_bin(z_u,fid[ift],boundsFlag=True,maskName=maskName) + z_l = max(row["Z_upper"], row["Z_lower"]) + z_u = min(row["Z_upper"], row["Z_lower"]) + if ( + len( + set(fid[ift].variables.keys()).intersection( + set(("deptht_bounds", "depthu_bounds", "depthv_bounds")) + ) + ) + > 0 + ): # no problem! + ik_l = _getZInd_bin(z_l, fid[ift], maskName=maskName) + ik_u = _getZInd_bin(z_u, fid[ift], maskName=maskName) + else: # workaround for missing variable + ik_l = _getZInd_bin(z_l, fid[ift], boundsFlag=True, maskName=maskName) + ik_u = _getZInd_bin(z_u, fid[ift], boundsFlag=True, maskName=maskName) # assign values for each var assoc with ift - if (not np.isnan(ik_l)) and (not np.isnan(ik_u)) and \ - (gridmask[0,ik_u,row['j'],row['i']]==1): - data.loc[ind,['k_upper']]=int(ik_u) - data.loc[ind,['k_lower']]=int(ik_l) + if ( + (not np.isnan(ik_l)) + and (not np.isnan(ik_u)) + and (gridmask[0, ik_u, row["j"], row["i"]] == 1) + ): + data.loc[ind, ["k_upper"]] = int(ik_u) + data.loc[ind, ["k_lower"]] = int(ik_l) for ivar in filemap_r[ift]: - var=fid[ift].variables[ivar][ih,ik_u:(ik_l+1),row['j'],row['i']] - e3t=e3t0[ik_u:(ik_l+1),row['j'],row['i']] - imask=gridmask[0,ik_u:(ik_l+1),row['j'],row['i']] - meanvar=np.sum(var*e3t*imask)/np.sum(e3t*imask) - data.loc[ind,['mod_'+ivar]]=meanvar - if gridmask[0,ik_l,row['j'],row['i']]==0: - print(f"Warning: lower limit is not an ocean value:", - f" i={row['i']}, j={row['j']}, k_upper={ik_u}, k_lower={ik_l},", - f"k_seafloor={np.sum(imask)}", - f"Lon={row['Lon']}, Lat={row['Lat']}, dtUTC={row['dtUTC']}") + var = fid[ift].variables[ivar][ + ih, ik_u : (ik_l + 1), row["j"], row["i"] + ] + e3t = e3t0[ik_u : (ik_l + 1), row["j"], row["i"]] + imask = gridmask[0, ik_u : (ik_l + 1), row["j"], row["i"]] + meanvar = np.sum(var * e3t * imask) / np.sum(e3t * imask) + data.loc[ind, ["mod_" + ivar]] = meanvar + if gridmask[0, ik_l, row["j"], row["i"]] == 0: + print( + f"Warning: lower limit is not an ocean value:", + f" i={row['i']}, j={row['j']}, k_upper={ik_u}, k_lower={ik_l},", + f"k_seafloor={np.sum(imask)}", + f"Lon={row['Lon']}, Lat={row['Lat']}, dtUTC={row['dtUTC']}", + ) else: - print(f"Warning: upper limit is not an ocean value:", - f" i={row['i']}, j={row['j']}, k_upper={ik_u},Lat={row['Lat']},", - f"Lon={row['Lon']},dtUTC={row['dtUTC']}") + print( + f"Warning: upper limit is not an ocean value:", + f" i={row['i']}, j={row['j']}, k_upper={ik_u},Lat={row['Lat']},", + f"Lon={row['Lon']},dtUTC={row['dtUTC']}", + ) return data -def 
_binmatch(data,flist,ftypes,filemap_r,gridmask,maskName='tmask',sdim=3,preIndexed=False): - """ basic vertical matching of model output to data - returns model value from model grid cell that would contain the observation point with - no interpolation; no consideration of the changing of grid thickenss with the tides (vvl) - strategy: loop through data, openening and closing model files as needed and storing model data + +def _binmatch( + data, flist, ftypes, filemap_r, gridmask, maskName="tmask", sdim=3, preIndexed=False +): + """basic vertical matching of model output to data + returns model value from model grid cell that would contain the observation point with + no interpolation; no consideration of the changing of grid thickenss with the tides (vvl) + strategy: loop through data, openening and closing model files as needed and storing model data """ - if len(data)>5000: - pprint=True - lendat=len(data) + if len(data) > 5000: + pprint = True + lendat = len(data) else: - pprint= False + pprint = False if not preIndexed: - data['k']=-1*np.ones((len(data))).astype(int) + data["k"] = -1 * np.ones((len(data))).astype(int) for ind, row in data.iterrows(): - if (pprint==True and ind%5000==0): - print('progress: {}%'.format(ind/lendat*100)) - if ind==0: # special case for start of loop; load first files - fid=dict() - fend=dict() - torig=dict() + if pprint == True and ind % 5000 == 0: + print("progress: {}%".format(ind / lendat * 100)) + if ind == 0: # special case for start of loop; load first files + fid = dict() + fend = dict() + torig = dict() for ift in ftypes: - fid,fend=_nextfile_bin(ift,row['dtUTC'],flist[ift],fid,fend,flist) - if ift=='ops': # specially handle time origin for ops forcing files - torig[ift]=dt.datetime.strptime(fid[ftypes[0]].variables['time_counter'].time_origin,'%Y-%b-%d %H:%M:%S') - else: # handle NEMO files time reference - if 'time_centered' in fid[ftypes[0]].variables.keys(): - torig[ift]=dt.datetime.strptime(fid[ftypes[0]].variables['time_centered'].time_origin,'%Y-%m-%d %H:%M:%S') + fid, fend = _nextfile_bin( + ift, row["dtUTC"], flist[ift], fid, fend, flist + ) + if ift == "ops": # specially handle time origin for ops forcing files + torig[ift] = dt.datetime.strptime( + fid[ftypes[0]].variables["time_counter"].time_origin, + "%Y-%b-%d %H:%M:%S", + ) + else: # handle NEMO files time reference + if "time_centered" in fid[ftypes[0]].variables.keys(): + torig[ift] = dt.datetime.strptime( + fid[ftypes[0]].variables["time_centered"].time_origin, + "%Y-%m-%d %H:%M:%S", + ) else: - torig[ift]=dt.datetime.strptime(fid[ftypes[0]].variables['time_counter'].time_origin,'%Y-%m-%d %H:%M:%S') + torig[ift] = dt.datetime.strptime( + fid[ftypes[0]].variables["time_counter"].time_origin, + "%Y-%m-%d %H:%M:%S", + ) # loop through each file type to extract data from the appropriate time and location for ift in ftypes: - if row['dtUTC']>=fend[ift]: - fid,fend=_nextfile_bin(ift,row['dtUTC'],flist[ift],fid,fend,flist) + if row["dtUTC"] >= fend[ift]: + fid, fend = _nextfile_bin( + ift, row["dtUTC"], flist[ift], fid, fend, flist + ) # now read data # find time index - if ift=='ops': # special handling for ops atm forcing files - ih=_getTimeInd_bin_ops(row['dtUTC'],fid[ift],torig[ift]) - else: # NEMO files + if ift == "ops": # special handling for ops atm forcing files + ih = _getTimeInd_bin_ops(row["dtUTC"], fid[ift], torig[ift]) + else: # NEMO files try: - if 'time_centered_bounds' in fid[ift].variables.keys(): # no problem! 
- ih=_getTimeInd_bin(row['dtUTC'],fid[ift],torig[ift]) - else: # annoying! - hpf=(flist[ift]['t_n'][0]-flist[ift]['t_0'][0]).total_seconds()/3600 #hours per file - ih=_getTimeInd_bin(row['dtUTC'],fid[ift],torig[ift],hpf=hpf) + if ( + "time_centered_bounds" in fid[ift].variables.keys() + ): # no problem! + ih = _getTimeInd_bin(row["dtUTC"], fid[ift], torig[ift]) + else: # annoying! + hpf = ( + flist[ift]["t_n"][0] - flist[ift]["t_0"][0] + ).total_seconds() / 3600 # hours per file + ih = _getTimeInd_bin( + row["dtUTC"], fid[ift], torig[ift], hpf=hpf + ) except: - print('fend',fend) - print('flist[ift]',flist[ift]['paths'][0]) - print(row['dtUTC'],ift,torig[ift]) - tlist=fid[ift].variables['time_centered_bounds'][:,:] + print("fend", fend) + print("flist[ift]", flist[ift]["paths"][0]) + print(row["dtUTC"], ift, torig[ift]) + tlist = fid[ift].variables["time_centered_bounds"][:, :] for el in tlist: print(el) - print((row['dtUTC']-torig[ift]).total_seconds()) - print(tlist[-1,1]) + print((row["dtUTC"] - torig[ift]).total_seconds()) + print(tlist[-1, 1]) raise # find depth index if vars are 3d - if sdim==3: + if sdim == 3: if preIndexed: - ik=row['k'] + ik = row["k"] # assign values for each var assoc with ift - if (not np.isnan(ik)) and (gridmask[0,ik,row['j'],row['i']]==1): + if (not np.isnan(ik)) and ( + gridmask[0, ik, row["j"], row["i"]] == 1 + ): for ivar in filemap_r[ift]: try: - data.loc[ind,['mod_'+ivar]]=fid[ift].variables[ivar][ih,ik,row['j'],row['i']] + data.loc[ind, ["mod_" + ivar]] = fid[ift].variables[ + ivar + ][ih, ik, row["j"], row["i"]] except: - print(ind,ift,ih,ik,row['j'],row['i']) + print(ind, ift, ih, ik, row["j"], row["i"]) raise else: - if len(set(fid[ift].variables.keys()).intersection(set(('deptht_bounds','depthu_bounds','depthv_bounds'))))>0: # no problem! - ik=_getZInd_bin(row['Z'],fid[ift],maskName=maskName) - else: #workaround for missing variables in postprocessed files - ik=_getZInd_bin(row['Z'],fid[ift],boundsFlag=True,maskName=maskName) + if ( + len( + set(fid[ift].variables.keys()).intersection( + set(("deptht_bounds", "depthu_bounds", "depthv_bounds")) + ) + ) + > 0 + ): # no problem! 
+ ik = _getZInd_bin(row["Z"], fid[ift], maskName=maskName) + else: # workaround for missing variables in postprocessed files + ik = _getZInd_bin( + row["Z"], fid[ift], boundsFlag=True, maskName=maskName + ) # assign values for each var assoc with ift - if (not np.isnan(ik)) and (gridmask[0,ik,row['j'],row['i']]==1): - data.loc[ind,['k']]=int(ik) + if (not np.isnan(ik)) and ( + gridmask[0, ik, row["j"], row["i"]] == 1 + ): + data.loc[ind, ["k"]] = int(ik) for ivar in filemap_r[ift]: - data.loc[ind,['mod_'+ivar]]=fid[ift].variables[ivar][ih,ik,row['j'],row['i']] - elif sdim==2: + data.loc[ind, ["mod_" + ivar]] = fid[ift].variables[ivar][ + ih, ik, row["j"], row["i"] + ] + elif sdim == 2: # assign values for each var assoc with ift - if (gridmask[0,0,row['j'],row['i']]==1): + if gridmask[0, 0, row["j"], row["i"]] == 1: for ivar in filemap_r[ift]: - data.loc[ind,['mod_'+ivar]]=fid[ift].variables[ivar][ih,row['j'],row['i']] + data.loc[ind, ["mod_" + ivar]] = fid[ift].variables[ivar][ + ih, row["j"], row["i"] + ] else: - raise('invalid sdim') + raise ("invalid sdim") return data -def _vvlBin(data,flist,ftypes,filemap,filemap_r,tmask,fdict,e3tvar): - """ vertical matching of model output to data by bin method but considering vvl change in - grid thickness with tides + +def _vvlBin(data, flist, ftypes, filemap, filemap_r, tmask, fdict, e3tvar): + """vertical matching of model output to data by bin method but considering vvl change in + grid thickness with tides """ - data['k']=-1*np.ones((len(data))).astype(int) - ifte3t=filemap[e3tvar] - pere3t=fdict[ifte3t] - pers=np.unique([i for i in fdict.values()]) + data["k"] = -1 * np.ones((len(data))).astype(int) + ifte3t = filemap[e3tvar] + pere3t = fdict[ifte3t] + pers = np.unique([i for i in fdict.values()]) # reverse fdict - fdict_r=dict() + fdict_r = dict() for iii in pers: - fdict_r[iii]=list() + fdict_r[iii] = list() for ikey in fdict: fdict_r[fdict[ikey]].append(ikey) # so far we have only allowed for 1 file duration for all input files, so all indices equivalent # also, we are only dealing with data saved at same interval as e3t - test=fdict_r.copy() + test = fdict_r.copy() test.pop(pere3t) - if len(test)>0: # loop through and print eliminated variables - print('Warning: variables excluded because save interval mismatched with e3t:') + if len(test) > 0: # loop through and print eliminated variables + print("Warning: variables excluded because save interval mismatched with e3t:") for aa in test: for bb in fdict_r[aa]: print(filemap_r[bb]) - data['indf'] = [int(flist[ifte3t].loc[(aa>=flist[ifte3t].t_0)&(aa= flist[ifte3t].t_0) & (aa < flist[ifte3t].t_n)] + .index[0] + ) + for aa in data["dtUTC"] + ] + t2 = [flist[ifte3t].loc[aa, ["t_0"]].values[0] for aa in data["indf"].values] + data["ih"] = [ + int(np.floor((aa - bb).total_seconds() / (pere3t * 3600))) + for aa, bb in zip(data["dtUTC"], t2) + ] # now get appropriate e3t for each set of data points: - for indf,grp0 in data.groupby(['indf']): - with nc.Dataset(flist[ifte3t].loc[indf,['paths']].values[0]) as fe3t: - ff=dict() + for indf, grp0 in data.groupby(["indf"]): + with nc.Dataset(flist[ifte3t].loc[indf, ["paths"]].values[0]) as fe3t: + ff = dict() for ift in fdict_r[pere3t]: - ff[ift]=nc.Dataset(flist[ift].loc[indf,['paths']].values[0]) - for (ih,jj,ii),grp1 in grp0.groupby(['ih','j','i']): - e3t=fe3t.variables[e3tvar][ih,:,jj,ii][tmask[0,:,jj,ii]==1] - zl=np.zeros((len(e3t),2)) - zl[1:,0]=np.cumsum(e3t[:-1]) - zl[:,1]=np.cumsum(e3t) - ztar=grp1['Z'].values + ff[ift] = 
nc.Dataset(flist[ift].loc[indf, ["paths"]].values[0]) + for (ih, jj, ii), grp1 in grp0.groupby(["ih", "j", "i"]): + e3t = fe3t.variables[e3tvar][ih, :, jj, ii][tmask[0, :, jj, ii] == 1] + zl = np.zeros((len(e3t), 2)) + zl[1:, 0] = np.cumsum(e3t[:-1]) + zl[:, 1] = np.cumsum(e3t) + ztar = grp1["Z"].values for ift in fdict_r[pere3t]: - for iz, iind in zip(ztar,grp1.index): - ik=[iii for iii,hhh in enumerate(zl) if hhh[1]>iz][0] # return first index where latter endpoint is larger + for iz, iind in zip(ztar, grp1.index): + ik = [iii for iii, hhh in enumerate(zl) if hhh[1] > iz][ + 0 + ] # return first index where latter endpoint is larger # assign values for each var assoc with ift - if (not np.isnan(ik)) and (tmask[0,ik,jj,ii]==1): - data.loc[iind,['k']]=int(ik) + if (not np.isnan(ik)) and (tmask[0, ik, jj, ii] == 1): + data.loc[iind, ["k"]] = int(ik) for ivar in filemap_r[ift]: - data.loc[iind,['mod_'+ivar]]=ff[ift].variables[ivar][ih,ik,jj,ii] + data.loc[iind, ["mod_" + ivar]] = ff[ift].variables[ + ivar + ][ih, ik, jj, ii] for ift in fdict_r[pere3t]: ff[ift].close() return data -def _interpvvlZ(data,flist,ftypes,filemap,filemap_r,tmask,fdict,e3tvar): - """ vertical interpolation of model output to observation depths considering vvl change in - grid thickness with tides + +def _interpvvlZ(data, flist, ftypes, filemap, filemap_r, tmask, fdict, e3tvar): + """vertical interpolation of model output to observation depths considering vvl change in + grid thickness with tides """ - ifte3t=filemap.pop(e3tvar) - pere3t=fdict.pop(ifte3t) - pers=np.unique([i for i in fdict.values()]) + ifte3t = filemap.pop(e3tvar) + pere3t = fdict.pop(ifte3t) + pers = np.unique([i for i in fdict.values()]) # reverse fdict - fdict_r=dict() + fdict_r = dict() for iii in pers: - fdict_r[iii]=list() + fdict_r[iii] = list() for ikey in fdict: fdict_r[fdict[ikey]].append(ikey) # so far we have only allowed for 1 file duration for all input files, so all indices equivalent # also, we are only dealing with data saved at same interval as e3t - test=fdict_r.copy() + test = fdict_r.copy() test.pop(pere3t) - if len(test)>0: # loop through and print eliminated variables - print('Warning: variables excluded because save interval mismatched with e3t:') + if len(test) > 0: # loop through and print eliminated variables + print("Warning: variables excluded because save interval mismatched with e3t:") for aa in test: for bb in fdict_r[aa]: print(filemap_r[bb]) - data['indf'] = [int(flist[ifte3t].loc[(aa>=flist[ifte3t].t_0)&(aa= flist[ifte3t].t_0) & (aa < flist[ifte3t].t_n)] + .index[0] + ) + for aa in data["dtUTC"] + ] + t2 = [flist[ifte3t].loc[aa, ["t_0"]].values[0] for aa in data["indf"].values] + data["ih"] = [ + int(np.floor((aa - bb).total_seconds() / (pere3t * 3600))) + for aa, bb in zip(data["dtUTC"], t2) + ] # now get appropriate e3t for each set of data points: - for indf,grp0 in data.groupby(['indf']): - with nc.Dataset(flist[ifte3t].loc[indf,['paths']].values[0]) as fe3t: - ff=dict() + for indf, grp0 in data.groupby(["indf"]): + with nc.Dataset(flist[ifte3t].loc[indf, ["paths"]].values[0]) as fe3t: + ff = dict() for ift in fdict_r[pere3t]: - ff[ift]=nc.Dataset(flist[ift].loc[indf,['paths']].values[0]) - for (ih,jj,ii),grp1 in grp0.groupby(['ih','j','i']): - e3t=fe3t.variables[e3tvar][ih,:,jj,ii][tmask[0,:,jj,ii]==1] - zs=np.cumsum(e3t)-.5*e3t - ztar=grp1['Z'].values + ff[ift] = nc.Dataset(flist[ift].loc[indf, ["paths"]].values[0]) + for (ih, jj, ii), grp1 in grp0.groupby(["ih", "j", "i"]): + e3t = fe3t.variables[e3tvar][ih, 
:, jj, ii][tmask[0, :, jj, ii] == 1] + zs = np.cumsum(e3t) - 0.5 * e3t + ztar = grp1["Z"].values for ift in fdict_r[pere3t]: for ivar in filemap_r[ift]: - vals=ff[ift].variables[ivar][ih,:,jj,ii][tmask[0,:,jj,ii]==1] - data.loc[grp1.index,['mod_'+ivar]]=np.where(ztar5000: - pprint=True - lendat=len(data) + if len(data) > 5000: + pprint = True + lendat = len(data) else: - pprint= False + pprint = False for ift in ftypes: - data['indf_'+ift] = [int(flist[ift].loc[(aa>=flist[ift].t_0)&(aa= flist[ift].t_0) & (aa < flist[ift].t_n)].index[0]) + for aa in data["dtUTC"] + ] + t2 = [ + flist[ift].loc[aa, ["t_0"]].values[0] for aa in data["indf_" + ift].values + ] + data["ih_" + ift] = [ + int(np.floor((aa - bb).total_seconds() / (fdict[ift] * 3600))) + for aa, bb in zip(data["dtUTC"], t2) + ] + print("done index " + ift, dt.datetime.now()) + indflast = -1 for ind, row in data.iterrows(): - if (pprint==True and ind%np.round(lendat/10)==0): - print(ift,'progress: {}%'.format(ind/lendat*100)) - if not row['indf_'+ift]==indflast: - if not indflast==-1: + if pprint == True and ind % np.round(lendat / 10) == 0: + print(ift, "progress: {}%".format(ind / lendat * 100)) + if not row["indf_" + ift] == indflast: + if not indflast == -1: fid.close() - fid=nc.Dataset(flist[ift].loc[row['indf_'+ift],['paths']].values[0]) - indflast=row['indf_'+ift] + fid = nc.Dataset( + flist[ift].loc[row["indf_" + ift], ["paths"]].values[0] + ) + indflast = row["indf_" + ift] for ivar in filemap_r[ift]: - data.loc[ind,['mod_'+ivar]] = fid.variables[ivar][row['ih_'+ift], 0, row['j'], row['i']] + data.loc[ind, ["mod_" + ivar]] = fid.variables[ivar][ + row["ih_" + ift], 0, row["j"], row["i"] + ] return data -def _nextfile_bin(ift,idt,ifind,fid,fend,flist): # to do: replace flist[ift] with ifind and get rid of flist argument - """ close last file and open the next one""" + +def _nextfile_bin( + ift, idt, ifind, fid, fend, flist +): # to do: replace flist[ift] with ifind and get rid of flist argument + """close last file and open the next one""" if ift in fid.keys(): fid[ift].close() - frow=flist[ift].loc[(ifind.t_0<=idt)&(ifind.t_n>idt)] - #print('idt:',idt) - #print(frow) - #print('switched files: ',frow['paths'].values[0]) - fid[ift]=nc.Dataset(frow['paths'].values[0]) - fend[ift]=frow['t_n'].values[0] + frow = flist[ift].loc[(ifind.t_0 <= idt) & (ifind.t_n > idt)] + # print('idt:',idt) + # print(frow) + # print('switched files: ',frow['paths'].values[0]) + fid[ift] = nc.Dataset(frow["paths"].values[0]) + fend[ift] = frow["t_n"].values[0] return fid, fend -def _getTimeInd_bin(idt,ifid,torig,hpf=None): - """ find time index for SalishSeaCast output interval including observation time """ - if 'time_centered_bounds' in ifid.variables.keys(): - tlist=ifid.variables['time_centered_bounds'][:,:] + +def _getTimeInd_bin(idt, ifid, torig, hpf=None): + """find time index for SalishSeaCast output interval including observation time""" + if "time_centered_bounds" in ifid.variables.keys(): + tlist = ifid.variables["time_centered_bounds"][:, :] # return first index where latter endpoint is larger - ih=[iii for iii,hhh in enumerate(tlist) if hhh[1]>(idt-torig).total_seconds()][0] - else: # hacky fix because time_centered_bounds missing from post-processed daily files - nt=len(ifid.variables['time_counter'][:]) - if ('hours' in ifid.variables['time_counter'].units): + ih = [ + iii + for iii, hhh in enumerate(tlist) + if hhh[1] > (idt - torig).total_seconds() + ][0] + else: # hacky fix because time_centered_bounds missing from post-processed 
daily files + nt = len(ifid.variables["time_counter"][:]) + if "hours" in ifid.variables["time_counter"].units: tcorr = 3600 - elif ('seconds' in ifid.variables['time_counter'].units): + elif "seconds" in ifid.variables["time_counter"].units: tcorr = 1 else: - print ('problem in time_counter units') - tlist = [ii * tcorr + hpf / (nt * 2) * 3600 for ii in ifid.variables['time_counter'][:]] - ih=[iii for iii,hhh in enumerate(tlist) if hhh>(idt-torig).total_seconds()][0] + print("problem in time_counter units") + tlist = [ + ii * tcorr + hpf / (nt * 2) * 3600 + for ii in ifid.variables["time_counter"][:] + ] + ih = [ + iii for iii, hhh in enumerate(tlist) if hhh > (idt - torig).total_seconds() + ][0] return ih -def _getTimeInd_bin_ops(idt,ifid,torig): - """ find time index for ops file""" - tlist=ifid.variables['time_counter'][:].data - tinterval=ifid.variables['time_counter'].time_step - #ih=[iii for iii,hhh in enumerate(tlist) if (hhh+tinterval/2)>(idt-torig).total_seconds()][0] + +def _getTimeInd_bin_ops(idt, ifid, torig): + """find time index for ops file""" + tlist = ifid.variables["time_counter"][:].data + tinterval = ifid.variables["time_counter"].time_step + # ih=[iii for iii,hhh in enumerate(tlist) if (hhh+tinterval/2)>(idt-torig).total_seconds()][0] ## NEMO is reading in files as if they were on the half hour so do the same: # return first index where latter endpoint is larger - ih=[iii for iii,hhh in enumerate(tlist) if (hhh+tinterval)>(idt-torig).total_seconds()][0] + ih = [ + iii + for iii, hhh in enumerate(tlist) + if (hhh + tinterval) > (idt - torig).total_seconds() + ][0] return ih -def _getZInd_bin(idt,ifid=None,boundsFlag=False,maskName='tmask'): - """ get vertical index of cell containing observation depth """ - if boundsFlag==True: - with nc.Dataset('/home/sallen/MEOPAR/grid/depth_grid_bounds.nc') as ftemp: - if maskName == 'tmask': - tlist=ftemp.variables['deptht_bounds'][:,:] - elif maskName=='umask': - tlist=ftemp.variables['depthu_bounds'][:,:] - elif maskName=='vmask': - tlist=ftemp.variables['depthv_bounds'][:,:] + +def _getZInd_bin(idt, ifid=None, boundsFlag=False, maskName="tmask"): + """get vertical index of cell containing observation depth""" + if boundsFlag == True: + with nc.Dataset("/home/sallen/MEOPAR/grid/depth_grid_bounds.nc") as ftemp: + if maskName == "tmask": + tlist = ftemp.variables["deptht_bounds"][:, :] + elif maskName == "umask": + tlist = ftemp.variables["depthu_bounds"][:, :] + elif maskName == "vmask": + tlist = ftemp.variables["depthv_bounds"][:, :] else: - raise('choice not coded') + raise ("choice not coded") else: - dboundvar={'tmask':'deptht_bounds','umask':'depthu_bounds','vmask':'depthv_bounds'} - tlist=ifid.variables[dboundvar[maskName]][:,:] - if idt<=np.max(tlist): - ih=[iii for iii,hhh in enumerate(tlist) if hhh[1]>idt][0] # return first index where latter endpoint is larger + dboundvar = { + "tmask": "deptht_bounds", + "umask": "depthu_bounds", + "vmask": "depthv_bounds", + } + tlist = ifid.variables[dboundvar[maskName]][:, :] + if idt <= np.max(tlist): + ih = [iii for iii, hhh in enumerate(tlist) if hhh[1] > idt][ + 0 + ] # return first index where latter endpoint is larger else: - ih=np.nan + ih = np.nan return ih -def index_model_files(start,end,basedir,nam_fmt,flen,ftype=None,tres=1): + +def index_model_files(start, end, basedir, nam_fmt, flen, ftype=None, tres=1): """ See inputs for matchData above. 
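
# Illustrative sketch (not part of the patch): the "first bin whose upper bound exceeds
# the value" lookup used by _getTimeInd_bin() and _getZInd_bin() above, with synthetic
# depth-bin bounds.  np.searchsorted on the upper bounds gives the same index as the
# list comprehension used in the code above.
import numpy as np

bounds = np.array([[0.0, 1.0], [1.0, 2.0], [2.0, 4.0], [4.0, 8.0]])  # [lower, upper] per cell
z_obs = 2.5
ik = [i for i, b in enumerate(bounds) if b[1] > z_obs][0]  # pattern from the code above
assert ik == 2
assert ik == np.searchsorted(bounds[:, 1], z_obs, side="right")
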
outputs pandas dataframe containing columns 'paths','t_0', and 't_1' where paths are all the model output files of a given type in the time interval (start,end) with end not included """ - if ftype not in ('ptrc_T', 'grid_T', 'grid_W', 'grid_U', 'grid_V', - 'dia1_T', 'carp_T', 'biol_T', 'chem_T', 'None', None): - print('ftype={}, are you sure? (if yes, add to list)'.format(ftype)) - if tres==24: - ftres='1d' + if ftype not in ( + "ptrc_T", + "grid_T", + "grid_W", + "grid_U", + "grid_V", + "dia1_T", + "carp_T", + "biol_T", + "chem_T", + "None", + None, + ): + print("ftype={}, are you sure? (if yes, add to list)".format(ftype)) + if tres == 24: + ftres = "1d" else: - ftres=str(int(tres))+'h' - ffmt='%Y%m%d' - dfmt='%d%b%y' - wfmt='y%Ym%md%d' - if nam_fmt=='nowcast': - stencil='{0}/SalishSea_'+ftres+'_{1}_{2}_'+ftype+'.nc' - elif nam_fmt=='long': - stencil='**/SalishSea_'+ftres+'*'+ftype+'_{1}-{2}.nc' - elif nam_fmt=='sockeye': - stencil=f'*/SalishSea_{ftres}*{ftype}_{{1}}-{{2}}.nc' - elif nam_fmt == 'optimum': - stencil = f'???????/SalishSea_{ftres}*{ftype}_{{1}}-{{2}}.nc' - elif nam_fmt=='wind': - stencil='ops_{3}.nc' - elif nam_fmt=='ops': - stencil='ops_{3}.nc' - elif nam_fmt=='gemlam': - stencil='gemlam_{3}.nc' - elif nam_fmt=='forcing': # use ftype as prefix - stencil=ftype+'_{3}.nc' + ftres = str(int(tres)) + "h" + ffmt = "%Y%m%d" + dfmt = "%d%b%y" + wfmt = "y%Ym%md%d" + if nam_fmt == "nowcast": + stencil = "{0}/SalishSea_" + ftres + "_{1}_{2}_" + ftype + ".nc" + elif nam_fmt == "long": + stencil = "**/SalishSea_" + ftres + "*" + ftype + "_{1}-{2}.nc" + elif nam_fmt == "sockeye": + stencil = f"*/SalishSea_{ftres}*{ftype}_{{1}}-{{2}}.nc" + elif nam_fmt == "optimum": + stencil = f"???????/SalishSea_{ftres}*{ftype}_{{1}}-{{2}}.nc" + elif nam_fmt == "wind": + stencil = "ops_{3}.nc" + elif nam_fmt == "ops": + stencil = "ops_{3}.nc" + elif nam_fmt == "gemlam": + stencil = "gemlam_{3}.nc" + elif nam_fmt == "forcing": # use ftype as prefix + stencil = ftype + "_{3}.nc" else: - raise Exception('nam_fmt '+nam_fmt+' is not defined') - #Note fix: to avoid errors if hour and second included with start and end time, strip them! - iits=dt.datetime(start.year,start.month,start.day) - iite=iits+dt.timedelta(days=(flen-1)) + raise Exception("nam_fmt " + nam_fmt + " is not defined") + # Note fix: to avoid errors if hour and second included with start and end time, strip them! + iits = dt.datetime(start.year, start.month, start.day) + iite = iits + dt.timedelta(days=(flen - 1)) # check if start is a file start date and if not, try to identify the file including it # (in case start date is in the middle of a multi-day file) - nday=0 + nday = 0 while True: try: - ipathstr=os.path.join(basedir,stencil.format(iits.strftime(dfmt).lower(), - iits.strftime(ffmt),iite.strftime(ffmt),iits.strftime(wfmt))) - iifstr=glob.glob(ipathstr,recursive=True)[0] - if nday>0: - print('first file starts on ',iits) - break # file has been found + ipathstr = os.path.join( + basedir, + stencil.format( + iits.strftime(dfmt).lower(), + iits.strftime(ffmt), + iite.strftime(ffmt), + iits.strftime(wfmt), + ), + ) + iifstr = glob.glob(ipathstr, recursive=True)[0] + if nday > 0: + print("first file starts on ", iits) + break # file has been found except IndexError: - nday=nday+1 - if nday==flen: - iits_str=iits.strftime('%Y %b %d') - start_str=start.strftime('%Y %b %d') - if flen==1: - exc_msg= (f'\nFile not found:\n{ipathstr}\n' - f'Check that results directory is accessible and the start date entered is included in the run. 
\n') + nday = nday + 1 + if nday == flen: + iits_str = iits.strftime("%Y %b %d") + start_str = start.strftime("%Y %b %d") + if flen == 1: + exc_msg = ( + f"\nFile not found:\n{ipathstr}\n" + f"Check that results directory is accessible and the start date entered is included in the run. \n" + ) else: - exc_msg= (f'\nDays per output file is set to {flen}. \n' - f'No file found with start date in range {iits_str} to {start_str} \n' - f'of form {ipathstr}\n' - f'Check that results directory is accessible and the start date entered is included in run. \n') - raise Exception(exc_msg) # file has not been found - iits=start-dt.timedelta(days=nday) - iite=iits+dt.timedelta(days=(flen-1)) - ind=0 - inds=list() - paths=list() - t_0=list() - t_n=list() - while iitsstart)&(idf['t_0'] start) & (idf["t_0"] < end) + idf = idf.loc[ilocs, :].copy(deep=True) + idf = idf.sort_values(["t_0"]).reset_index(drop=True) return idf -def loadDFOCTD(basedir='/ocean/shared/SalishSeaCastData/DFO/CTD/', dbname='DFO_CTD.sqlite', - datelims=()): +def loadDFOCTD( + basedir="/ocean/shared/SalishSeaCastData/DFO/CTD/", + dbname="DFO_CTD.sqlite", + datelims=(), +): """ load DFO CTD data stored in SQLite database (exclude most points outside Salish Sea) basedir is location of database @@ -772,67 +1029,153 @@ def loadDFOCTD(basedir='/ocean/shared/SalishSeaCastData/DFO/CTD/', dbname='DFO_C from sqlalchemy.ext.automap import automap_base from sqlalchemy.sql import and_, or_, not_, func except ImportError: - raise ImportError('You need to install sqlalchemy in your environment to use this function.') + raise ImportError( + "You need to install sqlalchemy in your environment to use this function." + ) # definitions # if db does not exist, exit if not os.path.isfile(os.path.join(basedir, dbname)): - raise Exception(f'ERROR: {dbname} does not exist in {basedir}') - engine = create_engine('sqlite:///' + basedir + dbname, echo = False) + raise Exception(f"ERROR: {dbname} does not exist in {basedir}") + engine = create_engine("sqlite:///" + basedir + dbname, echo=False) Base = automap_base() # reflect the tables in salish.sqlite: Base.prepare(engine, reflect=True) # mapped classes have been created # existing tables: - StationTBL=Base.classes.StationTBL - ObsTBL=Base.classes.ObsTBL - CalcsTBL=Base.classes.CalcsTBL - session = create_session(bind = engine, autocommit = False, autoflush = True) - SA=case([(CalcsTBL.Salinity_T0_C0_SA!=None, CalcsTBL.Salinity_T0_C0_SA)], else_= - case([(CalcsTBL.Salinity_T1_C1_SA!=None, CalcsTBL.Salinity_T1_C1_SA)], else_= - case([(CalcsTBL.Salinity_SA!=None, CalcsTBL.Salinity_SA)], else_= None))) - CT=case([(CalcsTBL.Temperature_Primary_CT!=None, CalcsTBL.Temperature_Primary_CT)], else_= - case([(CalcsTBL.Temperature_Secondary_CT!=None, CalcsTBL.Temperature_Secondary_CT)], else_=CalcsTBL.Temperature_CT)) - ZD=case([(ObsTBL.Depth!=None,ObsTBL.Depth)], else_= CalcsTBL.Z) - FL=case([(ObsTBL.Fluorescence_URU_Seapoint!=None,ObsTBL.Fluorescence_URU_Seapoint)], else_= ObsTBL.Fluorescence_URU_Wetlabs) - if len(datelims)<2: - qry=session.query(StationTBL.StartYear.label('Year'),StationTBL.StartMonth.label('Month'), - StationTBL.StartDay.label('Day'),StationTBL.StartHour.label('Hour'), - StationTBL.Lat,StationTBL.Lon,ZD.label('Z'),SA.label('SA'),CT.label('CT'),FL.label('Fluor'), - ObsTBL.Oxygen_Dissolved_SBE.label('DO_mLL'),ObsTBL.Oxygen_Dissolved_SBE_1.label('DO_umolkg')).\ - select_from(StationTBL).join(ObsTBL,ObsTBL.StationTBLID==StationTBL.ID).\ - 
join(CalcsTBL,CalcsTBL.ObsTBLID==ObsTBL.ID).filter(and_(StationTBL.Lat>47-3/2.5*(StationTBL.Lon+123.5), - StationTBL.Lat<47-3/2.5*(StationTBL.Lon+121), - StationTBL.Include==True,ObsTBL.Include==True,CalcsTBL.Include==True)) + StationTBL = Base.classes.StationTBL + ObsTBL = Base.classes.ObsTBL + CalcsTBL = Base.classes.CalcsTBL + session = create_session(bind=engine, autocommit=False, autoflush=True) + SA = case( + [(CalcsTBL.Salinity_T0_C0_SA != None, CalcsTBL.Salinity_T0_C0_SA)], + else_=case( + [(CalcsTBL.Salinity_T1_C1_SA != None, CalcsTBL.Salinity_T1_C1_SA)], + else_=case( + [(CalcsTBL.Salinity_SA != None, CalcsTBL.Salinity_SA)], else_=None + ), + ), + ) + CT = case( + [(CalcsTBL.Temperature_Primary_CT != None, CalcsTBL.Temperature_Primary_CT)], + else_=case( + [ + ( + CalcsTBL.Temperature_Secondary_CT != None, + CalcsTBL.Temperature_Secondary_CT, + ) + ], + else_=CalcsTBL.Temperature_CT, + ), + ) + ZD = case([(ObsTBL.Depth != None, ObsTBL.Depth)], else_=CalcsTBL.Z) + FL = case( + [(ObsTBL.Fluorescence_URU_Seapoint != None, ObsTBL.Fluorescence_URU_Seapoint)], + else_=ObsTBL.Fluorescence_URU_Wetlabs, + ) + if len(datelims) < 2: + qry = ( + session.query( + StationTBL.StartYear.label("Year"), + StationTBL.StartMonth.label("Month"), + StationTBL.StartDay.label("Day"), + StationTBL.StartHour.label("Hour"), + StationTBL.Lat, + StationTBL.Lon, + ZD.label("Z"), + SA.label("SA"), + CT.label("CT"), + FL.label("Fluor"), + ObsTBL.Oxygen_Dissolved_SBE.label("DO_mLL"), + ObsTBL.Oxygen_Dissolved_SBE_1.label("DO_umolkg"), + ) + .select_from(StationTBL) + .join(ObsTBL, ObsTBL.StationTBLID == StationTBL.ID) + .join(CalcsTBL, CalcsTBL.ObsTBLID == ObsTBL.ID) + .filter( + and_( + StationTBL.Lat > 47 - 3 / 2.5 * (StationTBL.Lon + 123.5), + StationTBL.Lat < 47 - 3 / 2.5 * (StationTBL.Lon + 121), + StationTBL.Include == True, + ObsTBL.Include == True, + CalcsTBL.Include == True, + ) + ) + ) else: - start_y=datelims[0].year - start_m=datelims[0].month - start_d=datelims[0].day - end_y=datelims[1].year - end_m=datelims[1].month - end_d=datelims[1].day - qry=session.query(StationTBL.StartYear.label('Year'),StationTBL.StartMonth.label('Month'), - StationTBL.StartDay.label('Day'),StationTBL.StartHour.label('Hour'), - StationTBL.Lat,StationTBL.Lon,ZD.label('Z'),SA.label('SA'),CT.label('CT'),FL.label('Fluor'),\ - ObsTBL.Oxygen_Dissolved_SBE.label('DO_mLL'),ObsTBL.Oxygen_Dissolved_SBE_1.label('DO_umolkg')).\ - select_from(StationTBL).join(ObsTBL,ObsTBL.StationTBLID==StationTBL.ID).\ - join(CalcsTBL,CalcsTBL.ObsTBLID==ObsTBL.ID).filter(and_(or_(StationTBL.StartYear>start_y, - and_(StationTBL.StartYear==start_y, StationTBL.StartMonth>start_m), - and_(StationTBL.StartYear==start_y, StationTBL.StartMonth==start_m, StationTBL.StartDay>=start_d)), - or_(StationTBL.StartYear47-3/2.5*(StationTBL.Lon+123.5), - StationTBL.Lat<47-3/2.5*(StationTBL.Lon+121), - StationTBL.Include==True,ObsTBL.Include==True,CalcsTBL.Include==True)) - df1=pd.read_sql_query(qry.statement, engine) - df1['dtUTC']=[dt.datetime(int(y),int(m),int(d))+dt.timedelta(hours=h) for y,m,d,h in zip(df1['Year'],df1['Month'],df1['Day'],df1['Hour'])] + start_y = datelims[0].year + start_m = datelims[0].month + start_d = datelims[0].day + end_y = datelims[1].year + end_m = datelims[1].month + end_d = datelims[1].day + qry = ( + session.query( + StationTBL.StartYear.label("Year"), + StationTBL.StartMonth.label("Month"), + StationTBL.StartDay.label("Day"), + StationTBL.StartHour.label("Hour"), + StationTBL.Lat, + StationTBL.Lon, + ZD.label("Z"), + SA.label("SA"), + 
CT.label("CT"), + FL.label("Fluor"), + ObsTBL.Oxygen_Dissolved_SBE.label("DO_mLL"), + ObsTBL.Oxygen_Dissolved_SBE_1.label("DO_umolkg"), + ) + .select_from(StationTBL) + .join(ObsTBL, ObsTBL.StationTBLID == StationTBL.ID) + .join(CalcsTBL, CalcsTBL.ObsTBLID == ObsTBL.ID) + .filter( + and_( + or_( + StationTBL.StartYear > start_y, + and_( + StationTBL.StartYear == start_y, + StationTBL.StartMonth > start_m, + ), + and_( + StationTBL.StartYear == start_y, + StationTBL.StartMonth == start_m, + StationTBL.StartDay >= start_d, + ), + ), + or_( + StationTBL.StartYear < end_y, + and_( + StationTBL.StartYear == end_y, StationTBL.StartMonth < end_m + ), + and_( + StationTBL.StartYear == end_y, + StationTBL.StartMonth == end_m, + StationTBL.StartDay < end_d, + ), + ), + StationTBL.Lat > 47 - 3 / 2.5 * (StationTBL.Lon + 123.5), + StationTBL.Lat < 47 - 3 / 2.5 * (StationTBL.Lon + 121), + StationTBL.Include == True, + ObsTBL.Include == True, + CalcsTBL.Include == True, + ) + ) + ) + df1 = pd.read_sql_query(qry.statement, engine) + df1["dtUTC"] = [ + dt.datetime(int(y), int(m), int(d)) + dt.timedelta(hours=h) + for y, m, d, h in zip(df1["Year"], df1["Month"], df1["Day"], df1["Hour"]) + ] session.close() engine.dispose() return df1 -def loadDFO(basedir='/ocean/eolson/MEOPAR/obs/DFOOPDB/', dbname='DFO_OcProfDB.sqlite', - datelims=(),excludeSaanich=True): + +def loadDFO( + basedir="/ocean/eolson/MEOPAR/obs/DFOOPDB/", + dbname="DFO_OcProfDB.sqlite", + datelims=(), + excludeSaanich=True, +): """ load DFO data stored in SQLite database basedir is location of database @@ -845,480 +1188,1167 @@ def loadDFO(basedir='/ocean/eolson/MEOPAR/obs/DFOOPDB/', dbname='DFO_OcProfDB.sq from sqlalchemy.ext.automap import automap_base from sqlalchemy.sql import and_, or_, not_, func except ImportError: - raise ImportError('You need to install sqlalchemy in your environment to use this function.') + raise ImportError( + "You need to install sqlalchemy in your environment to use this function." 
+ ) # definitions # if db does not exist, exit if not os.path.isfile(os.path.join(basedir, dbname)): - raise Exception('ERROR: {}.sqlite does not exist'.format(dbname)) - engine = create_engine('sqlite:///' + basedir + dbname, echo = False) + raise Exception("ERROR: {}.sqlite does not exist".format(dbname)) + engine = create_engine("sqlite:///" + basedir + dbname, echo=False) Base = automap_base() # reflect the tables in salish.sqlite: Base.prepare(engine, reflect=True) # mapped classes have been created # existing tables: - StationTBL=Base.classes.StationTBL - ObsTBL=Base.classes.ObsTBL - CalcsTBL=Base.classes.CalcsTBL - session = create_session(bind = engine, autocommit = False, autoflush = True) - SA=case([(CalcsTBL.Salinity_Bottle_SA!=None, CalcsTBL.Salinity_Bottle_SA)], else_= - case([(CalcsTBL.Salinity_T0_C0_SA!=None, CalcsTBL.Salinity_T0_C0_SA)], else_= - case([(CalcsTBL.Salinity_T1_C1_SA!=None, CalcsTBL.Salinity_T1_C1_SA)], else_= - case([(CalcsTBL.Salinity_SA!=None, CalcsTBL.Salinity_SA)], else_= - case([(CalcsTBL.Salinity__Unknown_SA!=None, CalcsTBL.Salinity__Unknown_SA)], - else_=CalcsTBL.Salinity__Pre1978_SA) - )))) - Tem=case([(ObsTBL.Temperature!=None, ObsTBL.Temperature)], else_= - case([(ObsTBL.Temperature_Primary!=None, ObsTBL.Temperature_Primary)], else_= - case([(ObsTBL.Temperature_Secondary!=None, ObsTBL.Temperature_Secondary)], else_=ObsTBL.Temperature_Reversing))) - TemUnits=case([(ObsTBL.Temperature!=None, ObsTBL.Temperature_units)], else_= - case([(ObsTBL.Temperature_Primary!=None, ObsTBL.Temperature_Primary_units)], else_= - case([(ObsTBL.Temperature_Secondary!=None, ObsTBL.Temperature_Secondary_units)], - else_=ObsTBL.Temperature_Reversing_units))) - TemFlag=ObsTBL.Quality_Flag_Temp - CT=case([(CalcsTBL.Temperature_CT!=None, CalcsTBL.Temperature_CT)], else_= - case([(CalcsTBL.Temperature_Primary_CT!=None, CalcsTBL.Temperature_Primary_CT)], else_= - case([(CalcsTBL.Temperature_Secondary_CT!=None, CalcsTBL.Temperature_Secondary_CT)], - else_=CalcsTBL.Temperature_Reversing_CT) - )) - - if len(datelims)<2: - qry=session.query(StationTBL.StartYear.label('Year'),StationTBL.StartMonth.label('Month'), - StationTBL.StartDay.label('Day'),StationTBL.StartHour.label('Hour'), - StationTBL.Lat,StationTBL.Lon, - ObsTBL.Pressure,ObsTBL.Depth,ObsTBL.Chlorophyll_Extracted, - ObsTBL.Chlorophyll_Extracted_units,ObsTBL.Nitrate_plus_Nitrite.label('N'), - ObsTBL.Silicate.label('Si'),ObsTBL.Silicate_units,SA.label('AbsSal'),CT.label('ConsT'), - ObsTBL.Oxygen_Dissolved,ObsTBL.Oxygen_Dissolved_units).\ - select_from(StationTBL).join(ObsTBL,ObsTBL.StationTBLID==StationTBL.ID).\ - join(CalcsTBL,CalcsTBL.ObsID==ObsTBL.ID).filter(and_(StationTBL.Lat>47-3/2.5*(StationTBL.Lon+123.5), - StationTBL.Lat<47-3/2.5*(StationTBL.Lon+121))) + StationTBL = Base.classes.StationTBL + ObsTBL = Base.classes.ObsTBL + CalcsTBL = Base.classes.CalcsTBL + session = create_session(bind=engine, autocommit=False, autoflush=True) + SA = case( + [(CalcsTBL.Salinity_Bottle_SA != None, CalcsTBL.Salinity_Bottle_SA)], + else_=case( + [(CalcsTBL.Salinity_T0_C0_SA != None, CalcsTBL.Salinity_T0_C0_SA)], + else_=case( + [(CalcsTBL.Salinity_T1_C1_SA != None, CalcsTBL.Salinity_T1_C1_SA)], + else_=case( + [(CalcsTBL.Salinity_SA != None, CalcsTBL.Salinity_SA)], + else_=case( + [ + ( + CalcsTBL.Salinity__Unknown_SA != None, + CalcsTBL.Salinity__Unknown_SA, + ) + ], + else_=CalcsTBL.Salinity__Pre1978_SA, + ), + ), + ), + ), + ) + Tem = case( + [(ObsTBL.Temperature != None, ObsTBL.Temperature)], + else_=case( + 
[(ObsTBL.Temperature_Primary != None, ObsTBL.Temperature_Primary)], + else_=case( + [(ObsTBL.Temperature_Secondary != None, ObsTBL.Temperature_Secondary)], + else_=ObsTBL.Temperature_Reversing, + ), + ), + ) + TemUnits = case( + [(ObsTBL.Temperature != None, ObsTBL.Temperature_units)], + else_=case( + [(ObsTBL.Temperature_Primary != None, ObsTBL.Temperature_Primary_units)], + else_=case( + [ + ( + ObsTBL.Temperature_Secondary != None, + ObsTBL.Temperature_Secondary_units, + ) + ], + else_=ObsTBL.Temperature_Reversing_units, + ), + ), + ) + TemFlag = ObsTBL.Quality_Flag_Temp + CT = case( + [(CalcsTBL.Temperature_CT != None, CalcsTBL.Temperature_CT)], + else_=case( + [ + ( + CalcsTBL.Temperature_Primary_CT != None, + CalcsTBL.Temperature_Primary_CT, + ) + ], + else_=case( + [ + ( + CalcsTBL.Temperature_Secondary_CT != None, + CalcsTBL.Temperature_Secondary_CT, + ) + ], + else_=CalcsTBL.Temperature_Reversing_CT, + ), + ), + ) + + if len(datelims) < 2: + qry = ( + session.query( + StationTBL.StartYear.label("Year"), + StationTBL.StartMonth.label("Month"), + StationTBL.StartDay.label("Day"), + StationTBL.StartHour.label("Hour"), + StationTBL.Lat, + StationTBL.Lon, + ObsTBL.Pressure, + ObsTBL.Depth, + ObsTBL.Chlorophyll_Extracted, + ObsTBL.Chlorophyll_Extracted_units, + ObsTBL.Nitrate_plus_Nitrite.label("N"), + ObsTBL.Silicate.label("Si"), + ObsTBL.Silicate_units, + SA.label("AbsSal"), + CT.label("ConsT"), + ObsTBL.Oxygen_Dissolved, + ObsTBL.Oxygen_Dissolved_units, + ) + .select_from(StationTBL) + .join(ObsTBL, ObsTBL.StationTBLID == StationTBL.ID) + .join(CalcsTBL, CalcsTBL.ObsID == ObsTBL.ID) + .filter( + and_( + StationTBL.Lat > 47 - 3 / 2.5 * (StationTBL.Lon + 123.5), + StationTBL.Lat < 47 - 3 / 2.5 * (StationTBL.Lon + 121), + ) + ) + ) else: - start_y=datelims[0].year - start_m=datelims[0].month - start_d=datelims[0].day - end_y=datelims[1].year - end_m=datelims[1].month - end_d=datelims[1].day - qry=session.query(StationTBL.StartYear.label('Year'),StationTBL.StartMonth.label('Month'), - StationTBL.StartDay.label('Day'),StationTBL.StartHour.label('Hour'), - StationTBL.Lat,StationTBL.Lon, - ObsTBL.Pressure,ObsTBL.Depth,ObsTBL.Chlorophyll_Extracted, - ObsTBL.Chlorophyll_Extracted_units,ObsTBL.Nitrate_plus_Nitrite.label('N'), - ObsTBL.Silicate.label('Si'),ObsTBL.Silicate_units,SA.label('AbsSal'),CT.label('ConsT'), - ObsTBL.Oxygen_Dissolved,ObsTBL.Oxygen_Dissolved_units).\ - select_from(StationTBL).join(ObsTBL,ObsTBL.StationTBLID==StationTBL.ID).\ - join(CalcsTBL,CalcsTBL.ObsID==ObsTBL.ID).filter(and_(or_(StationTBL.StartYear>start_y, - and_(StationTBL.StartYear==start_y, StationTBL.StartMonth>start_m), - and_(StationTBL.StartYear==start_y, StationTBL.StartMonth==start_m, StationTBL.StartDay>=start_d)), - or_(StationTBL.StartYear47-3/2.5*(StationTBL.Lon+123.5), - StationTBL.Lat<47-3/2.5*(StationTBL.Lon+121)))#, - #not_(and_(StationTBL.Lat>48.77,StationTBL.Lat<49.27, - # StationTBL.Lon<-123.43)))) + start_y = datelims[0].year + start_m = datelims[0].month + start_d = datelims[0].day + end_y = datelims[1].year + end_m = datelims[1].month + end_d = datelims[1].day + qry = ( + session.query( + StationTBL.StartYear.label("Year"), + StationTBL.StartMonth.label("Month"), + StationTBL.StartDay.label("Day"), + StationTBL.StartHour.label("Hour"), + StationTBL.Lat, + StationTBL.Lon, + ObsTBL.Pressure, + ObsTBL.Depth, + ObsTBL.Chlorophyll_Extracted, + ObsTBL.Chlorophyll_Extracted_units, + ObsTBL.Nitrate_plus_Nitrite.label("N"), + ObsTBL.Silicate.label("Si"), + ObsTBL.Silicate_units, + SA.label("AbsSal"), + 
CT.label("ConsT"), + ObsTBL.Oxygen_Dissolved, + ObsTBL.Oxygen_Dissolved_units, + ) + .select_from(StationTBL) + .join(ObsTBL, ObsTBL.StationTBLID == StationTBL.ID) + .join(CalcsTBL, CalcsTBL.ObsID == ObsTBL.ID) + .filter( + and_( + or_( + StationTBL.StartYear > start_y, + and_( + StationTBL.StartYear == start_y, + StationTBL.StartMonth > start_m, + ), + and_( + StationTBL.StartYear == start_y, + StationTBL.StartMonth == start_m, + StationTBL.StartDay >= start_d, + ), + ), + or_( + StationTBL.StartYear < end_y, + and_( + StationTBL.StartYear == end_y, StationTBL.StartMonth < end_m + ), + and_( + StationTBL.StartYear == end_y, + StationTBL.StartMonth == end_m, + StationTBL.StartDay < end_d, + ), + ), + StationTBL.Lat > 47 - 3 / 2.5 * (StationTBL.Lon + 123.5), + StationTBL.Lat < 47 - 3 / 2.5 * (StationTBL.Lon + 121), + ) + ) + ) # , + # not_(and_(StationTBL.Lat>48.77,StationTBL.Lat<49.27, + # StationTBL.Lon<-123.43)))) if excludeSaanich: - qry1=qry.filter(not_(and_(StationTBL.Lat>48.47,StationTBL.Lat<48.67, - StationTBL.Lon>-123.6,StationTBL.Lon<-123.43))) - df1=pd.read_sql_query(qry1.statement, engine) + qry1 = qry.filter( + not_( + and_( + StationTBL.Lat > 48.47, + StationTBL.Lat < 48.67, + StationTBL.Lon > -123.6, + StationTBL.Lon < -123.43, + ) + ) + ) + df1 = pd.read_sql_query(qry1.statement, engine) else: - df1=pd.read_sql_query(qry.statement, engine) - df1['Z']=np.where(df1['Depth']>=0,df1['Depth'],-1.0*gsw.z_from_p(p=df1['Pressure'].values,lat=df1['Lat'].values)) - df1['dtUTC']=[dt.datetime(int(y),int(m),int(d))+dt.timedelta(hours=h) for ind, (y,m,d,h) in df1.loc[:,['Year','Month','Day','Hour']].iterrows()] + df1 = pd.read_sql_query(qry.statement, engine) + df1["Z"] = np.where( + df1["Depth"] >= 0, + df1["Depth"], + -1.0 * gsw.z_from_p(p=df1["Pressure"].values, lat=df1["Lat"].values), + ) + df1["dtUTC"] = [ + dt.datetime(int(y), int(m), int(d)) + dt.timedelta(hours=h) + for ind, (y, m, d, h) in df1.loc[:, ["Year", "Month", "Day", "Hour"]].iterrows() + ] session.close() engine.dispose() return df1 + def _lt0convert(arg): # convert text '<0' to numeric zero since nutrient concentrations cannot be negative - if arg=='<0': - val=0.0 + if arg == "<0": + val = 0.0 else: - val=pd.to_numeric(arg, errors='coerce',downcast=None) + val = pd.to_numeric(arg, errors="coerce", downcast=None) return float(val) -def loadPSF(datelims=(),loadChl=True,loadCTD=False): - """ load PSF data from spreadsheets, optionally loading matched T and S data from nearest CTD casts """ - dfs=list() - dfchls=list() - if len(datelims)<2: - datelims=(dt.datetime(2014,1,1),dt.datetime(2020,1,1)) + +def loadPSF(datelims=(), loadChl=True, loadCTD=False): + """load PSF data from spreadsheets, optionally loading matched T and S data from nearest CTD casts""" + dfs = list() + dfchls = list() + if len(datelims) < 2: + datelims = (dt.datetime(2014, 1, 1), dt.datetime(2020, 1, 1)) if loadCTD: - ctddfs=dict() - if datelims[0].year<2016: + ctddfs = dict() + if datelims[0].year < 2016: # load 2015 - f2015 = pd.read_excel('/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx', - sheet_name = '2015 N+P+Si',dtype={'date (dd/mm/yyyy)':str},engine=excelEngine) - f2015=f2015.drop(f2015.loc[(f2015['lon']<-360)|(f2015['lon']>360)].index) - f2015 = f2015.dropna(subset = ['date (dd/mm/yyyy)', 'Time (Local)', 'lat', 'lon', 'depth'], how='any') - ds=f2015['date (dd/mm/yyyy)'].values - ts=f2015['Time (Local)'].values - dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime.strptime(ii,'%Y-%m-%d 
%H:%M:%S')+dt.timedelta(hours=jj.hour,minutes=jj.minute,seconds=jj.second) - ).astimezone(pytz.utc).replace(tzinfo=None) for ii,jj in zip(ds,ts)] - f2015['dtUTC']=dts - f2015.rename(columns={'lat':'Lat','lon':'Lon','depth':'Z','station':'Station','no23':'NO23','po4':'PO4','si':'Si'},inplace=True) - f2015.drop(['num','date (dd/mm/yyyy)','Time (Local)'],axis=1,inplace=True) - f2015_g=f2015.groupby(['Station','Lat','Lon','dtUTC','Z'],as_index=False) - f2015_m=f2015_g.mean() - f2015=f2015_m.reindex() + f2015 = pd.read_excel( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx", + sheet_name="2015 N+P+Si", + dtype={"date (dd/mm/yyyy)": str}, + engine=excelEngine, + ) + f2015 = f2015.drop( + f2015.loc[(f2015["lon"] < -360) | (f2015["lon"] > 360)].index + ) + f2015 = f2015.dropna( + subset=["date (dd/mm/yyyy)", "Time (Local)", "lat", "lon", "depth"], + how="any", + ) + ds = f2015["date (dd/mm/yyyy)"].values + ts = f2015["Time (Local)"].values + dts = [ + pytz.timezone("Canada/Pacific") + .localize( + dt.datetime.strptime(ii, "%Y-%m-%d %H:%M:%S") + + dt.timedelta(hours=jj.hour, minutes=jj.minute, seconds=jj.second) + ) + .astimezone(pytz.utc) + .replace(tzinfo=None) + for ii, jj in zip(ds, ts) + ] + f2015["dtUTC"] = dts + f2015.rename( + columns={ + "lat": "Lat", + "lon": "Lon", + "depth": "Z", + "station": "Station", + "no23": "NO23", + "po4": "PO4", + "si": "Si", + }, + inplace=True, + ) + f2015.drop(["num", "date (dd/mm/yyyy)", "Time (Local)"], axis=1, inplace=True) + f2015_g = f2015.groupby(["Station", "Lat", "Lon", "dtUTC", "Z"], as_index=False) + f2015_m = f2015_g.mean() + f2015 = f2015_m.reindex() dfs.append(f2015) if loadChl: # load 2015 chl - Chl2015=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/Chla_2015PSFSalish_Sea_22.01.2018vers_8_CN_edits.csv',encoding='latin-1', - dtype={'Date sampled (mm/dd/yyyy)':str, 'Time of Day (Local)':str, - 'Latitude':str,'Longitude':str,'Chl a':float,'Phaeophytin':float,'Depth':float},parse_dates=False) - degminlat=[ii.split('ç') for ii in Chl2015['Latitude'].values] - Chl2015['Lat']=[float(ii[0])+float(ii[1])/60 for ii in degminlat] - degminlon=[ii.split('ç') for ii in Chl2015['Longitude'].values] - Chl2015['Lon']=[-1.0*(float(ii[0])+float(ii[1])/60) for ii in degminlon] - Chl2015 = Chl2015.dropna(subset = ['Date sampled (mm/dd/yyyy)', 'Time of Day (Local)', 'Lat', 'Lon', 'Depth'], how='any') - ds=Chl2015['Date sampled (mm/dd/yyyy)'] - ts=Chl2015['Time of Day (Local)'] - dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime.strptime(ii+'T'+jj,'%m/%d/%yT%I:%M:%S %p')).astimezone(pytz.utc).replace(tzinfo=None) - for ii,jj in zip(ds,ts)] - Chl2015['dtUTC']=dts - Chl2015['Z']=[float(ii) for ii in Chl2015['Depth']] - Chl2015.drop(['Date sampled (mm/dd/yyyy)','Time of Day (Local)','Latitude','Longitude','Depth'],axis=1,inplace=True) - Chl2015.rename(columns={'Chl a':'Chl','Phaeophytin':'Phaeo','Station Name':'Station'},inplace=True) - Chl2015_g=Chl2015.groupby(['Station','Lat','Lon','dtUTC','Z'],as_index=False) - Chl2015_m=Chl2015_g.mean() - Chl2015=Chl2015_m.reindex() + Chl2015 = pd.read_csv( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/Chla_2015PSFSalish_Sea_22.01.2018vers_8_CN_edits.csv", + encoding="latin-1", + dtype={ + "Date sampled (mm/dd/yyyy)": str, + "Time of Day (Local)": str, + "Latitude": str, + "Longitude": str, + "Chl a": float, + "Phaeophytin": float, + "Depth": float, + }, + parse_dates=False, + ) + degminlat = [ii.split("ç") for ii in Chl2015["Latitude"].values] + Chl2015["Lat"] = [float(ii[0]) + float(ii[1]) / 60 for 
ii in degminlat] + degminlon = [ii.split("ç") for ii in Chl2015["Longitude"].values] + Chl2015["Lon"] = [ + -1.0 * (float(ii[0]) + float(ii[1]) / 60) for ii in degminlon + ] + Chl2015 = Chl2015.dropna( + subset=[ + "Date sampled (mm/dd/yyyy)", + "Time of Day (Local)", + "Lat", + "Lon", + "Depth", + ], + how="any", + ) + ds = Chl2015["Date sampled (mm/dd/yyyy)"] + ts = Chl2015["Time of Day (Local)"] + dts = [ + pytz.timezone("Canada/Pacific") + .localize(dt.datetime.strptime(ii + "T" + jj, "%m/%d/%yT%I:%M:%S %p")) + .astimezone(pytz.utc) + .replace(tzinfo=None) + for ii, jj in zip(ds, ts) + ] + Chl2015["dtUTC"] = dts + Chl2015["Z"] = [float(ii) for ii in Chl2015["Depth"]] + Chl2015.drop( + [ + "Date sampled (mm/dd/yyyy)", + "Time of Day (Local)", + "Latitude", + "Longitude", + "Depth", + ], + axis=1, + inplace=True, + ) + Chl2015.rename( + columns={ + "Chl a": "Chl", + "Phaeophytin": "Phaeo", + "Station Name": "Station", + }, + inplace=True, + ) + Chl2015_g = Chl2015.groupby( + ["Station", "Lat", "Lon", "dtUTC", "Z"], as_index=False + ) + Chl2015_m = Chl2015_g.mean() + Chl2015 = Chl2015_m.reindex() dfchls.append(Chl2015) if loadCTD: - phys2015=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/phys/CitSci2015_20180621.csv',skiprows=lambda x: x in [0,1,2,3,4,6],delimiter=',', - dtype={'Patrol': str,'ID':str,'station':str,'datetime':str,'latitude':float,'longitude':float}, - converters={'pressure': lambda x: float(x),'depth': lambda x: float(x),'temperature': lambda x: float(x), - 'conductivity': lambda x: float(x),'salinity': lambda x: float(x), - 'o2SAT': lambda x: float(x),'o2uM':lambda x: float(x),'chl':lambda x: float(x)}) - ctddfs[2015]=dict() - ctddfs[2015]['df']=phys2015 - ctddfs[2015]['dtlims']=(dt.datetime(2014,12,31),dt.datetime(2016,1,1)) - if (datelims[0].year<2017) and (datelims[1].year>2015): + phys2015 = pd.read_csv( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/phys/CitSci2015_20180621.csv", + skiprows=lambda x: x in [0, 1, 2, 3, 4, 6], + delimiter=",", + dtype={ + "Patrol": str, + "ID": str, + "station": str, + "datetime": str, + "latitude": float, + "longitude": float, + }, + converters={ + "pressure": lambda x: float(x), + "depth": lambda x: float(x), + "temperature": lambda x: float(x), + "conductivity": lambda x: float(x), + "salinity": lambda x: float(x), + "o2SAT": lambda x: float(x), + "o2uM": lambda x: float(x), + "chl": lambda x: float(x), + }, + ) + ctddfs[2015] = dict() + ctddfs[2015]["df"] = phys2015 + ctddfs[2015]["dtlims"] = ( + dt.datetime(2014, 12, 31), + dt.datetime(2016, 1, 1), + ) + if (datelims[0].year < 2017) and (datelims[1].year > 2015): # load 2016 - f2016N = pd.read_excel('/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx', - sheet_name = '2016 N+P',dtype={'NO3+NO':str,'PO4':str},na_values=('nan','NaN','30..09'), - engine=excelEngine) + f2016N = pd.read_excel( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx", + sheet_name="2016 N+P", + dtype={"NO3+NO": str, "PO4": str}, + na_values=("nan", "NaN", "30..09"), + engine=excelEngine, + ) f2016N = f2016N.drop(f2016N.keys()[11:], axis=1) - f2016N['NO23']=[_lt0convert(ii) for ii in f2016N['NO3+NO']] - f2016N['PO4_2']=[_lt0convert(ii) for ii in f2016N['PO4']] - f2016N = f2016N.dropna(subset = ['Date (dd/mm/yyyy)', 'Time (Local)', 'Latitude', 'Longitude', 'Depth'], how='any') - ds=f2016N['Date (dd/mm/yyyy)'] - ts=f2016N['Time (Local)'] - 
dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime(ii.year,ii.month,ii.day)+dt.timedelta(hours=jj.hour,minutes=jj.minute,seconds=jj.second) - ).astimezone(pytz.utc).replace(tzinfo=None) for ii,jj in zip(ds,ts)] - f2016N['dtUTC']=dts - f2016N.drop(['Crew','Date (dd/mm/yyyy)','Time (Local)', 'Lat_reported', - 'Long_reported','PO4','NO3+NO'],axis=1,inplace=True) - f2016N.rename(columns={'PO4_2':'PO4','Latitude':'Lat','Longitude':'Lon','Depth':'Z'},inplace=True) - f2016N_g=f2016N.groupby(['Station','Lat','Lon','dtUTC','Z'],as_index=False) - f2016N_m=f2016N_g.mean() - f2016Si = pd.read_excel('/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx', - sheet_name = '2016 SiO2',engine=excelEngine) + f2016N["NO23"] = [_lt0convert(ii) for ii in f2016N["NO3+NO"]] + f2016N["PO4_2"] = [_lt0convert(ii) for ii in f2016N["PO4"]] + f2016N = f2016N.dropna( + subset=[ + "Date (dd/mm/yyyy)", + "Time (Local)", + "Latitude", + "Longitude", + "Depth", + ], + how="any", + ) + ds = f2016N["Date (dd/mm/yyyy)"] + ts = f2016N["Time (Local)"] + dts = [ + pytz.timezone("Canada/Pacific") + .localize( + dt.datetime(ii.year, ii.month, ii.day) + + dt.timedelta(hours=jj.hour, minutes=jj.minute, seconds=jj.second) + ) + .astimezone(pytz.utc) + .replace(tzinfo=None) + for ii, jj in zip(ds, ts) + ] + f2016N["dtUTC"] = dts + f2016N.drop( + [ + "Crew", + "Date (dd/mm/yyyy)", + "Time (Local)", + "Lat_reported", + "Long_reported", + "PO4", + "NO3+NO", + ], + axis=1, + inplace=True, + ) + f2016N.rename( + columns={ + "PO4_2": "PO4", + "Latitude": "Lat", + "Longitude": "Lon", + "Depth": "Z", + }, + inplace=True, + ) + f2016N_g = f2016N.groupby( + ["Station", "Lat", "Lon", "dtUTC", "Z"], as_index=False + ) + f2016N_m = f2016N_g.mean() + f2016Si = pd.read_excel( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx", + sheet_name="2016 SiO2", + engine=excelEngine, + ) f2016Si = f2016Si.drop(f2016Si.keys()[9:], axis=1) - f2016Si = f2016Si.dropna(subset = ['DDMMYYYY', 'Time (Local)', 'Latitude', 'Longitude', 'Depth'], how='any') - ds=f2016Si['DDMMYYYY'] - ts=f2016Si['Time (Local)'] - dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime(ii.year,ii.month,ii.day)+dt.timedelta(hours=jj.hour,minutes=jj.minute,seconds=jj.second) - ).astimezone(pytz.utc).replace(tzinfo=None) for ii,jj in zip(ds,ts)] - f2016Si['dtUTC']=dts - z=[0 if (iii=='S') else float(iii) for iii in f2016Si['Depth'].values] - f2016Si['Z']=z - f2016Si.rename(columns={'Latitude':'Lat','Longitude':'Lon','SiO2 µM':'Si','Site ID':'Station'},inplace=True) - f2016Si.drop(['DDMMYYYY','Time (Local)', 'Lat_reported', - 'Long_reported','Depth'],axis=1,inplace=True) - f2016Si_g=f2016Si.groupby(['Station','Lat','Lon','dtUTC','Z'],as_index=False) - f2016Si_m=f2016Si_g.mean() - f2016 = pd.merge(f2016N_m, f2016Si_m, how='outer', left_on=['Station','Lat','Lon','dtUTC','Z'], right_on = ['Station','Lat','Lon','dtUTC','Z']) + f2016Si = f2016Si.dropna( + subset=["DDMMYYYY", "Time (Local)", "Latitude", "Longitude", "Depth"], + how="any", + ) + ds = f2016Si["DDMMYYYY"] + ts = f2016Si["Time (Local)"] + dts = [ + pytz.timezone("Canada/Pacific") + .localize( + dt.datetime(ii.year, ii.month, ii.day) + + dt.timedelta(hours=jj.hour, minutes=jj.minute, seconds=jj.second) + ) + .astimezone(pytz.utc) + .replace(tzinfo=None) + for ii, jj in zip(ds, ts) + ] + f2016Si["dtUTC"] = dts + z = [0 if (iii == "S") else float(iii) for iii in f2016Si["Depth"].values] + f2016Si["Z"] = z + f2016Si.rename( + columns={ + "Latitude": "Lat", + "Longitude": "Lon", + 
"SiO2 µM": "Si", + "Site ID": "Station", + }, + inplace=True, + ) + f2016Si.drop( + ["DDMMYYYY", "Time (Local)", "Lat_reported", "Long_reported", "Depth"], + axis=1, + inplace=True, + ) + f2016Si_g = f2016Si.groupby( + ["Station", "Lat", "Lon", "dtUTC", "Z"], as_index=False + ) + f2016Si_m = f2016Si_g.mean() + f2016 = pd.merge( + f2016N_m, + f2016Si_m, + how="outer", + left_on=["Station", "Lat", "Lon", "dtUTC", "Z"], + right_on=["Station", "Lat", "Lon", "dtUTC", "Z"], + ) dfs.append(f2016) if loadChl: # load 2016 chl - Chl2016Dat=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/2016ChlorophyllChlData.csv')#,encoding='latin-1') - Chl2016Sta=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/2016ChlorophyllStationData.csv') - Chl2016Sta.rename(columns={'DateCollected ':'DateCollected','Latitude':'Lat','Longitude':'Lon'},inplace=True) - Chl2016Sta.dropna(subset = ['DateCollected', 'TimeCollected', 'Lat','Lon', 'Depth_m'], how='any',inplace=True) + Chl2016Dat = pd.read_csv( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/2016ChlorophyllChlData.csv" + ) # ,encoding='latin-1') + Chl2016Sta = pd.read_csv( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/2016ChlorophyllStationData.csv" + ) + Chl2016Sta.rename( + columns={ + "DateCollected ": "DateCollected", + "Latitude": "Lat", + "Longitude": "Lon", + }, + inplace=True, + ) + Chl2016Sta.dropna( + subset=["DateCollected", "TimeCollected", "Lat", "Lon", "Depth_m"], + how="any", + inplace=True, + ) Chl2016Sta.drop_duplicates(inplace=True) - Chl2016Dat.drop(Chl2016Dat.loc[Chl2016Dat.quality_flag>3].index,axis=0,inplace=True) - Chl2016Dat.drop(['Chla_ugL','Phaeophytin_ugL','quality_flag','ShipBoat'],axis=1,inplace=True) - Chl2016Dat.rename(columns={'MeanChla_ugL':'Chl','MeanPhaeophytin_ugL':'Phaeo'},inplace=True) - Chl2016=pd.merge(Chl2016Sta,Chl2016Dat,how='inner', left_on=['DateCollected','Station','Depth_m'], right_on = ['DateCollected','Station','Depth_m']) - Chl2016['Z']=[float(ii) for ii in Chl2016['Depth_m']] - ds=Chl2016['DateCollected'] - ts=Chl2016['TimeCollected'] - dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime.strptime(ii+'T'+jj,'%m-%d-%YT%I:%M:%S %p')).astimezone(pytz.utc).replace(tzinfo=None) - for ii,jj in zip(ds,ts)] - Chl2016['dtUTC']=dts - Chl2016.drop(['DateCollected','TimeCollected','CV'],axis=1,inplace=True) + Chl2016Dat.drop( + Chl2016Dat.loc[Chl2016Dat.quality_flag > 3].index, axis=0, inplace=True + ) + Chl2016Dat.drop( + ["Chla_ugL", "Phaeophytin_ugL", "quality_flag", "ShipBoat"], + axis=1, + inplace=True, + ) + Chl2016Dat.rename( + columns={"MeanChla_ugL": "Chl", "MeanPhaeophytin_ugL": "Phaeo"}, + inplace=True, + ) + Chl2016 = pd.merge( + Chl2016Sta, + Chl2016Dat, + how="inner", + left_on=["DateCollected", "Station", "Depth_m"], + right_on=["DateCollected", "Station", "Depth_m"], + ) + Chl2016["Z"] = [float(ii) for ii in Chl2016["Depth_m"]] + ds = Chl2016["DateCollected"] + ts = Chl2016["TimeCollected"] + dts = [ + pytz.timezone("Canada/Pacific") + .localize(dt.datetime.strptime(ii + "T" + jj, "%m-%d-%YT%I:%M:%S %p")) + .astimezone(pytz.utc) + .replace(tzinfo=None) + for ii, jj in zip(ds, ts) + ] + Chl2016["dtUTC"] = dts + Chl2016.drop(["DateCollected", "TimeCollected", "CV"], axis=1, inplace=True) dfchls.append(Chl2016) if loadCTD: - phys2016=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/phys/CitSci2016_20180621.csv',skiprows=lambda x: x in [0,1,2,3,4,5,6,7,9],delimiter=',', - dtype={'Patrol': str,'ID':str,'station':str,'datetime':str,'latitude':float,'longitude':float}, - converters={'pressure': lambda x: float(x),'depth': lambda x: 
float(x),'temperature': lambda x: float(x), - 'conductivity': lambda x: float(x),'salinity': lambda x: float(x), - 'o2SAT': lambda x: float(x),'o2uM':lambda x: float(x),'chl':lambda x: float(x)}) - ctddfs[2016]=dict() - ctddfs[2016]['df']=phys2016 - ctddfs[2016]['dtlims']=(dt.datetime(2015,12,31),dt.datetime(2017,1,1)) - if (datelims[1].year>2016): + phys2016 = pd.read_csv( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/phys/CitSci2016_20180621.csv", + skiprows=lambda x: x in [0, 1, 2, 3, 4, 5, 6, 7, 9], + delimiter=",", + dtype={ + "Patrol": str, + "ID": str, + "station": str, + "datetime": str, + "latitude": float, + "longitude": float, + }, + converters={ + "pressure": lambda x: float(x), + "depth": lambda x: float(x), + "temperature": lambda x: float(x), + "conductivity": lambda x: float(x), + "salinity": lambda x: float(x), + "o2SAT": lambda x: float(x), + "o2uM": lambda x: float(x), + "chl": lambda x: float(x), + }, + ) + ctddfs[2016] = dict() + ctddfs[2016]["df"] = phys2016 + ctddfs[2016]["dtlims"] = ( + dt.datetime(2015, 12, 31), + dt.datetime(2017, 1, 1), + ) + if datelims[1].year > 2016: # load 2017 - f2017 = pd.read_excel('/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx', - sheet_name = '2017 N+P+Si',skiprows=3,dtype={'Date (dd/mm/yyyy)':dt.date,'Time (Local)':dt.time, - 'NO3+NO':str,'PO4':str,'Si':str},engine=excelEngine) - f2017['NO23']=[_lt0convert(ii) for ii in f2017['NO3+NO']] - f2017['PO4_2']=[_lt0convert(ii) for ii in f2017['PO4']] - f2017['Si_2']=[_lt0convert(ii) for ii in f2017['Si']] - degminlat=[ii.split('°') for ii in f2017['Latitude'].values] - f2017['Lat']=[float(ii[0])+float(ii[1])/60 for ii in degminlat] - degminlon=[ii.split('°') for ii in f2017['Longitude'].values] - f2017['Lon']=[-1.0*(float(ii[0])+float(ii[1])/60) for ii in degminlon] - f2017 = f2017.dropna(subset = ['Date (dd/mm/yyyy)', 'Time (Local)', 'Lat', 'Lon', 'Depth'], how='any') - ds=f2017['Date (dd/mm/yyyy)'] - ts=f2017['Time (Local)'] - dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime(ii.year,ii.month,ii.day)+dt.timedelta(hours=jj.hour,minutes=jj.minute,seconds=jj.second) - ).astimezone(pytz.utc).replace(tzinfo=None) for ii,jj in zip(ds,ts)] - f2017['dtUTC']=dts - f2017.drop(['Crew','Date (dd/mm/yyyy)','Time (Local)','Comments','Latitude','Longitude','NO3+NO'],axis=1,inplace=True) - f2017.rename(columns={'Depth':'Z','PO4_2':'PO4','Si_2':'Si'},inplace=True) - f2017_g=f2017.groupby(['Station','Lat','Lon','dtUTC','Z'],as_index=False) - f2017_m=f2017_g.mean() - f2017=f2017_m.reindex() + f2017 = pd.read_excel( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx", + sheet_name="2017 N+P+Si", + skiprows=3, + dtype={ + "Date (dd/mm/yyyy)": dt.date, + "Time (Local)": dt.time, + "NO3+NO": str, + "PO4": str, + "Si": str, + }, + engine=excelEngine, + ) + f2017["NO23"] = [_lt0convert(ii) for ii in f2017["NO3+NO"]] + f2017["PO4_2"] = [_lt0convert(ii) for ii in f2017["PO4"]] + f2017["Si_2"] = [_lt0convert(ii) for ii in f2017["Si"]] + degminlat = [ii.split("°") for ii in f2017["Latitude"].values] + f2017["Lat"] = [float(ii[0]) + float(ii[1]) / 60 for ii in degminlat] + degminlon = [ii.split("°") for ii in f2017["Longitude"].values] + f2017["Lon"] = [-1.0 * (float(ii[0]) + float(ii[1]) / 60) for ii in degminlon] + f2017 = f2017.dropna( + subset=["Date (dd/mm/yyyy)", "Time (Local)", "Lat", "Lon", "Depth"], + how="any", + ) + ds = f2017["Date (dd/mm/yyyy)"] + ts = f2017["Time (Local)"] + dts = [ + pytz.timezone("Canada/Pacific") + .localize( + 
dt.datetime(ii.year, ii.month, ii.day) + + dt.timedelta(hours=jj.hour, minutes=jj.minute, seconds=jj.second) + ) + .astimezone(pytz.utc) + .replace(tzinfo=None) + for ii, jj in zip(ds, ts) + ] + f2017["dtUTC"] = dts + f2017.drop( + [ + "Crew", + "Date (dd/mm/yyyy)", + "Time (Local)", + "Comments", + "Latitude", + "Longitude", + "NO3+NO", + ], + axis=1, + inplace=True, + ) + f2017.rename(columns={"Depth": "Z", "PO4_2": "PO4", "Si_2": "Si"}, inplace=True) + f2017_g = f2017.groupby(["Station", "Lat", "Lon", "dtUTC", "Z"], as_index=False) + f2017_m = f2017_g.mean() + f2017 = f2017_m.reindex() dfs.append(f2017) if loadChl: # load 2017 chl - Chl2017=pd.read_excel('/ocean/eolson/MEOPAR/obs/PSFCitSci/PSF 2017 Chla_Data_Final_v-January 22-2018_CN_edits.xlsx', - sheet_name='avg-mean-cv%',skiprows=15,usecols=[1,3,4,5,7,9,11], - names=['Date','Station','Time','Z0','Chl','Qflag','Phaeo'],engine=excelEngine) - Chl2017.dropna(subset=['Station','Date','Time','Z0'],how='any',inplace=True) - Chl2017.dropna(subset=['Chl','Phaeo'],how='all',inplace=True) - Chl2017.drop(Chl2017.loc[Chl2017.Qflag>3].index,axis=0,inplace=True) - ds=Chl2017['Date'] - ts=Chl2017['Time'] - dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime(ii.year,ii.month,ii.day)+dt.timedelta(hours=jj.hour,minutes=jj.minute,seconds=jj.second)).astimezone(pytz.utc).replace(tzinfo=None) - for ii,jj in zip(ds,ts)] - Chl2017['dtUTC']=dts - staMap2017=f2017.loc[:,['Station','Lat','Lon']].copy(deep=True) + Chl2017 = pd.read_excel( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/PSF 2017 Chla_Data_Final_v-January 22-2018_CN_edits.xlsx", + sheet_name="avg-mean-cv%", + skiprows=15, + usecols=[1, 3, 4, 5, 7, 9, 11], + names=["Date", "Station", "Time", "Z0", "Chl", "Qflag", "Phaeo"], + engine=excelEngine, + ) + Chl2017.dropna( + subset=["Station", "Date", "Time", "Z0"], how="any", inplace=True + ) + Chl2017.dropna(subset=["Chl", "Phaeo"], how="all", inplace=True) + Chl2017.drop(Chl2017.loc[Chl2017.Qflag > 3].index, axis=0, inplace=True) + ds = Chl2017["Date"] + ts = Chl2017["Time"] + dts = [ + pytz.timezone("Canada/Pacific") + .localize( + dt.datetime(ii.year, ii.month, ii.day) + + dt.timedelta(hours=jj.hour, minutes=jj.minute, seconds=jj.second) + ) + .astimezone(pytz.utc) + .replace(tzinfo=None) + for ii, jj in zip(ds, ts) + ] + Chl2017["dtUTC"] = dts + staMap2017 = f2017.loc[:, ["Station", "Lat", "Lon"]].copy(deep=True) staMap2017.drop_duplicates(inplace=True) - Chl2017=pd.merge(Chl2017,staMap2017,how='inner', left_on=['Station'], right_on = ['Station']) - Chl2017['Z']=[float(ii) for ii in Chl2017['Z0']] - Chl2017.drop(['Qflag','Date','Z0','Time'],axis=1,inplace=True) + Chl2017 = pd.merge( + Chl2017, + staMap2017, + how="inner", + left_on=["Station"], + right_on=["Station"], + ) + Chl2017["Z"] = [float(ii) for ii in Chl2017["Z0"]] + Chl2017.drop(["Qflag", "Date", "Z0", "Time"], axis=1, inplace=True) dfchls.append(Chl2017) if loadCTD: - phys2017=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/phys/CitSci2017_20180621.csv',skiprows=lambda x: x in [0,1,2,3,4,5,7],delimiter=',', - dtype={'Patrol': str,'ID':str,'station':str,'datetime':str,'latitude':float,'longitude':float}, - converters={'pressure': lambda x: float(x),'depth': lambda x: float(x),'temperature': lambda x: float(x), - 'conductivity': lambda x: float(x),'salinity': lambda x: float(x), - 'o2SAT': lambda x: float(x),'o2uM':lambda x: float(x),'chl':lambda x: float(x)}) - ctddfs[2017]=dict() - ctddfs[2017]['df']=phys2017 - ctddfs[2017]['dtlims']=(dt.datetime(2016,12,31),dt.datetime(2018,1,1)) - if 
len(dfs)>1: - df=pd.concat(dfs,ignore_index=True,sort=True) + phys2017 = pd.read_csv( + "/ocean/eolson/MEOPAR/obs/PSFCitSci/phys/CitSci2017_20180621.csv", + skiprows=lambda x: x in [0, 1, 2, 3, 4, 5, 7], + delimiter=",", + dtype={ + "Patrol": str, + "ID": str, + "station": str, + "datetime": str, + "latitude": float, + "longitude": float, + }, + converters={ + "pressure": lambda x: float(x), + "depth": lambda x: float(x), + "temperature": lambda x: float(x), + "conductivity": lambda x: float(x), + "salinity": lambda x: float(x), + "o2SAT": lambda x: float(x), + "o2uM": lambda x: float(x), + "chl": lambda x: float(x), + }, + ) + ctddfs[2017] = dict() + ctddfs[2017]["df"] = phys2017 + ctddfs[2017]["dtlims"] = ( + dt.datetime(2016, 12, 31), + dt.datetime(2018, 1, 1), + ) + if len(dfs) > 1: + df = pd.concat(dfs, ignore_index=True, sort=True) if loadChl: - dfChl=pd.concat(dfchls, ignore_index=True,sort=True) + dfChl = pd.concat(dfchls, ignore_index=True, sort=True) else: - df=dfs[0] + df = dfs[0] if loadChl: - dfChl=dfchls[0] + dfChl = dfchls[0] if loadChl: - df_a=pd.merge(df, dfChl, how='outer', left_on=['Station','Lat','Lon','dtUTC','Z'], right_on = ['Station','Lat','Lon','dtUTC','Z']) - df=df_a + df_a = pd.merge( + df, + dfChl, + how="outer", + left_on=["Station", "Lat", "Lon", "dtUTC", "Z"], + right_on=["Station", "Lat", "Lon", "dtUTC", "Z"], + ) + df = df_a # set surface sample to more likely value of 0.55 m to aid matching with CTD data # extra 0.01 is to make np.round round up to 1 so that CTD data can match - df.loc[df.Z==0,['Z']]=0.51 - df=df.loc[(df.dtUTC>=datelims[0])&(df.dtUTC= datelims[0]) & (df.dtUTC < datelims[1])].copy(deep=True) if loadCTD: - df1=df.copy(deep=True) + df1 = df.copy(deep=True) for ik in ctddfs.keys(): - idf=ctddfs[ik]['df'] - dtsP=[dt.datetime.strptime(ii,'%d/%m/%Y %H:%M:%S') for ii in idf['datetime'].values] - tPacP=utc_to_pac(dtsP) - PacDayP=[dt.datetime(ii.year,ii.month,ii.day) for ii in tPacP] - idf['tPacPhys']=tPacP - idf['PacDay']=PacDayP - idf['dtUTCPhys']=dtsP - idf['StaTrim']=[ii.replace('-','') for ii in idf['station']] - idf['HrsPast']=[(ii-dt.datetime(2014,1,1)).total_seconds()/3600 for ii in dtsP] - idf['CT']=[gsw.CT_from_t(SA,t,p) for SA, t, p, in zip(idf['salinity'],idf['temperature'],idf['pressure'])] - tPac=utc_to_pac(df['dtUTC']) - PacDay=[dt.datetime(ii.year,ii.month,ii.day) for ii in tPac] - df1['tPac']=tPac - df1['PacDay']=PacDay - df1['StaTrim']=[ii.replace('-','') for ii in df['Station']] - df1['HrsPast']=[(ii-dt.datetime(2014,1,1)).total_seconds()/3600 for ii in df['dtUTC']] - df['SA']=np.nan - df['CT']=np.nan - df['pLat']=np.nan - df['pLon']=np.nan - df['tdiffH']=np.nan + idf = ctddfs[ik]["df"] + dtsP = [ + dt.datetime.strptime(ii, "%d/%m/%Y %H:%M:%S") + for ii in idf["datetime"].values + ] + tPacP = utc_to_pac(dtsP) + PacDayP = [dt.datetime(ii.year, ii.month, ii.day) for ii in tPacP] + idf["tPacPhys"] = tPacP + idf["PacDay"] = PacDayP + idf["dtUTCPhys"] = dtsP + idf["StaTrim"] = [ii.replace("-", "") for ii in idf["station"]] + idf["HrsPast"] = [ + (ii - dt.datetime(2014, 1, 1)).total_seconds() / 3600 for ii in dtsP + ] + idf["CT"] = [ + gsw.CT_from_t(SA, t, p) + for SA, t, p, in zip( + idf["salinity"], idf["temperature"], idf["pressure"] + ) + ] + tPac = utc_to_pac(df["dtUTC"]) + PacDay = [dt.datetime(ii.year, ii.month, ii.day) for ii in tPac] + df1["tPac"] = tPac + df1["PacDay"] = PacDay + df1["StaTrim"] = [ii.replace("-", "") for ii in df["Station"]] + df1["HrsPast"] = [ + (ii - dt.datetime(2014, 1, 1)).total_seconds() / 3600 for ii in 
df["dtUTC"] + ] + df["SA"] = np.nan + df["CT"] = np.nan + df["pLat"] = np.nan + df["pLon"] = np.nan + df["tdiffH"] = np.nan for ik in ctddfs.keys(): - jdf=ctddfs[ik]['df'] - dtlims=ctddfs[ik]['dtlims'] - for i, row in df1.loc[(df1['dtUTC']>dtlims[0])&(df1['dtUTC']0: + jdf = ctddfs[ik]["df"] + dtlims = ctddfs[ik]["dtlims"] + for i, row in df1.loc[ + (df1["dtUTC"] > dtlims[0]) & (df1["dtUTC"] < dtlims[1]) + ].iterrows(): + idf = jdf.loc[ + (jdf.StaTrim == row["StaTrim"]) + & (jdf.depth == np.round(row["Z"])) + & (np.abs(jdf.HrsPast - row["HrsPast"]) < 1.0) + & (np.abs(jdf.latitude - row["Lat"]) < 0.05) + & (np.abs(jdf.longitude - row["Lon"]) < 0.05) + ] + if len(idf) > 0: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) - sal=np.nanmean(idf['salinity'].values) - tem=np.nanmean(idf['CT'].values) - lat=np.nanmean(idf['latitude'].values) - lon=np.nanmean(idf['longitude'].values) - tdelta=np.nanmean([np.abs((ii-row['dtUTC']).total_seconds()/3600) for ii in idf['dtUTCPhys']]) - df.at[i,'SA']=sal - df.at[i,'CT']=tem - df.at[i,'pLat']=lat - df.at[i,'pLon']=lon - df.at[i,'tdiffH']=tdelta + sal = np.nanmean(idf["salinity"].values) + tem = np.nanmean(idf["CT"].values) + lat = np.nanmean(idf["latitude"].values) + lon = np.nanmean(idf["longitude"].values) + tdelta = np.nanmean( + [ + np.abs((ii - row["dtUTC"]).total_seconds() / 3600) + for ii in idf["dtUTCPhys"] + ] + ) + df.at[i, "SA"] = sal + df.at[i, "CT"] = tem + df.at[i, "pLat"] = lat + df.at[i, "pLon"] = lon + df.at[i, "tdiffH"] = tdelta return df -def loadPSFCTD(datelims=(),pathbase='/ocean/eolson/MEOPAR/obs/PSFCitSci/phys'): - """ load PSF CTD data only """ - if len(datelims)<2: - datelims=(dt.datetime(2014,1,1),dt.datetime(2020,1,1)) - ctddfs=list() - if datelims[0].year<2016: + +def loadPSFCTD(datelims=(), pathbase="/ocean/eolson/MEOPAR/obs/PSFCitSci/phys"): + """load PSF CTD data only""" + if len(datelims) < 2: + datelims = (dt.datetime(2014, 1, 1), dt.datetime(2020, 1, 1)) + ctddfs = list() + if datelims[0].year < 2016: # load 2015 - phys2015=pd.read_csv(os.path.join(pathbase,'CitSci2015_20180621.csv'),skiprows=lambda x: x in [0,1,2,3,4,6],delimiter=',', - dtype={'Patrol': str,'ID':str,'station':str,'datetime':str,'latitude':float,'longitude':float}, - converters={'pressure': lambda x: float(x),'depth': lambda x: float(x),'temperature': lambda x: float(x), - 'conductivity': lambda x: float(x),'salinity': lambda x: float(x), - 'o2SAT': lambda x: float(x),'o2uM':lambda x: float(x),'chl':lambda x: float(x)}) + phys2015 = pd.read_csv( + os.path.join(pathbase, "CitSci2015_20180621.csv"), + skiprows=lambda x: x in [0, 1, 2, 3, 4, 6], + delimiter=",", + dtype={ + "Patrol": str, + "ID": str, + "station": str, + "datetime": str, + "latitude": float, + "longitude": float, + }, + converters={ + "pressure": lambda x: float(x), + "depth": lambda x: float(x), + "temperature": lambda x: float(x), + "conductivity": lambda x: float(x), + "salinity": lambda x: float(x), + "o2SAT": lambda x: float(x), + "o2uM": lambda x: float(x), + "chl": lambda x: float(x), + }, + ) ctddfs.append(phys2015) - print(np.min(phys2015['salinity']),np.max(phys2015['salinity'])) - if (datelims[0].year<2017) and (datelims[1].year>2015): + print(np.min(phys2015["salinity"]), np.max(phys2015["salinity"])) + if (datelims[0].year < 2017) and (datelims[1].year > 2015): # load 2016 - phys2016=pd.read_csv(os.path.join(pathbase,'CitSci2016_20180621.csv'),skiprows=lambda x: x in [0,1,2,3,4,5,6,7,9],delimiter=',', - dtype={'Patrol': 
str,'ID':str,'station':str,'datetime':str,'latitude':float,'longitude':float}, - converters={'pressure': lambda x: float(x),'depth': lambda x: float(x),'temperature': lambda x: float(x), - 'conductivity': lambda x: float(x),'salinity': lambda x: float(x), - 'o2SAT': lambda x: float(x),'o2uM':lambda x: float(x),'chl':lambda x: float(x)}) + phys2016 = pd.read_csv( + os.path.join(pathbase, "CitSci2016_20180621.csv"), + skiprows=lambda x: x in [0, 1, 2, 3, 4, 5, 6, 7, 9], + delimiter=",", + dtype={ + "Patrol": str, + "ID": str, + "station": str, + "datetime": str, + "latitude": float, + "longitude": float, + }, + converters={ + "pressure": lambda x: float(x), + "depth": lambda x: float(x), + "temperature": lambda x: float(x), + "conductivity": lambda x: float(x), + "salinity": lambda x: float(x), + "o2SAT": lambda x: float(x), + "o2uM": lambda x: float(x), + "chl": lambda x: float(x), + }, + ) ctddfs.append(phys2016) - print(np.min(phys2016['salinity']),np.max(phys2016['salinity'])) - if (datelims[1].year>2016): + print(np.min(phys2016["salinity"]), np.max(phys2016["salinity"])) + if datelims[1].year > 2016: # load 2017 - phys2017=pd.read_csv(os.path.join(pathbase,'CitSci2017_20180621.csv'),skiprows=lambda x: x in [0,1,2,3,4,5,7],delimiter=',', - dtype={'Patrol': str,'ID':str,'station':str,'datetime':str,'latitude':float,'longitude':float}, - converters={'pressure': lambda x: float(x),'depth': lambda x: float(x),'temperature': lambda x: float(x), - 'conductivity': lambda x: float(x),'salinity': lambda x: float(x), - 'o2SAT': lambda x: float(x),'o2uM':lambda x: float(x),'chl':lambda x: float(x)}) + phys2017 = pd.read_csv( + os.path.join(pathbase, "CitSci2017_20180621.csv"), + skiprows=lambda x: x in [0, 1, 2, 3, 4, 5, 7], + delimiter=",", + dtype={ + "Patrol": str, + "ID": str, + "station": str, + "datetime": str, + "latitude": float, + "longitude": float, + }, + converters={ + "pressure": lambda x: float(x), + "depth": lambda x: float(x), + "temperature": lambda x: float(x), + "conductivity": lambda x: float(x), + "salinity": lambda x: float(x), + "o2SAT": lambda x: float(x), + "o2uM": lambda x: float(x), + "chl": lambda x: float(x), + }, + ) ctddfs.append(phys2017) - print(np.min(phys2017['salinity']),np.max(phys2017['salinity'])) + print(np.min(phys2017["salinity"]), np.max(phys2017["salinity"])) # note: csv files report salnity in TEOS-10 g/kg - df=pd.concat(ctddfs,ignore_index=True,sort=True) - df['dtUTC']=[dt.datetime.strptime(ii,'%d/%m/%Y %H:%M:%S') for ii in df['datetime'].values] - df['CT']=[gsw.CT_from_t(SA,t,p) for SA, t, p, in zip(df['salinity'],df['temperature'],df['pressure'])] - df.rename(columns={'salinity':'SA','latitude':'Lat','longitude':'Lon','depth':'Z'},inplace=True) + df = pd.concat(ctddfs, ignore_index=True, sort=True) + df["dtUTC"] = [ + dt.datetime.strptime(ii, "%d/%m/%Y %H:%M:%S") for ii in df["datetime"].values + ] + df["CT"] = [ + gsw.CT_from_t(SA, t, p) + for SA, t, p, in zip(df["salinity"], df["temperature"], df["pressure"]) + ] + df.rename( + columns={"salinity": "SA", "latitude": "Lat", "longitude": "Lon", "depth": "Z"}, + inplace=True, + ) return df -def loadHakai(datelims=(),loadCTD=False): - """ load data from Hakai sampling program from spreadsheets""" - if len(datelims)<2: - datelims=(dt.datetime(1900,1,1),dt.datetime(2100,1,1)) - start_date=datelims[0] - end_date=datelims[1] - - f0 = pd.read_excel('/ocean/eolson/MEOPAR/obs/Hakai/Dosser20180911/2018-09-11_144804_HakaiData_nutrients.xlsx', - sheet_name = 'Hakai Data',engine=excelEngine) - 
f0.drop(['ACTION','Lat', 'Long', 'Collection Method', 'Installed', 'Lab Technician', 'NH4+', 'NO2+NO3 (ug/L)', - 'no2_no3_units', 'TP', 'TDP', 'TN', 'TDN', 'SRP', 'Project Specific ID', 'Hakai ID', 'Source', - 'po4pfilt', 'no3nfilt', 'po4punfl', 'no3nunfl', 'nh4nunfl', 'NH4+ Flag', - 'TP FLag', 'TDP FLag', 'TN Flag', 'TDN FLag','Volume (ml)', - 'SRP Flag', 'PO4 Flag', 'po4pfilt_flag', 'no3nfilt_flag','Preserved', 'Analyzed', - 'po4punfl_flag', 'no3nunfl_flag', 'nh4nunfl_flag', 'Analyzing Lab', 'Sample Status', - 'Quality Level', 'Comments', 'Quality Log'], axis = 1, inplace = True) - dts0=[pytz.timezone('Canada/Pacific').localize(i).astimezone(pytz.utc).replace(tzinfo=None) - for i in f0['Collected']] - f0['dtUTC']=dts0 - - fc = pd.read_csv('/ocean/eolson/MEOPAR/obs/Hakai/Dosser20180911/ctd-bulk-1536702711696.csv', - usecols=['Cast PK','Cruise','Station', 'Drop number','Start time', 'Bottom time', - 'Latitude', 'Longitude', 'Depth (m)', 'Temperature (deg C)', 'Temperature flag', 'Pressure (dbar)', - 'Pressure flag', 'PAR', 'PAR flag', 'Fluorometry Chlorophyll (ug/L)', 'Fluorometry Chlorophyll flag', - 'Turbidity (FTU)', 'Turbidity flag', - 'Salinity (PSU)', 'Salinity flag'], - dtype={'Drop number':np.float64,'PAR flag':str,'Fluorometry Chlorophyll flag':str},na_values=('null','-9.99e-29')) + +def loadHakai(datelims=(), loadCTD=False): + """load data from Hakai sampling program from spreadsheets""" + if len(datelims) < 2: + datelims = (dt.datetime(1900, 1, 1), dt.datetime(2100, 1, 1)) + start_date = datelims[0] + end_date = datelims[1] + + f0 = pd.read_excel( + "/ocean/eolson/MEOPAR/obs/Hakai/Dosser20180911/2018-09-11_144804_HakaiData_nutrients.xlsx", + sheet_name="Hakai Data", + engine=excelEngine, + ) + f0.drop( + [ + "ACTION", + "Lat", + "Long", + "Collection Method", + "Installed", + "Lab Technician", + "NH4+", + "NO2+NO3 (ug/L)", + "no2_no3_units", + "TP", + "TDP", + "TN", + "TDN", + "SRP", + "Project Specific ID", + "Hakai ID", + "Source", + "po4pfilt", + "no3nfilt", + "po4punfl", + "no3nunfl", + "nh4nunfl", + "NH4+ Flag", + "TP FLag", + "TDP FLag", + "TN Flag", + "TDN FLag", + "Volume (ml)", + "SRP Flag", + "PO4 Flag", + "po4pfilt_flag", + "no3nfilt_flag", + "Preserved", + "Analyzed", + "po4punfl_flag", + "no3nunfl_flag", + "nh4nunfl_flag", + "Analyzing Lab", + "Sample Status", + "Quality Level", + "Comments", + "Quality Log", + ], + axis=1, + inplace=True, + ) + dts0 = [ + pytz.timezone("Canada/Pacific") + .localize(i) + .astimezone(pytz.utc) + .replace(tzinfo=None) + for i in f0["Collected"] + ] + f0["dtUTC"] = dts0 + + fc = pd.read_csv( + "/ocean/eolson/MEOPAR/obs/Hakai/Dosser20180911/ctd-bulk-1536702711696.csv", + usecols=[ + "Cast PK", + "Cruise", + "Station", + "Drop number", + "Start time", + "Bottom time", + "Latitude", + "Longitude", + "Depth (m)", + "Temperature (deg C)", + "Temperature flag", + "Pressure (dbar)", + "Pressure flag", + "PAR", + "PAR flag", + "Fluorometry Chlorophyll (ug/L)", + "Fluorometry Chlorophyll flag", + "Turbidity (FTU)", + "Turbidity flag", + "Salinity (PSU)", + "Salinity flag", + ], + dtype={ + "Drop number": np.float64, + "PAR flag": str, + "Fluorometry Chlorophyll flag": str, + }, + na_values=("null", "-9.99e-29"), + ) ## fix apparent typos: # reversed lats and lons - iii=fc['Latitude']>90 - lons=-1*fc.loc[iii,'Latitude'].values - lats=-1*fc.loc[iii,'Longitude'].values - fc.loc[iii,'Longitude']=lons - fc.loc[iii,'Latitude']=lats + iii = fc["Latitude"] > 90 + lons = -1 * fc.loc[iii, "Latitude"].values + lats = -1 * fc.loc[iii, "Longitude"].values + 
fc.loc[iii, "Longitude"] = lons + fc.loc[iii, "Latitude"] = lats # remove data with missing lats and lons - nans=fc.loc[(fc['Latitude'].isnull())|(fc['Longitude'].isnull())] - fc=fc.drop(nans.index) + nans = fc.loc[(fc["Latitude"].isnull()) | (fc["Longitude"].isnull())] + fc = fc.drop(nans.index) # apparently bad lats/lons - QU16bad=fc.loc[(fc['Station']=='QU16')&(fc['Latitude']>50.3)] - fc=fc.drop(QU16bad.index) - QU36bad=fc.loc[(fc['Station']=='QU36')&(fc['Latitude']>50.2)] - fc=fc.drop(QU36bad.index) - QU37bad=fc.loc[(fc['Station']=='QU37')&(fc['Longitude']<-125.1)] - fc=fc.drop(QU37bad.index) - QU38bad=fc.loc[(fc['Station']=='QU38')&(fc['Longitude']>-125.2)] - fc=fc.drop(QU38bad.index) - QU5bad=fc.loc[(fc['Station']=='QU5')&(fc['Longitude']>-125.18)] - fc=fc.drop(QU5bad.index) + QU16bad = fc.loc[(fc["Station"] == "QU16") & (fc["Latitude"] > 50.3)] + fc = fc.drop(QU16bad.index) + QU36bad = fc.loc[(fc["Station"] == "QU36") & (fc["Latitude"] > 50.2)] + fc = fc.drop(QU36bad.index) + QU37bad = fc.loc[(fc["Station"] == "QU37") & (fc["Longitude"] < -125.1)] + fc = fc.drop(QU37bad.index) + QU38bad = fc.loc[(fc["Station"] == "QU38") & (fc["Longitude"] > -125.2)] + fc = fc.drop(QU38bad.index) + QU5bad = fc.loc[(fc["Station"] == "QU5") & (fc["Longitude"] > -125.18)] + fc = fc.drop(QU5bad.index) # remove data with suspicious 0 temperature and salinity - iind=(fc['Temperature (deg C)']==0)&(fc['Salinity (PSU)']==0) - fc.loc[iind,['Temperature (deg C)', 'Pressure (dbar)', 'PAR', 'Fluorometry Chlorophyll (ug/L)', 'Turbidity (FTU)', 'Salinity (PSU)']]=np.nan - - fc['dt']=[dt.datetime.strptime(i.split('.')[0],'%Y-%m-%d %H:%M:%S') for i in fc['Start time']] - dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime.strptime(i.split('.')[0],'%Y-%m-%d %H:%M:%S')).astimezone(pytz.utc).replace(tzinfo=None) - for i in fc['Start time']] - fc['dtUTC']=dts - - dloc=[dt.datetime(i.year,i.month,i.day) for i in fc['dt']] - fc['dloc']=dloc - - fcS=fc.loc[:,['Latitude','Longitude']].groupby([fc['Station'],fc['dloc']]).mean().reset_index() - - f0['Station']=f0['Site ID'] - #f0['dt']=[dt.datetime.strptime(i,'%Y-%m-%d %H:%M:%S') for i in f0['Collected']] - dloc0=[dt.datetime(i.year,i.month,i.day) for i in f0['Collected']] - f0['dloc']=dloc0 - - fdata=f0.merge(fcS,how='left') - - fdata['Lat']=fdata['Latitude'] - fdata['Lon']=fdata['Longitude'] - fdata['Z']=fdata['Line Out Depth'] - - fdata['SA']=np.nan - fdata['CT']=np.nan - fdata['pZ']=np.nan - df2=fdata.copy(deep=True) - - zthresh=1.5 - print("Note: CTD depths (pZ) may vary from bottle depths (Z) by up to ",str(zthresh)," m.") + iind = (fc["Temperature (deg C)"] == 0) & (fc["Salinity (PSU)"] == 0) + fc.loc[ + iind, + [ + "Temperature (deg C)", + "Pressure (dbar)", + "PAR", + "Fluorometry Chlorophyll (ug/L)", + "Turbidity (FTU)", + "Salinity (PSU)", + ], + ] = np.nan + + fc["dt"] = [ + dt.datetime.strptime(i.split(".")[0], "%Y-%m-%d %H:%M:%S") + for i in fc["Start time"] + ] + dts = [ + pytz.timezone("Canada/Pacific") + .localize(dt.datetime.strptime(i.split(".")[0], "%Y-%m-%d %H:%M:%S")) + .astimezone(pytz.utc) + .replace(tzinfo=None) + for i in fc["Start time"] + ] + fc["dtUTC"] = dts + + dloc = [dt.datetime(i.year, i.month, i.day) for i in fc["dt"]] + fc["dloc"] = dloc + + fcS = ( + fc.loc[:, ["Latitude", "Longitude"]] + .groupby([fc["Station"], fc["dloc"]]) + .mean() + .reset_index() + ) + + f0["Station"] = f0["Site ID"] + # f0['dt']=[dt.datetime.strptime(i,'%Y-%m-%d %H:%M:%S') for i in f0['Collected']] + dloc0 = [dt.datetime(i.year, i.month, i.day) for i in 
f0["Collected"]] + f0["dloc"] = dloc0 + + fdata = f0.merge(fcS, how="left") + + fdata["Lat"] = fdata["Latitude"] + fdata["Lon"] = fdata["Longitude"] + fdata["Z"] = fdata["Line Out Depth"] + + fdata["SA"] = np.nan + fdata["CT"] = np.nan + fdata["pZ"] = np.nan + df2 = fdata.copy(deep=True) + + zthresh = 1.5 + print( + "Note: CTD depths (pZ) may vary from bottle depths (Z) by up to ", + str(zthresh), + " m.", + ) for i, row in df2.iterrows(): - idf=fc.loc[(fc.Station==row['Station'])&(fc.dloc==row['dloc'])&\ - ((np.abs(fc['Depth (m)']-row['Pressure Transducer Depth (m)'])0: - zrow=row['Pressure Transducer Depth (m)'] if ~np.isnan(row['Pressure Transducer Depth (m)']) else row['Line Out Depth'] - zdifmin=np.min([np.abs(ii-zrow) for ii in idf['Depth (m)']]) + idf = fc.loc[ + (fc.Station == row["Station"]) + & (fc.dloc == row["dloc"]) + & ( + ( + np.abs(fc["Depth (m)"] - row["Pressure Transducer Depth (m)"]) + < zthresh + ) + | (np.abs(fc["Depth (m)"] - row["Line Out Depth"]) < zthresh) + ) + ] + if len(idf) > 0: + zrow = ( + row["Pressure Transducer Depth (m)"] + if ~np.isnan(row["Pressure Transducer Depth (m)"]) + else row["Line Out Depth"] + ) + zdifmin = np.min([np.abs(ii - zrow) for ii in idf["Depth (m)"]]) # if there are multiple minimum distance rows, just take the first - idfZ=idf.loc[np.abs(idf['Depth (m)']-zrow)==zdifmin] - isna = (not np.isnan(idfZ['Salinity (PSU)'].values[0])) and (not np.isnan(idfZ['Pressure (dbar)'].values[0])) and \ - (not np.isnan(idfZ['Longitude'].values[0])) and (not np.isnan(idfZ['Latitude'].values[0])) - isnat = (not np.isnan(idfZ['Temperature (deg C)'].values[0])) - sal=gsw.SA_from_SP(idfZ['Salinity (PSU)'].values[0],idfZ['Pressure (dbar)'].values[0],idfZ['Longitude'].values[0], - idfZ['Latitude'].values[0]) if isna else np.nan - tem=gsw.CT_from_t(sal,idfZ['Temperature (deg C)'].values[0],idfZ['Pressure (dbar)'].values[0]) if (isna and isnat) else np.nan - fdata.at[i,'SA']=sal - fdata.at[i,'CT']=tem - fdata.at[i,'pZ']=idfZ['Depth (m)'].values[0] - - fdata2=fdata.loc[(fdata['dtUTC']>=start_date)&(fdata['dtUTC']=0)&(fdata['Z']<440)&(fdata['Lon']<360)&(fdata['Lat']<=90)].copy(deep=True).reset_index() - fdata2.drop(['no','event_pk','Date','Sampling Bout','Latitude','Longitude','index','Gather Lat','Gather Long', 'Pressure Transducer Depth (m)', - 'Filter Type','dloc','Collected','Line Out Depth','Replicate Number','Work Area','Survey','Site ID','NO2+NO3 Flag','SiO2 Flag'],axis=1,inplace=True) + idfZ = idf.loc[np.abs(idf["Depth (m)"] - zrow) == zdifmin] + isna = ( + (not np.isnan(idfZ["Salinity (PSU)"].values[0])) + and (not np.isnan(idfZ["Pressure (dbar)"].values[0])) + and (not np.isnan(idfZ["Longitude"].values[0])) + and (not np.isnan(idfZ["Latitude"].values[0])) + ) + isnat = not np.isnan(idfZ["Temperature (deg C)"].values[0]) + sal = ( + gsw.SA_from_SP( + idfZ["Salinity (PSU)"].values[0], + idfZ["Pressure (dbar)"].values[0], + idfZ["Longitude"].values[0], + idfZ["Latitude"].values[0], + ) + if isna + else np.nan + ) + tem = ( + gsw.CT_from_t( + sal, + idfZ["Temperature (deg C)"].values[0], + idfZ["Pressure (dbar)"].values[0], + ) + if (isna and isnat) + else np.nan + ) + fdata.at[i, "SA"] = sal + fdata.at[i, "CT"] = tem + fdata.at[i, "pZ"] = idfZ["Depth (m)"].values[0] + + fdata2 = ( + fdata.loc[ + (fdata["dtUTC"] >= start_date) + & (fdata["dtUTC"] < end_date) + & (fdata["Z"] >= 0) + & (fdata["Z"] < 440) + & (fdata["Lon"] < 360) + & (fdata["Lat"] <= 90) + ] + .copy(deep=True) + .reset_index() + ) + fdata2.drop( + [ + "no", + "event_pk", + "Date", + "Sampling 
Bout", + "Latitude", + "Longitude", + "index", + "Gather Lat", + "Gather Long", + "Pressure Transducer Depth (m)", + "Filter Type", + "dloc", + "Collected", + "Line Out Depth", + "Replicate Number", + "Work Area", + "Survey", + "Site ID", + "NO2+NO3 Flag", + "SiO2 Flag", + ], + axis=1, + inplace=True, + ) return fdata2 def load_ferry_ERDDAP(datelims, variables=None): - """ load ferry data from ERDDAP, return a pandas dataframe. Do conversion on temperature to + """load ferry data from ERDDAP, return a pandas dataframe. Do conversion on temperature to conservative temperature, oxygen to uMol and rename grid i and grid j columns :arg datelims: start date and end date; as a 2-tuple of datetimes @@ -1350,17 +2380,17 @@ def load_ferry_ERDDAP(datelims, variables=None): "o2_concentration_corrected", "time", "nemo_grid_j", - "nemo_grid_i" + "nemo_grid_i", ] - start_date = datelims[0].strftime('%Y-%m-%dT00:00:00Z') - end_date = datelims[1].strftime('%Y-%m-%dT00:00:00Z') + start_date = datelims[0].strftime("%Y-%m-%dT00:00:00Z") + end_date = datelims[1].strftime("%Y-%m-%dT00:00:00Z") constraints = { - "time>=": start_date, - "time<=": end_date, - "nemo_grid_j>=": 0, - "on_crossing_mask=": 1, + "time>=": start_date, + "time<=": end_date, + "nemo_grid_j>=": 0, + "on_crossing_mask=": 1, } obs = ERDDAP(server=server, protocol=protocol) @@ -1368,20 +2398,32 @@ def load_ferry_ERDDAP(datelims, variables=None): obs.variables = variables obs.constraints = constraints - obs_pd = obs.to_pandas(index_col="time (UTC)", parse_dates=True,).dropna() + obs_pd = obs.to_pandas( + index_col="time (UTC)", + parse_dates=True, + ).dropna() - obs_pd['oxygen (uM)'] = 44.661 * obs_pd['o2_concentration_corrected (ml/l)'] - obs_pd['conservative temperature (oC)'] = gsw.CT_from_pt(obs_pd['salinity (g/kg)'], obs_pd['temperature (degrees_Celcius)'] ) - obs_pd['dtUTC'] = obs_pd.index.tz_localize(None) + obs_pd["oxygen (uM)"] = 44.661 * obs_pd["o2_concentration_corrected (ml/l)"] + obs_pd["conservative temperature (oC)"] = gsw.CT_from_pt( + obs_pd["salinity (g/kg)"], obs_pd["temperature (degrees_Celcius)"] + ) + obs_pd["dtUTC"] = obs_pd.index.tz_localize(None) obs_pd.reset_index(inplace=True) - obs_pd.rename(columns={"latitude (degrees_north)": "Lat", "longitude (degrees_east)": "Lon", - "nemo_grid_j (count)": "j", "nemo_grid_i (count)": "i"}, inplace=True) + obs_pd.rename( + columns={ + "latitude (degrees_north)": "Lat", + "longitude (degrees_east)": "Lon", + "nemo_grid_j (count)": "j", + "nemo_grid_i (count)": "i", + }, + inplace=True, + ) return obs_pd def load_ONC_node_ERDDAP(datelims, variables=None): - """ load ONC data from the nodes from ERDDAP, return a pandas dataframe. Do conversion on temperature to + """load ONC data from the nodes from ERDDAP, return a pandas dataframe. Do conversion on temperature to conservative temperature. 
Pull out grid i, grid j and depth from places :arg datelims: start date and end date; as a 2-tuple of datetimes @@ -1400,7 +2442,12 @@ def load_ONC_node_ERDDAP(datelims, variables=None): server = "https://salishsea.eos.ubc.ca/erddap" protocol = "tabledap" - dataset_ids = ["ubcONCSCVIPCTD15mV1", "ubcONCSEVIPCTD15mV1", "ubcONCLSBBLCTD15mV1", "ubcONCUSDDLCTD15mV1"] + dataset_ids = [ + "ubcONCSCVIPCTD15mV1", + "ubcONCSEVIPCTD15mV1", + "ubcONCLSBBLCTD15mV1", + "ubcONCUSDDLCTD15mV1", + ] nodes = ["Central node", "Delta BBL node", "Delta DDL node", "East node"] if variables == None: @@ -1413,18 +2460,18 @@ def load_ONC_node_ERDDAP(datelims, variables=None): "depth", ] - start_date = datelims[0].strftime('%Y-%m-%dT00:00:00Z') - end_date = datelims[1].strftime('%Y-%m-%dT00:00:00Z') + start_date = datelims[0].strftime("%Y-%m-%dT00:00:00Z") + end_date = datelims[1].strftime("%Y-%m-%dT00:00:00Z") constraints = { - "time>=": start_date, - "time<=": end_date, + "time>=": start_date, + "time<=": end_date, } obs_tot = [] for inode, (dataset_id, node) in enumerate(zip(dataset_ids, nodes)): - print (node, start_date, end_date) + print(node, start_date, end_date) obs = ERDDAP(server=server, protocol=protocol) obs.dataset_id = dataset_id @@ -1432,483 +2479,739 @@ def load_ONC_node_ERDDAP(datelims, variables=None): obs.constraints = constraints try: - obs_pd = obs.to_pandas(index_col="time (UTC)", parse_dates=True,).dropna() + obs_pd = obs.to_pandas( + index_col="time (UTC)", + parse_dates=True, + ).dropna() except Exception as error: - print (error) - print ('Assuming no data') - columns = ["dtUTC", "conservative temperature (oC)", "salinity (g/kg)", "latitude (degrees_north)", "longitude (degrees_east)"] + print(error) + print("Assuming no data") + columns = [ + "dtUTC", + "conservative temperature (oC)", + "salinity (g/kg)", + "latitude (degrees_north)", + "longitude (degrees_east)", + ] obs_pd = pd.DataFrame(columns=columns) else: - obs_pd['conservative temperature (oC)'] = gsw.CT_from_pt(obs_pd['salinity (g/kg)'], obs_pd['temperature (degrees_Celcius)'] ) - obs_pd['dtUTC'] = obs_pd.index.tz_localize(None) + obs_pd["conservative temperature (oC)"] = gsw.CT_from_pt( + obs_pd["salinity (g/kg)"], obs_pd["temperature (degrees_Celcius)"] + ) + obs_pd["dtUTC"] = obs_pd.index.tz_localize(None) obs_pd.reset_index(inplace=True) - obs_pd.rename(columns={"latitude (degrees_north)": "Lat", "longitude (degrees_east)": "Lon"}, inplace=True) - (obs_pd['j'], obs_pd['i']) = places.PLACES[node]['NEMO grid ji'] - obs_pd['k'] = places.PLACES[node]['NEMO grid k'] + obs_pd.rename( + columns={ + "latitude (degrees_north)": "Lat", + "longitude (degrees_east)": "Lon", + }, + inplace=True, + ) + (obs_pd["j"], obs_pd["i"]) = places.PLACES[node]["NEMO grid ji"] + obs_pd["k"] = places.PLACES[node]["NEMO grid k"] obs_tot.append(obs_pd) obs_concat = pd.concat(obs_tot) - obs_concat.to_csv('checkitout.csv') + obs_concat.to_csv("checkitout.csv") return obs_concat -def WSS(obs,mod): +def WSS(obs, mod): # Willmott skill core, cannot include any NaN values - return 1.0-np.sum((mod-obs)**2)/np.sum((np.abs(mod-np.mean(obs))+np.abs(obs-np.mean(obs)))**2) + return 1.0 - np.sum((mod - obs) ** 2) / np.sum( + (np.abs(mod - np.mean(obs)) + np.abs(obs - np.mean(obs))) ** 2 + ) -def RMSE(obs,mod): + +def RMSE(obs, mod): # root mean square error, cannot include any NaN values - return np.sqrt(np.sum((mod-obs)**2)/len(mod)) - -def stats(obs0,mod0): - """ calculate useful model-data comparison statistics """ - obs0=_deframe(obs0) - mod0=_deframe(mod0) - 
iii=np.logical_and(~np.isnan(obs0),~np.isnan(mod0)) - obs=obs0[iii] - mod=mod0[iii] - N=len(obs) - if N>0: - modmean=np.mean(mod) - obsmean=np.mean(obs) - bias=modmean-obsmean - vRMSE=RMSE(obs,mod) - vWSS=WSS(obs,mod) + return np.sqrt(np.sum((mod - obs) ** 2) / len(mod)) + + +def stats(obs0, mod0): + """calculate useful model-data comparison statistics""" + obs0 = _deframe(obs0) + mod0 = _deframe(mod0) + iii = np.logical_and(~np.isnan(obs0), ~np.isnan(mod0)) + obs = obs0[iii] + mod = mod0[iii] + N = len(obs) + if N > 0: + modmean = np.mean(mod) + obsmean = np.mean(obs) + bias = modmean - obsmean + vRMSE = RMSE(obs, mod) + vWSS = WSS(obs, mod) else: - modmean=np.nan - obsmean=np.nan - bias=np.nan - vRMSE=np.nan - vWSS=np.nan + modmean = np.nan + obsmean = np.nan + bias = np.nan + vRMSE = np.nan + vWSS = np.nan return N, modmean, obsmean, bias, vRMSE, vWSS -def varvarScatter(ax,df,obsvar,modvar,colvar,vmin=0,vmax=0,cbar=False,cm=cmo.cm.thermal,args={}): - """ add scatter plot to axes ax with df[obsvar] on x-axis, df[modvar] on y-axis, - and color determined by df[colvar] - vmin and vmax are limits on color scale + +def varvarScatter( + ax, + df, + obsvar, + modvar, + colvar, + vmin=0, + vmax=0, + cbar=False, + cm=cmo.cm.thermal, + args={}, +): + """add scatter plot to axes ax with df[obsvar] on x-axis, df[modvar] on y-axis, + and color determined by df[colvar] + vmin and vmax are limits on color scale """ - obs0=_deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[colvar]==df[colvar]),[obsvar]]) - mod0=_deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[colvar]==df[colvar]),[modvar]]) - sep0=_deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[colvar]==df[colvar]),[colvar]]) - if 'norm' in args: - ps=ax.scatter(obs0,mod0,c=sep0,cmap=cm,**args) + obs0 = _deframe( + df.loc[ + (df[obsvar] == df[obsvar]) + & (df[modvar] == df[modvar]) + & (df[colvar] == df[colvar]), + [obsvar], + ] + ) + mod0 = _deframe( + df.loc[ + (df[obsvar] == df[obsvar]) + & (df[modvar] == df[modvar]) + & (df[colvar] == df[colvar]), + [modvar], + ] + ) + sep0 = _deframe( + df.loc[ + (df[obsvar] == df[obsvar]) + & (df[modvar] == df[modvar]) + & (df[colvar] == df[colvar]), + [colvar], + ] + ) + if "norm" in args: + ps = ax.scatter(obs0, mod0, c=sep0, cmap=cm, **args) else: - if vmin==vmax: - vmin=np.min(sep0) - vmax=np.max(sep0) - ps=ax.scatter(obs0,mod0,c=sep0,vmin=vmin,vmax=vmax,cmap=cm,**args) - if cbar==True: + if vmin == vmax: + vmin = np.min(sep0) + vmax = np.max(sep0) + ps = ax.scatter(obs0, mod0, c=sep0, vmin=vmin, vmax=vmax, cmap=cm, **args) + if cbar == True: plt.colorbar(ps) return ps -def varvarPlot(ax,df,obsvar,modvar,sepvar='',sepvals=np.array([]),lname='',sepunits='', - cols=('darkslateblue','royalblue','skyblue','mediumseagreen','darkseagreen','goldenrod', - 'coral','tomato','firebrick','mediumvioletred','magenta'),labels=''): - """ model vs obs plot like varvarScatter but colors taken from a list - as determined by determined from df[sepvar] and a list of bin edges, sepvals """ + +def varvarPlot( + ax, + df, + obsvar, + modvar, + sepvar="", + sepvals=np.array([]), + lname="", + sepunits="", + cols=( + "darkslateblue", + "royalblue", + "skyblue", + "mediumseagreen", + "darkseagreen", + "goldenrod", + "coral", + "tomato", + "firebrick", + "mediumvioletred", + "magenta", + ), + labels="", +): + """model vs obs plot like varvarScatter but colors taken from a list + as determined by determined from df[sepvar] and a list of bin edges, sepvals""" # remember labels 
must include < and > cases - if len(lname)==0: - lname=sepvar - ps=list() - if len(sepvals)==0: - obs0=_deframe(df[obsvar]) - mod0=_deframe(df[modvar]) - ps.append(ax.plot(obs0,mod0,'.',color=cols[0],label=lname)) + if len(lname) == 0: + lname = sepvar + ps = list() + if len(sepvals) == 0: + obs0 = _deframe(df[obsvar]) + mod0 = _deframe(df[modvar]) + ps.append(ax.plot(obs0, mod0, ".", color=cols[0], label=lname)) else: - obs0=_deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar]),[obsvar]]) - mod0=_deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar]),[modvar]]) - sep0=_deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar]),[sepvar]]) - sepvals=np.sort(sepvals) + obs0 = _deframe( + df.loc[ + (df[obsvar] == df[obsvar]) + & (df[modvar] == df[modvar]) + & (df[sepvar] == df[sepvar]), + [obsvar], + ] + ) + mod0 = _deframe( + df.loc[ + (df[obsvar] == df[obsvar]) + & (df[modvar] == df[modvar]) + & (df[sepvar] == df[sepvar]), + [modvar], + ] + ) + sep0 = _deframe( + df.loc[ + (df[obsvar] == df[obsvar]) + & (df[modvar] == df[modvar]) + & (df[sepvar] == df[sepvar]), + [sepvar], + ] + ) + sepvals = np.sort(sepvals) # less than min case: - ii=0 - iii=sep00: - #ll=u'{} < {} {}'.format(lname,sepvals[ii],sepunits).strip() - if len(labels)>0: - ll=labels[0] + ii = 0 + iii = sep0 < sepvals[ii] + if np.sum(iii) > 0: + # ll=u'{} < {} {}'.format(lname,sepvals[ii],sepunits).strip() + if len(labels) > 0: + ll = labels[0] else: - ll=u'{} $<$ {} {}'.format(lname,sepvals[ii],sepunits).strip() - p0,=ax.plot(obs0[iii],mod0[iii],'.',color=cols[ii],label=ll) + ll = "{} $<$ {} {}".format(lname, sepvals[ii], sepunits).strip() + (p0,) = ax.plot(obs0[iii], mod0[iii], ".", color=cols[ii], label=ll) ps.append(p0) # between min and max: - for ii in range(1,len(sepvals)): - iii=np.logical_and(sep0=sepvals[ii-1]) - if np.sum(iii)>0: - #ll=u'{} {} \u2264 {} < {} {}'.format(sepvals[ii-1],sepunits,lname,sepvals[ii],sepunits).strip() - if len(labels)>0: - ll=labels[ii] + for ii in range(1, len(sepvals)): + iii = np.logical_and(sep0 < sepvals[ii], sep0 >= sepvals[ii - 1]) + if np.sum(iii) > 0: + # ll=u'{} {} \u2264 {} < {} {}'.format(sepvals[ii-1],sepunits,lname,sepvals[ii],sepunits).strip() + if len(labels) > 0: + ll = labels[ii] else: - ll=u'{} {} $\leq$ {} $<$ {} {}'.format(sepvals[ii-1],sepunits,lname,sepvals[ii],sepunits).strip() - p0,=ax.plot(obs0[iii],mod0[iii],'.',color=cols[ii],label=ll) + ll = "{} {} $\leq$ {} $<$ {} {}".format( + sepvals[ii - 1], sepunits, lname, sepvals[ii], sepunits + ).strip() + (p0,) = ax.plot(obs0[iii], mod0[iii], ".", color=cols[ii], label=ll) ps.append(p0) # greater than max: - iii=sep0>=sepvals[ii] - if np.sum(iii)>0: - #ll=u'{} \u2265 {} {}'.format(lname,sepvals[ii],sepunits).strip() - if len(labels)>0: - ll=labels[ii+1] + iii = sep0 >= sepvals[ii] + if np.sum(iii) > 0: + # ll=u'{} \u2265 {} {}'.format(lname,sepvals[ii],sepunits).strip() + if len(labels) > 0: + ll = labels[ii + 1] else: - ll=u'{} $\geq$ {} {}'.format(lname,sepvals[ii],sepunits).strip() - p0,=ax.plot(obs0[iii],mod0[iii],'.',color=cols[ii+1],label=ll) + ll = "{} $\geq$ {} {}".format(lname, sepvals[ii], sepunits).strip() + (p0,) = ax.plot(obs0[iii], mod0[iii], ".", color=cols[ii + 1], label=ll) ps.append(p0) return ps -def varvarIter(ax,df,obsvar,modvar,sepvar='',lname='', - cols=('darkslateblue','royalblue','skyblue','mediumseagreen','darkseagreen','goldenrod', - 
'coral','tomato','firebrick','mediumvioletred','magenta'),labels=''): - """ model vs obs plot like varvarScatter but colors taken from a list - as determined by determined from df[sepvar] and a list of bin edges, sepvals """ + +def varvarIter( + ax, + df, + obsvar, + modvar, + sepvar="", + lname="", + cols=( + "darkslateblue", + "royalblue", + "skyblue", + "mediumseagreen", + "darkseagreen", + "goldenrod", + "coral", + "tomato", + "firebrick", + "mediumvioletred", + "magenta", + ), + labels="", +): + """model vs obs plot like varvarScatter but colors taken from a list + as determined by determined from df[sepvar] and a list of bin edges, sepvals""" # remember labels must include < and > cases - if len(lname)==0: - lname=sepvar - ps=list() - df2=df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])] - for ii,isep in enumerate(df2[sepvar].drop_duplicates().values): - obs0=_deframe(df2.loc[df2[sepvar]==isep,[obsvar]]) - mod0=_deframe(df2.loc[df2[sepvar]==isep,[modvar]]) - p0,=ax.plot(obs0,mod0,'.',color=cols[ii],label=isep) + if len(lname) == 0: + lname = sepvar + ps = list() + df2 = df.loc[(df[obsvar] == df[obsvar]) & (df[modvar] == df[modvar])] + for ii, isep in enumerate(df2[sepvar].drop_duplicates().values): + obs0 = _deframe(df2.loc[df2[sepvar] == isep, [obsvar]]) + mod0 = _deframe(df2.loc[df2[sepvar] == isep, [modvar]]) + (p0,) = ax.plot(obs0, mod0, ".", color=cols[ii], label=isep) ps.append(p0) return ps -def tsertser_graph(ax,df,obsvar,modvar,start_date,end_date,sepvar='',sepvals=([]),lname='',sepunits='', - ocols=('blue','darkviolet','teal','green','deepskyblue'), - mcols=('fuchsia','firebrick','orange','darkgoldenrod','maroon'),labels=''): - """ Creates timeseries by adding scatter plot to axes ax with df['dtUTC'] on x-axis, - df[obsvar] and df[modvar] on y axis, and colors taken from a listas determined from - df[sepvar] and a list of bin edges, sepvals + +def tsertser_graph( + ax, + df, + obsvar, + modvar, + start_date, + end_date, + sepvar="", + sepvals=([]), + lname="", + sepunits="", + ocols=("blue", "darkviolet", "teal", "green", "deepskyblue"), + mcols=("fuchsia", "firebrick", "orange", "darkgoldenrod", "maroon"), + labels="", +): + """Creates timeseries by adding scatter plot to axes ax with df['dtUTC'] on x-axis, + df[obsvar] and df[modvar] on y axis, and colors taken from a listas determined from + df[sepvar] and a list of bin edges, sepvals """ - if len(lname)==0: - lname=sepvar - ps=list() - if len(sepvals)==0: - obs0=_deframe(df.loc[(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),[obsvar]]) - mod0=_deframe(df.loc[(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),[modvar]]) - time0=_deframe(df.loc[(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),['dtUTC']]) - p0,=ax.plot(time0,obs0,'.',color=ocols[0],label=f'Observed {lname}') + if len(lname) == 0: + lname = sepvar + ps = list() + if len(sepvals) == 0: + obs0 = _deframe( + df.loc[(df["dtUTC"] >= start_date) & (df["dtUTC"] <= end_date), [obsvar]] + ) + mod0 = _deframe( + df.loc[(df["dtUTC"] >= start_date) & (df["dtUTC"] <= end_date), [modvar]] + ) + time0 = _deframe( + df.loc[(df["dtUTC"] >= start_date) & (df["dtUTC"] <= end_date), ["dtUTC"]] + ) + (p0,) = ax.plot(time0, obs0, ".", color=ocols[0], label=f"Observed {lname}") ps.append(p0) - p0,=ax.plot(time0,mod0,'.',color=mcols[0],label=f'Modeled {lname}') + (p0,) = ax.plot(time0, mod0, ".", color=mcols[0], label=f"Modeled {lname}") ps.append(p0) else: - 
obs0=_deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar])&(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),[obsvar]]) - mod0=_deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar])&(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),[modvar]]) - time0=_deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar])&(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),['dtUTC']]) - sep0=_deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar])&(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),[sepvar]]) - sepvals=np.sort(sepvals) - # less than min case: - ii=0 - iii=sep00: - #ll=u'{} < {} {}'.format(lname,sepvals[ii],sepunits).strip() - if len(labels)>0: - ll=labels[0] + obs0 = _deframe( + df.loc[ + (df[obsvar] == df[obsvar]) + & (df[modvar] == df[modvar]) + & (df[sepvar] == df[sepvar]) + & (df["dtUTC"] >= start_date) + & (df["dtUTC"] <= end_date), + [obsvar], + ] + ) + mod0 = _deframe( + df.loc[ + (df[obsvar] == df[obsvar]) + & (df[modvar] == df[modvar]) + & (df[sepvar] == df[sepvar]) + & (df["dtUTC"] >= start_date) + & (df["dtUTC"] <= end_date), + [modvar], + ] + ) + time0 = _deframe( + df.loc[ + (df[obsvar] == df[obsvar]) + & (df[modvar] == df[modvar]) + & (df[sepvar] == df[sepvar]) + & (df["dtUTC"] >= start_date) + & (df["dtUTC"] <= end_date), + ["dtUTC"], + ] + ) + sep0 = _deframe( + df.loc[ + (df[obsvar] == df[obsvar]) + & (df[modvar] == df[modvar]) + & (df[sepvar] == df[sepvar]) + & (df["dtUTC"] >= start_date) + & (df["dtUTC"] <= end_date), + [sepvar], + ] + ) + sepvals = np.sort(sepvals) + # less than min case: + ii = 0 + iii = sep0 < sepvals[ii] + if np.sum(iii) > 0: + # ll=u'{} < {} {}'.format(lname,sepvals[ii],sepunits).strip() + if len(labels) > 0: + ll = labels[0] else: - ll=u'{} $<$ {} {}'.format(lname,sepvals[ii],sepunits).strip() - p0,=ax.plot(time0[iii],obs0[iii],'.',color=ocols[ii],label=f'Observed {ll}') + ll = "{} $<$ {} {}".format(lname, sepvals[ii], sepunits).strip() + (p0,) = ax.plot( + time0[iii], obs0[iii], ".", color=ocols[ii], label=f"Observed {ll}" + ) ps.append(p0) - p0,=ax.plot(time0[iii],mod0[iii],'.',color=mcols[ii],label=f'Modeled {ll}') + (p0,) = ax.plot( + time0[iii], mod0[iii], ".", color=mcols[ii], label=f"Modeled {ll}" + ) ps.append(p0) # between min and max: - for ii in range(1,len(sepvals)): - iii=np.logical_and(sep0=sepvals[ii-1]) - if np.sum(iii)>0: - #ll=u'{} {} \u2264 {} < {} {}'.format(sepvals[ii-1],sepunits,lname,sepvals[ii],sepunits).strip() - if len(labels)>0: - ll=labels[ii] + for ii in range(1, len(sepvals)): + iii = np.logical_and(sep0 < sepvals[ii], sep0 >= sepvals[ii - 1]) + if np.sum(iii) > 0: + # ll=u'{} {} \u2264 {} < {} {}'.format(sepvals[ii-1],sepunits,lname,sepvals[ii],sepunits).strip() + if len(labels) > 0: + ll = labels[ii] else: - ll=u'{} {} $\leq$ {} $<$ {} {}'.format(sepvals[ii-1],sepunits,lname,sepvals[ii],sepunits).strip() - p0,=ax.plot(time0[iii],obs0[iii],'.',color=ocols[ii],label=f'Observed {ll}') + ll = "{} {} $\leq$ {} $<$ {} {}".format( + sepvals[ii - 1], sepunits, lname, sepvals[ii], sepunits + ).strip() + (p0,) = ax.plot( + time0[iii], obs0[iii], ".", color=ocols[ii], label=f"Observed {ll}" + ) ps.append(p0) - p0,=ax.plot(time0[iii],mod0[iii],'.',color=mcols[ii],label=f'Modeled {ll}') + (p0,) = ax.plot( + time0[iii], mod0[iii], ".", color=mcols[ii], label=f"Modeled {ll}" + ) ps.append(p0) # greater than max: - iii=sep0>=sepvals[ii] - if np.sum(iii)>0: - #ll=u'{} \u2265 {} 
{}'.format(lname,sepvals[ii],sepunits).strip() - if len(labels)>0: - ll=labels[ii+1] + iii = sep0 >= sepvals[ii] + if np.sum(iii) > 0: + # ll=u'{} \u2265 {} {}'.format(lname,sepvals[ii],sepunits).strip() + if len(labels) > 0: + ll = labels[ii + 1] else: - ll=u'{} $\geq$ {} {}'.format(lname,sepvals[ii],sepunits).strip() - p0,=ax.plot(time0[iii],obs0[iii],'.',color=ocols[ii+1],label=f'Observed {ll}') + ll = "{} $\geq$ {} {}".format(lname, sepvals[ii], sepunits).strip() + (p0,) = ax.plot( + time0[iii], obs0[iii], ".", color=ocols[ii + 1], label=f"Observed {ll}" + ) ps.append(p0) - p0,=ax.plot(time0[iii],mod0[iii],'.',color=mcols[ii+1],label=f'Modeled {ll}') + (p0,) = ax.plot( + time0[iii], mod0[iii], ".", color=mcols[ii + 1], label=f"Modeled {ll}" + ) ps.append(p0) - yearsFmt = mdates.DateFormatter('%d %b %y') + yearsFmt = mdates.DateFormatter("%d %b %y") ax.xaxis.set_major_formatter(yearsFmt) return ps def _deframe(x): # if array is pandas series or dataframe, return the values only - if isinstance(x,pd.Series) or isinstance(x,pd.DataFrame): - x=x.values.flatten() + if isinstance(x, pd.Series) or isinstance(x, pd.DataFrame): + x = x.values.flatten() return x + def _flatten_nested_dict(tdic0): # used by displayStats function # tdic argument is nested dictionary of consistent structure - def _flatten_nested_dict_inner(tdic,ilist,data): + def _flatten_nested_dict_inner(tdic, ilist, data): # necessary because mutable defaults instantiated when function is defined; # need different entry point at start for el in tdic.keys(): - if isinstance(tdic[el],dict): - data=_flatten_nested_dict_inner(tdic[el],ilist+list((el,)),data) + if isinstance(tdic[el], dict): + data = _flatten_nested_dict_inner(tdic[el], ilist + list((el,)), data) else: - data.append(ilist+list((el,tdic[el]))) + data.append(ilist + list((el, tdic[el]))) return data - ilist0=list() - data0=list() - data0=_flatten_nested_dict_inner(tdic0,ilist0,data0) + + ilist0 = list() + data0 = list() + data0 = _flatten_nested_dict_inner(tdic0, ilist0, data0) return data0 -def displayStats(statdict,level='Subset',suborder=None): + +def displayStats(statdict, level="Subset", suborder=None): # stats dict starting from variable level - cols={'Subset':('Subset','Metric',''), - 'Variable':('Variable','Subset','Metric',''), - 'Year':('Year','Variable','Subset','Metric','')} - ind={'Subset':['Order','Subset','Metric'], - 'Variable':['Variable','Order','Subset','Metric'], - 'Year':['Variable','Subset','Metric']} - pcols={'Subset':['Metric'], - 'Variable':['Metric'], - 'Year':['Year','Metric']} - allrows=_flatten_nested_dict(statdict) - tdf=pd.DataFrame(allrows,columns=cols[level]) + cols = { + "Subset": ("Subset", "Metric", ""), + "Variable": ("Variable", "Subset", "Metric", ""), + "Year": ("Year", "Variable", "Subset", "Metric", ""), + } + ind = { + "Subset": ["Order", "Subset", "Metric"], + "Variable": ["Variable", "Order", "Subset", "Metric"], + "Year": ["Variable", "Subset", "Metric"], + } + pcols = {"Subset": ["Metric"], "Variable": ["Metric"], "Year": ["Year", "Metric"]} + allrows = _flatten_nested_dict(statdict) + tdf = pd.DataFrame(allrows, columns=cols[level]) if suborder is not None: - subD={suborder[ii]: ii for ii in range(0,len(suborder))} - tdf['Order']=[subD[tdf['Subset'][ii]] for ii in range(0,len(tdf['Subset']))] - tdf.set_index(ind[level],inplace=True) - tbl=pd.pivot_table(tdf,index=ind[level][:-1],columns=pcols[level]).rename_axis(index={'Order':None},columns={'Metric':None}).style.format({ - 'N': '{:d}', - 'Bias':'{:.3f}', - 
'WSS':'{:.3f}', - 'RMSE':'{:.3f}'}) - return tbl,tdf - -def displayStatsFlex(statdict,cols,ind,pcols,suborder=None): + subD = {suborder[ii]: ii for ii in range(0, len(suborder))} + tdf["Order"] = [subD[tdf["Subset"][ii]] for ii in range(0, len(tdf["Subset"]))] + tdf.set_index(ind[level], inplace=True) + tbl = ( + pd.pivot_table(tdf, index=ind[level][:-1], columns=pcols[level]) + .rename_axis(index={"Order": None}, columns={"Metric": None}) + .style.format( + {"N": "{:d}", "Bias": "{:.3f}", "WSS": "{:.3f}", "RMSE": "{:.3f}"} + ) + ) + return tbl, tdf + + +def displayStatsFlex(statdict, cols, ind, pcols, suborder=None): # more flexible version of stats display # stats dict starting from variable level - allrows=_flatten_nested_dict(statdict) - tdf=pd.DataFrame(allrows,columns=cols) + allrows = _flatten_nested_dict(statdict) + tdf = pd.DataFrame(allrows, columns=cols) if suborder is not None: - subD={suborder[ii]: ii for ii in range(0,len(suborder))} - tdf['Order']=[subD[tdf['Subset'][ii]] for ii in range(0,len(tdf['Subset']))] - tdf.set_index(ind,inplace=True) - tbl=pd.pivot_table(tdf,index=ind[:-1],columns=pcols).rename_axis(index={'Order':None},columns={'Metric':None}).style.format({ - 'N': '{:d}', - 'Bias':'{:.3f}', - 'WSS':'{:.3f}', - 'RMSE':'{:.3f}'}) - return tbl,tdf + subD = {suborder[ii]: ii for ii in range(0, len(suborder))} + tdf["Order"] = [subD[tdf["Subset"][ii]] for ii in range(0, len(tdf["Subset"]))] + tdf.set_index(ind, inplace=True) + tbl = ( + pd.pivot_table(tdf, index=ind[:-1], columns=pcols) + .rename_axis(index={"Order": None}, columns={"Metric": None}) + .style.format( + {"N": "{:d}", "Bias": "{:.3f}", "WSS": "{:.3f}", "RMSE": "{:.3f}"} + ) + ) + return tbl, tdf + def utc_to_pac(timeArray): # UTC to Pacific time zone - return [pytz.utc.localize(ii).astimezone(pytz.timezone('Canada/Pacific')) for ii in timeArray] + return [ + pytz.utc.localize(ii).astimezone(pytz.timezone("Canada/Pacific")) + for ii in timeArray + ] + def pac_to_utc(pactime0): # input datetime object without tzinfo in Pacific Time and # output datetime object (or np array of them) without tzinfo in UTC - pactime=np.array(pactime0,ndmin=1) - if pactime.ndim>1: - raise Exception('Error: ndim>1') - out=np.empty(pactime.shape,dtype=object) - pac=pytz.timezone('Canada/Pacific') - utc=pytz.utc - for ii in range(0,len(pactime)): - itime=pactime[ii] - loc_t=pac.localize(itime) - utc_t=loc_t.astimezone(utc) - out[ii]=utc_t.replace(tzinfo=None) - return (out[0] if np.shape(pactime0)==() else out) + pactime = np.array(pactime0, ndmin=1) + if pactime.ndim > 1: + raise Exception("Error: ndim>1") + out = np.empty(pactime.shape, dtype=object) + pac = pytz.timezone("Canada/Pacific") + utc = pytz.utc + for ii in range(0, len(pactime)): + itime = pactime[ii] + loc_t = pac.localize(itime) + utc_t = loc_t.astimezone(utc) + out[ii] = utc_t.replace(tzinfo=None) + return out[0] if np.shape(pactime0) == () else out + def pdt_to_utc(pactime0): # input datetime object without tzinfo in Pacific Daylight Time and # output datetime object (or np array of them) without tzinfo in UTC # verified: PDT is GMT+7 at all times of year - pactime=np.array(pactime0,ndmin=1) - if pactime.ndim>1: - raise Exception('Error: ndim>1') - out=np.empty(pactime.shape,dtype=object) - pac=pytz.timezone('Etc/GMT+7') - utc=pytz.utc - for ii in range(0,len(pactime)): - itime=pactime[ii] - loc_t=pac.localize(itime) - utc_t=loc_t.astimezone(utc) - out[ii]=utc_t.replace(tzinfo=None) - return (out[0] if np.shape(pactime0)==() else out) + pactime = 
np.array(pactime0, ndmin=1) + if pactime.ndim > 1: + raise Exception("Error: ndim>1") + out = np.empty(pactime.shape, dtype=object) + pac = pytz.timezone("Etc/GMT+7") + utc = pytz.utc + for ii in range(0, len(pactime)): + itime = pactime[ii] + loc_t = pac.localize(itime) + utc_t = loc_t.astimezone(utc) + out[ii] = utc_t.replace(tzinfo=None) + return out[0] if np.shape(pactime0) == () else out + def pst_to_utc(pactime0): # input datetime object without tzinfo in Pacific Standard Time and # output datetime object (or np array of them) without tzinfo in UTC # verified: PST is GMT+8 at all times of year (GMT does not switch) - pactime=np.array(pactime0,ndmin=1) - if pactime.ndim>1: - raise Exception('Error: ndim>1') - out=np.empty(pactime.shape,dtype=object) - pac=pytz.timezone('Etc/GMT+8') - utc=pytz.utc - for ii in range(0,len(pactime)): - itime=pactime[ii] - loc_t=pac.localize(itime) - utc_t=loc_t.astimezone(utc) - out[ii]=utc_t.replace(tzinfo=None) - return (out[0] if np.shape(pactime0)==() else out) + pactime = np.array(pactime0, ndmin=1) + if pactime.ndim > 1: + raise Exception("Error: ndim>1") + out = np.empty(pactime.shape, dtype=object) + pac = pytz.timezone("Etc/GMT+8") + utc = pytz.utc + for ii in range(0, len(pactime)): + itime = pactime[ii] + loc_t = pac.localize(itime) + utc_t = loc_t.astimezone(utc) + out[ii] = utc_t.replace(tzinfo=None) + return out[0] if np.shape(pactime0) == () else out + def datetimeToDecDay(dtin0): # handle single datetimes or arrays - dtin=np.array(dtin0,ndmin=1) - if dtin.ndim>1: - raise Exception('Error: ndim>1') - out=np.empty(dtin.shape,dtype=object) - for ii in range(0,len(dtin)): - tdif=dtin[ii]-dt.datetime(1900,1,1) - out[ii]=tdif.days+tdif.seconds/(3600*24) - return (out[0] if np.shape(dtin0)==() else out) - -def printstats(datadf,obsvar,modvar): - N, modmean, obsmean, bias, RMSE, WSS = stats(datadf.loc[:,[obsvar]],datadf.loc[:,[modvar]]) - print(' N: {}\n bias: {}\n RMSE: {}\n WSS: {}'.format(N,bias,RMSE,WSS)) + dtin = np.array(dtin0, ndmin=1) + if dtin.ndim > 1: + raise Exception("Error: ndim>1") + out = np.empty(dtin.shape, dtype=object) + for ii in range(0, len(dtin)): + tdif = dtin[ii] - dt.datetime(1900, 1, 1) + out[ii] = tdif.days + tdif.seconds / (3600 * 24) + return out[0] if np.shape(dtin0) == () else out + + +def printstats(datadf, obsvar, modvar): + N, modmean, obsmean, bias, RMSE, WSS = stats( + datadf.loc[:, [obsvar]], datadf.loc[:, [modvar]] + ) + print(" N: {}\n bias: {}\n RMSE: {}\n WSS: {}".format(N, bias, RMSE, WSS)) return + def datetimeToYD(idt): - if type(idt)==dt.datetime: - yd=(idt-dt.datetime(idt.year-1,12,31)).days - else: # assume array or pandas, or acts like it - yd=[(ii-dt.datetime(ii.year-1,12,31)).days for ii in idt] + if type(idt) == dt.datetime: + yd = (idt - dt.datetime(idt.year - 1, 12, 31)).days + else: # assume array or pandas, or acts like it + yd = [(ii - dt.datetime(ii.year - 1, 12, 31)).days for ii in idt] return yd -def getChlNRatio(nmlfile='namelist_smelt_cfg',basedir=None,nam_fmt=None,idt=dt.datetime(2015,1,1)): - """ for a given run, load the bio namelist and extract the chl to nitrogen ratio for phytoplankton - """ + +def getChlNRatio( + nmlfile="namelist_smelt_cfg", + basedir=None, + nam_fmt=None, + idt=dt.datetime(2015, 1, 1), +): + """for a given run, load the bio namelist and extract the chl to nitrogen ratio for phytoplankton""" if not ((basedir and nam_fmt) or os.path.isfile(nmlfile)): - raise Exception('nmlfile must contain full namelist path or basedir and nam_fmt must be defined') + raise 
Exception( + "nmlfile must contain full namelist path or basedir and nam_fmt must be defined" + ) if basedir: - if nam_fmt=='nowcast': - nmlfile=os.path.join(basedir,idt.strftime('%d%b%y').lower(),nmlfile) - elif nam_fmt=='long': - nmlfile=os.path.join(basedir,nmlfile) + if nam_fmt == "nowcast": + nmlfile = os.path.join(basedir, idt.strftime("%d%b%y").lower(), nmlfile) + elif nam_fmt == "long": + nmlfile = os.path.join(basedir, nmlfile) else: - raise Exception('Invalid nam_fmt') + raise Exception("Invalid nam_fmt") with open(nmlfile) as nmlf: - nml=f90nml.read(nmlf) - return nml['nampisprod']['zz_rate_si_ratio_diat'] + nml = f90nml.read(nmlf) + return nml["nampisprod"]["zz_rate_si_ratio_diat"] -def load_Pheo_data(year,datadir='/ocean/eolson/MEOPAR/obs/WADE/ptools_data/ecology'): - """ This function automatically loads the chlorophyll bottle data from WADE for a - given year specified by the user. The output is a pandas dataframe with all of - the necessary columns and groups needed for matching to the model data. + +def load_Pheo_data(year, datadir="/ocean/eolson/MEOPAR/obs/WADE/ptools_data/ecology"): + """This function automatically loads the chlorophyll bottle data from WADE for a + given year specified by the user. The output is a pandas dataframe with all of + the necessary columns and groups needed for matching to the model data. """ ## duplicate Station/Date entries with different times seem to be always within a couple of hours, # so just take the first (next cell) - dfTime=pd.read_excel('/ocean/eolson/MEOPAR/obs/WADE/WDE_Data/OlsonSuchyAllen_UBC_PDR_P003790-010721.xlsx', - engine='openpyxl',sheet_name='EventDateTime') - test=dfTime.groupby(['FlightDate','SiteCode'])['TimeDown \n(Local - PST or PDT)'].count() + dfTime = pd.read_excel( + "/ocean/eolson/MEOPAR/obs/WADE/WDE_Data/OlsonSuchyAllen_UBC_PDR_P003790-010721.xlsx", + engine="openpyxl", + sheet_name="EventDateTime", + ) + test = dfTime.groupby(["FlightDate", "SiteCode"])[ + "TimeDown \n(Local - PST or PDT)" + ].count() # drop duplicate rows - dfTime.drop_duplicates(subset=['FlightDate','SiteCode'],keep='first',inplace=True) + dfTime.drop_duplicates( + subset=["FlightDate", "SiteCode"], keep="first", inplace=True + ) print(dfTime.keys()) - dfTime['dtPac']=[dt.datetime.combine(idate, itime) for idate, itime \ - in zip(dfTime['FlightDate'],dfTime['TimeDown \n(Local - PST or PDT)'])] - dfTime['dtUTC']=[pac_to_utc(ii) for ii in dfTime['dtPac']] + dfTime["dtPac"] = [ + dt.datetime.combine(idate, itime) + for idate, itime in zip( + dfTime["FlightDate"], dfTime["TimeDown \n(Local - PST or PDT)"] + ) + ] + dfTime["dtUTC"] = [pac_to_utc(ii) for ii in dfTime["dtPac"]] # PROCESS STATION LOCATION INFO (based on Parker's code) - sta_fn='/ocean/eolson/MEOPAR/obs/WADE/WDE_Data/OlsonSuchyAllen_UBC_PDR_P003790-010721.xlsx' - sheetname='Site Info' - sta_df =pd.read_excel(sta_fn,engine='openpyxl',sheet_name=sheetname) - sta_df.dropna(how='any',subset=['Lat_NAD83 (deg / dec_min)','Long_NAD83 (deg / dec_min)','Station'],inplace=True) - sta_df = sta_df.set_index('Station') + sta_fn = "/ocean/eolson/MEOPAR/obs/WADE/WDE_Data/OlsonSuchyAllen_UBC_PDR_P003790-010721.xlsx" + sheetname = "Site Info" + sta_df = pd.read_excel(sta_fn, engine="openpyxl", sheet_name=sheetname) + sta_df.dropna( + how="any", + subset=["Lat_NAD83 (deg / dec_min)", "Long_NAD83 (deg / dec_min)", "Station"], + inplace=True, + ) + sta_df = sta_df.set_index("Station") # get locations in decimal degrees for sta in sta_df.index: - lat_str = sta_df.loc[sta, 'Lat_NAD83 (deg / dec_min)'] - 
lat_deg = float(lat_str.split()[0]) + float(lat_str.split()[1])/60 - sta_df.loc[sta,'Lat'] = lat_deg + lat_str = sta_df.loc[sta, "Lat_NAD83 (deg / dec_min)"] + lat_deg = float(lat_str.split()[0]) + float(lat_str.split()[1]) / 60 + sta_df.loc[sta, "Lat"] = lat_deg # - lon_str = sta_df.loc[sta, 'Long_NAD83 (deg / dec_min)'] - lon_deg = float(lon_str.split()[0]) + float(lon_str.split()[1])/60 - sta_df.loc[sta,'Lon'] = -lon_deg - sta_df.pop('Lat_NAD83 (deg / dec_min)'); - sta_df.pop('Long_NAD83 (deg / dec_min)'); - fn='/ocean/eolson/MEOPAR/obs/WADE/WDE_Data/OlsonSuchyAllen_UBC_PDR_P003790-010721.xlsx' - sheetname='LabChlaPheo' - chlPheo =pd.read_excel(fn,engine='openpyxl',sheet_name=sheetname) - chlPheo.dropna(how='any',subset=['Date','Station','SamplingDepth'],inplace=True) + lon_str = sta_df.loc[sta, "Long_NAD83 (deg / dec_min)"] + lon_deg = float(lon_str.split()[0]) + float(lon_str.split()[1]) / 60 + sta_df.loc[sta, "Lon"] = -lon_deg + sta_df.pop("Lat_NAD83 (deg / dec_min)") + sta_df.pop("Long_NAD83 (deg / dec_min)") + fn = "/ocean/eolson/MEOPAR/obs/WADE/WDE_Data/OlsonSuchyAllen_UBC_PDR_P003790-010721.xlsx" + sheetname = "LabChlaPheo" + chlPheo = pd.read_excel(fn, engine="openpyxl", sheet_name=sheetname) + chlPheo.dropna(how="any", subset=["Date", "Station", "SamplingDepth"], inplace=True) # average over replicates - chlPheo2=pd.DataFrame(chlPheo.groupby(['Date','Station','SamplingDepth'],as_index=False).mean()) + chlPheo2 = pd.DataFrame( + chlPheo.groupby(["Date", "Station", "SamplingDepth"], as_index=False).mean() + ) # join to station info (lat/lon) - chlPheo3=pd.merge(left=sta_df,right=chlPheo2,how='right', - left_on='Station',right_on='Station') + chlPheo3 = pd.merge( + left=sta_df, right=chlPheo2, how="right", left_on="Station", right_on="Station" + ) # join to date/time - dfTime['dtUTC']=[pac_to_utc(dt.datetime.combine(idate,itime)) for idate,itime in \ - zip(dfTime['FlightDate'],dfTime['TimeDown \n(Local - PST or PDT)'])] - dfTime2=dfTime.loc[:,['FlightDate','SiteCode','dtUTC']] - chlPheoFinal=pd.merge(left=chlPheo3,right=dfTime2,how='left', - left_on=['Date','Station'],right_on=['FlightDate','SiteCode']) - #drop the 47 NA datetime values - chlPheoFinal.dropna(how='any',subset=['dtUTC'],inplace=True) - #Add extra columns for later use - chlPheoFinal['Z']=chlPheoFinal['SamplingDepth'] - chlPheoFinal['Year']=[ii.year for ii in chlPheoFinal['dtUTC']] - chlPheoFinal['YD']=datetimeToYD(chlPheoFinal['dtUTC']) - chlPheoYear=pd.DataFrame(chlPheoFinal.loc[chlPheoFinal.Year==year]) + dfTime["dtUTC"] = [ + pac_to_utc(dt.datetime.combine(idate, itime)) + for idate, itime in zip( + dfTime["FlightDate"], dfTime["TimeDown \n(Local - PST or PDT)"] + ) + ] + dfTime2 = dfTime.loc[:, ["FlightDate", "SiteCode", "dtUTC"]] + chlPheoFinal = pd.merge( + left=chlPheo3, + right=dfTime2, + how="left", + left_on=["Date", "Station"], + right_on=["FlightDate", "SiteCode"], + ) + # drop the 47 NA datetime values + chlPheoFinal.dropna(how="any", subset=["dtUTC"], inplace=True) + # Add extra columns for later use + chlPheoFinal["Z"] = chlPheoFinal["SamplingDepth"] + chlPheoFinal["Year"] = [ii.year for ii in chlPheoFinal["dtUTC"]] + chlPheoFinal["YD"] = datetimeToYD(chlPheoFinal["dtUTC"]) + chlPheoYear = pd.DataFrame(chlPheoFinal.loc[chlPheoFinal.Year == year]) return chlPheoYear -def load_WADE_data(year,datadir='/ocean/eolson/MEOPAR/obs/WADE/ptools_data/ecology'): - """ This function automatically loads the nutrient bottle data from WADE for a given year - specified by the user. 
The output is a pandas dataframe with all of te necessary - columns and groups needed for matching to the model data. + +def load_WADE_data(year, datadir="/ocean/eolson/MEOPAR/obs/WADE/ptools_data/ecology"): + """This function automatically loads the nutrient bottle data from WADE for a given year + specified by the user. The output is a pandas dataframe with all of te necessary + columns and groups needed for matching to the model data. """ - dfSta=pickle.load(open(os.path.join(datadir,'sta_df.p'),'rb')) - dfBot=pickle.load(open(os.path.join(datadir,f'Bottles_{str(year)}.p'),'rb')) - df=pd.merge(left=dfSta,right=dfBot,how='right', - left_on='Station',right_on='Station') + dfSta = pickle.load(open(os.path.join(datadir, "sta_df.p"), "rb")) + dfBot = pickle.load(open(os.path.join(datadir, f"Bottles_{str(year)}.p"), "rb")) + df = pd.merge( + left=dfSta, right=dfBot, how="right", left_on="Station", right_on="Station" + ) try: - len(df.loc[pd.isnull(df['Latitude'])]) == 0 + len(df.loc[pd.isnull(df["Latitude"])]) == 0 except: pass - print('Warning!, Stations found without Latitude or Longitude value!') + print("Warning!, Stations found without Latitude or Longitude value!") try: len(df) == len(dfBot) except: pass - print(f'Warning!, Merge completed incorrectly. length of bottle data = {len(dfBot)} length of merged data = {len(df)}') + print( + f"Warning!, Merge completed incorrectly. length of bottle data = {len(dfBot)} length of merged data = {len(df)}" + ) # where no time is provided, set time to midday Pacific time = ~ 20:00 UTC - df['UTCDateTime']=[iiD+dt.timedelta(hours=20) if pd.isnull(iiU) \ - else iiU for iiU,iiD in \ - zip(df['UTCDateTime'],df['Date'])] - df.rename(columns={'UTCDateTime':'dtUTC','Latitude':'Lat','Longitude':'Lon'},inplace=True) - df['Z']=-1*df['Z'] + df["UTCDateTime"] = [ + iiD + dt.timedelta(hours=20) if pd.isnull(iiU) else iiU + for iiU, iiD in zip(df["UTCDateTime"], df["Date"]) + ] + df.rename( + columns={"UTCDateTime": "dtUTC", "Latitude": "Lat", "Longitude": "Lon"}, + inplace=True, + ) + df["Z"] = -1 * df["Z"] df.head() - df['NO23']=df['NO3(uM)D']+df['NO2(uM)D'] # the model does not distinguish between NO2 and NO3 - df['Amm']=df['NH4(uM)D'] - df['Si']=df['SiOH4(uM)D'] - df['Year']=[ii.year for ii in df['dtUTC']] - df['YD']=datetimeToYD(df['dtUTC']) - return(df) + df["NO23"] = ( + df["NO3(uM)D"] + df["NO2(uM)D"] + ) # the model does not distinguish between NO2 and NO3 + df["Amm"] = df["NH4(uM)D"] + df["Si"] = df["SiOH4(uM)D"] + df["Year"] = [ii.year for ii in df["dtUTC"]] + df["YD"] = datetimeToYD(df["dtUTC"]) + return df -def load_CTD_data(year,datadir='/ocean/eolson/MEOPAR/obs/WADE/ptools_data/ecology'): - """ Returns a dataframe containing CTD data for a given year merged with station data - """ - dfSta=pickle.load(open(os.path.join(datadir,'sta_df.p'),'rb')) - dfCTD0=pickle.load(open(os.path.join(datadir,f'Casts_{str(year)}.p'),'rb')) - dfCTD=pd.merge(left=dfSta,right=dfCTD0,how='right', - left_on='Station',right_on='Station') +def load_CTD_data(year, datadir="/ocean/eolson/MEOPAR/obs/WADE/ptools_data/ecology"): + """Returns a dataframe containing CTD data for a given year merged with station data""" + dfSta = pickle.load(open(os.path.join(datadir, "sta_df.p"), "rb")) + dfCTD0 = pickle.load(open(os.path.join(datadir, f"Casts_{str(year)}.p"), "rb")) + dfCTD = pd.merge( + left=dfSta, right=dfCTD0, how="right", left_on="Station", right_on="Station" + ) try: - dfCTD.groupby(['Station','Year','YD','Z']).count()==[1] + dfCTD.groupby(["Station", "Year", "YD", 
"Z"]).count() == [1] except: pass - print('Only one cast per CTD station per day') + print("Only one cast per CTD station per day") # where no time is provided, set time to midday Pacific time = ~ 20:00 UTC - dfCTD['dtUTC']=[iiD+dt.timedelta(hours=20) for iiD in dfCTD['Date']] #Does this mean it also has that flaw where we are not sure when the data was collected? - dfCTD.rename(columns={'Latitude':'Lat','Longitude':'Lon'},inplace=True) - dfCTD['Z']=-1*dfCTD['Z'] + dfCTD["dtUTC"] = [ + iiD + dt.timedelta(hours=20) for iiD in dfCTD["Date"] + ] # Does this mean it also has that flaw where we are not sure when the data was collected? + dfCTD.rename(columns={"Latitude": "Lat", "Longitude": "Lon"}, inplace=True) + dfCTD["Z"] = -1 * dfCTD["Z"] # Calculate Absolute (Reference) Salinity (g/kg) and Conservative Temperature (deg C) from # Salinity (psu) and Temperature (deg C): - press=gsw.p_from_z(-1*dfCTD['Z'],dfCTD['Lat']) - dfCTD['SA']=gsw.SA_from_SP(dfCTD['Salinity'],press, - dfCTD['Lon'],dfCTD['Lat']) - dfCTD['CT']=gsw.CT_from_t(dfCTD['SA'],dfCTD['Temperature'],press) - dfCTD['Year']=[ii.year for ii in dfCTD['dtUTC']] - dfCTD['YD']=datetimeToYD(dfCTD['dtUTC']) - return(dfCTD) + press = gsw.p_from_z(-1 * dfCTD["Z"], dfCTD["Lat"]) + dfCTD["SA"] = gsw.SA_from_SP(dfCTD["Salinity"], press, dfCTD["Lon"], dfCTD["Lat"]) + dfCTD["CT"] = gsw.CT_from_t(dfCTD["SA"], dfCTD["Temperature"], press) + dfCTD["Year"] = [ii.year for ii in dfCTD["dtUTC"]] + dfCTD["YD"] = datetimeToYD(dfCTD["dtUTC"]) + return dfCTD diff --git a/SalishSeaTools/salishsea_tools/formatting_tools.py b/SalishSeaTools/salishsea_tools/formatting_tools.py index 46af4dc6..e6c21fd0 100644 --- a/SalishSeaTools/salishsea_tools/formatting_tools.py +++ b/SalishSeaTools/salishsea_tools/formatting_tools.py @@ -18,16 +18,16 @@ #: String to LaTeX notation mapping for units STR_LATEX_MAPPING = { - 'm/s': 'm/s', - 'm2/s': 'm$^2$ / s$', - 'degrees_east': '$^\circ$E', - 'degrees_north': '$^\circ$N', - 'degC': '$^\circ$C', - 'g kg-1': 'g / kg', - 'g/kg': 'g / kg', - 'mmol m-3': 'mmol / $m^{3}$', - 'mmol/m3': 'mmol / $m^{3}$', - 'm2/s3': 'm$^2$ / s$^3$' + "m/s": "m/s", + "m2/s": "m$^2$ / s$", + "degrees_east": "$^\circ$E", + "degrees_north": "$^\circ$N", + "degC": "$^\circ$C", + "g kg-1": "g / kg", + "g/kg": "g / kg", + "mmol m-3": "mmol / $m^{3}$", + "mmol/m3": "mmol / $m^{3}$", + "m2/s3": "m$^2$ / s$^3$", } @@ -42,5 +42,4 @@ def format_units(units): try: return STR_LATEX_MAPPING[units] except KeyError: - raise KeyError( - 'units not found in string to LaTeX mapping: {}'.format(units)) + raise KeyError("units not found in string to LaTeX mapping: {}".format(units)) diff --git a/SalishSeaTools/salishsea_tools/geo_tools.py b/SalishSeaTools/salishsea_tools/geo_tools.py index a6bf0dc5..70a89ae9 100644 --- a/SalishSeaTools/salishsea_tools/geo_tools.py +++ b/SalishSeaTools/salishsea_tools/geo_tools.py @@ -59,7 +59,7 @@ def haversine(lon1, lat1, lon2, lat2): lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2]) dlon = lon2 - lon1 dlat = lat2 - lat1 - a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2 + a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2 c = 2 * np.arcsin(np.sqrt(a)) km = 6367 * c return km @@ -73,19 +73,24 @@ def _spiral_search_for_closest_water_point( jmax, imax = land_mask.shape # Limit on size of grid search - max_search_dist = max(50, int(model_lats.shape[1]/4)) + max_search_dist = max(50, int(model_lats.shape[1] / 4)) closest_point = None j_s, i_s = j, i # starting point is j, i 
dj, di = 0, -1 # move j_s, i_s in a square spiral centred at j, i - while (i_s-i) <= max_search_dist: - if any([(j_s-j) == (i_s-i), - ((j_s-j) < 0 and (j_s-j) == -(i_s-i)), - ((j_s-j) > 0 and (j_s-j) == 1-(i_s-i))]): + while (i_s - i) <= max_search_dist: + if any( + [ + (j_s - j) == (i_s - i), + ((j_s - j) < 0 and (j_s - j) == -(i_s - i)), + ((j_s - j) > 0 and (j_s - j) == 1 - (i_s - i)), + ] + ): # Hit the corner of the spiral- change direction dj, di = -di, dj - i_s, j_s = i_s+di, j_s+dj # Take a step to next square - if (i_s >= 0 + i_s, j_s = i_s + di, j_s + dj # Take a step to next square + if ( + i_s >= 0 and i_s < imax and j_s >= 0 and j_s < jmax @@ -93,7 +98,8 @@ def _spiral_search_for_closest_water_point( ): # Found a water point, how close is it? actual_dist = haversine( - lon, lat, model_lons[j_s, i_s], model_lats[j_s, i_s]) + lon, lat, model_lons[j_s, i_s], model_lats[j_s, i_s] + ) if closest_point is None: min_dist = actual_dist closest_point = (j_s, i_s) @@ -103,7 +109,7 @@ def _spiral_search_for_closest_water_point( closest_point = (j_s, i_s) # Assumes grids are square- reduces search radius to only # check grids that could potentially be closer than this - grid_dist = int(((i_s-i)**2 + (j_s-j)**2)**0.5) + grid_dist = int(((i_s - i) ** 2 + (j_s - j) ** 2) ** 0.5) if (grid_dist + 1) < max_search_dist: # Reduce stopping distance for spiral- # just need to check that no points closer than this one @@ -111,10 +117,11 @@ def _spiral_search_for_closest_water_point( if closest_point is not None: return closest_point else: - raise ValueError('lat/lon on land and no nearby water point found') + raise ValueError("lat/lon on land and no nearby water point found") + -def get_ij_coordinates(lat,lon,grid_loc='~/MEOPAR/grid/grid_from_lat_lon_mask999.nc'): - """ Finds the closest ii and jj model coordinates by matching Latitude and +def get_ij_coordinates(lat, lon, grid_loc="~/MEOPAR/grid/grid_from_lat_lon_mask999.nc"): + """Finds the closest ii and jj model coordinates by matching Latitude and Longitude to the new grid_from_lat_lon_mask999.nc file :arg float lat: The Latitude of the point in question in decimal degrees. @@ -124,7 +131,7 @@ def get_ij_coordinates(lat,lon,grid_loc='~/MEOPAR/grid/grid_from_lat_lon_mask999 :arg str grid_loc: The location of the grid_from_lat_lon nc file on your system. """ jjii = xr.open_dataset(grid_loc) - method = 'nearest' + method = "nearest" jj = jjii.jj.sel(lats=lat, lons=lon, method=method).item() ii = jjii.ii.sel(lats=lat, lons=lon, method=method).item() jjii.close() @@ -132,14 +139,19 @@ def get_ij_coordinates(lat,lon,grid_loc='~/MEOPAR/grid/grid_from_lat_lon_mask999 def find_closest_model_point( - lon, lat, model_lons, model_lats, grid='NEMO', land_mask=None, + lon, + lat, + model_lons, + model_lats, + grid="NEMO", + land_mask=None, tols={ - 'NEMO': {'tol_lon': 0.007, 'tol_lat': 0.004}, - 'GEM2.5': {'tol_lon': 0.018, 'tol_lat': 0.013}, - 'continental2.5': {'tol_lon': 0.018, 'tol_lat': 0.013}, + "NEMO": {"tol_lon": 0.007, "tol_lat": 0.004}, + "GEM2.5": {"tol_lon": 0.018, "tol_lat": 0.013}, + "continental2.5": {"tol_lon": 0.018, "tol_lat": 0.013}, }, checkTol=False, - raiseOutOfBounds=False + raiseOutOfBounds=False, ): """Returns the grid coordinates of the closest model point to a specified lon/lat. If land_mask is provided, returns the closest @@ -188,25 +200,35 @@ def find_closest_model_point( if grid not in tols: raise KeyError( - 'The provided grid type is not in tols. 
' - 'Use another grid type or add your grid type to tols.') + "The provided grid type is not in tols. " + "Use another grid type or add your grid type to tols." + ) # Search for a grid point with longitude and latitude within # tolerance of measured location j_list, i_list = np.where( np.logical_and( - (np.logical_and(model_lons > lon - tols[grid]['tol_lon'], - model_lons < lon + tols[grid]['tol_lon'])), - (np.logical_and(model_lats > lat - tols[grid]['tol_lat'], - model_lats < lat + tols[grid]['tol_lat'])) + ( + np.logical_and( + model_lons > lon - tols[grid]["tol_lon"], + model_lons < lon + tols[grid]["tol_lon"], + ) + ), + ( + np.logical_and( + model_lats > lat - tols[grid]["tol_lat"], + model_lats < lat + tols[grid]["tol_lat"], + ) + ), ) ) if len(j_list) == 0: if raiseOutOfBounds: raise ValueError( - f'No model point found at ({lon, lat}). tol_lon/tol_lat too small or ' - 'lon/lat outside of domain.') + f"No model point found at ({lon, lat}). tol_lon/tol_lat too small or " + "lon/lat outside of domain." + ) else: return np.nan, np.nan try: @@ -221,8 +243,8 @@ def find_closest_model_point( lons = [model_lons[j_list[n], i_list[n]] for n in range(len(j_list))] lats = [model_lats[j_list[n], i_list[n]] for n in range(len(j_list))] dists = haversine( - np.array([lon] * i_list.size), np.array([lat] * j_list.size), - lons, lats) + np.array([lon] * i_list.size), np.array([lat] * j_list.size), lons, lats + ) n = dists.argmin() j, i = j_list.item(n), i_list.item(n) @@ -233,28 +255,37 @@ def find_closest_model_point( try: if checkTol: j2, i2 = _spiral_search_for_closest_water_point( - j, i, land_mask, lon, lat, model_lons, model_lats) - if (np.abs(model_lons[j2, i2] - lon) > tols[grid]['tol_lon']) or \ - (np.abs(model_lats[j2, i2] - lat) > tols[grid]['tol_lat']): + j, i, land_mask, lon, lat, model_lons, model_lats + ) + if (np.abs(model_lons[j2, i2] - lon) > tols[grid]["tol_lon"]) or ( + np.abs(model_lats[j2, i2] - lat) > tols[grid]["tol_lat"] + ): return np.nan, np.nan else: return j2, i2 else: return _spiral_search_for_closest_water_point( - j, i, land_mask, lon, lat, model_lons, model_lats) + j, i, land_mask, lon, lat, model_lons, model_lats + ) except ValueError: if raiseOutOfBounds: - raise ValueError( - 'lat/lon on land and no nearby water point found') + raise ValueError("lat/lon on land and no nearby water point found") else: return np.nan, np.nan -def closestPointArray(lons,lats, - model_lons, model_lats, tol2=1, grid='NEMO', land_mask=None, + +def closestPointArray( + lons, + lats, + model_lons, + model_lats, + tol2=1, + grid="NEMO", + land_mask=None, tols={ - 'NEMO': {'tol_lon': 0.0104, 'tol_lat': 0.00388}, - 'GEM2.5': {'tol_lon': 0.016, 'tol_lat': 0.012}, - } + "NEMO": {"tol_lon": 0.0104, "tol_lat": 0.00388}, + "GEM2.5": {"tol_lon": 0.016, "tol_lat": 0.012}, + }, ): """Wrapper on find_closest_model_point that is faster if you have many points to locate AND you expect the points to be ordered such that each point is likely close to the point ahead @@ -272,31 +303,42 @@ def closestPointArray(lons,lats, :returns: yinds, xinds: numpy arrays of same shape as input lons """ - tol2=int(tol2) # ensure integer indices - mj,mi=np.shape(model_lons) - outi=np.nan*np.ones(np.shape(lons)) - outj=np.nan*np.ones(np.shape(lons)) - ilast=np.nan - jlast=np.nan - for kk in range(0,len(lons)): + tol2 = int(tol2) # ensure integer indices + mj, mi = np.shape(model_lons) + outi = np.nan * np.ones(np.shape(lons)) + outj = np.nan * np.ones(np.shape(lons)) + ilast = np.nan + jlast = np.nan + for kk in range(0, 
len(lons)): if not np.isnan(ilast): - jjs=max(0,jlast-tol2-1) - jje=min(mj,jlast+1+tol2+1) - iis=max(0,ilast-tol2-1) - iie=min(mi,ilast+1+tol2+1) - jj,ii=find_closest_model_point(lons[kk],lats[kk], - model_lons[jjs:jje,iis:iie], - model_lats[jjs:jje,iis:iie], - land_mask=land_mask if land_mask is None else land_mask[jjs:jje,iis:iie]) - if np.isnan(jj) or jj==0 or jj==(jje-1) or ii==0 or ii==(iie-1): # if not found in expected grid swath or on edge - jj,ii=find_closest_model_point(lons[kk],lats[kk],model_lons,model_lats,land_mask=land_mask) + jjs = max(0, jlast - tol2 - 1) + jje = min(mj, jlast + 1 + tol2 + 1) + iis = max(0, ilast - tol2 - 1) + iie = min(mi, ilast + 1 + tol2 + 1) + jj, ii = find_closest_model_point( + lons[kk], + lats[kk], + model_lons[jjs:jje, iis:iie], + model_lats[jjs:jje, iis:iie], + land_mask=( + land_mask if land_mask is None else land_mask[jjs:jje, iis:iie] + ), + ) + if ( + np.isnan(jj) or jj == 0 or jj == (jje - 1) or ii == 0 or ii == (iie - 1) + ): # if not found in expected grid swath or on edge + jj, ii = find_closest_model_point( + lons[kk], lats[kk], model_lons, model_lats, land_mask=land_mask + ) else: - jj=jj+jjs - ii=ii+iis + jj = jj + jjs + ii = ii + iis else: - jj,ii=find_closest_model_point(lons[kk],lats[kk],model_lons,model_lats,land_mask=land_mask) - jlast=np.nan if np.isnan(jj) else int(jj) - ilast=np.nan if np.isnan(ii) else int(ii) - outj[kk]=jlast - outi[kk]=ilast + jj, ii = find_closest_model_point( + lons[kk], lats[kk], model_lons, model_lats, land_mask=land_mask + ) + jlast = np.nan if np.isnan(jj) else int(jj) + ilast = np.nan if np.isnan(ii) else int(ii) + outj[kk] = jlast + outi[kk] = ilast return outj, outi diff --git a/SalishSeaTools/salishsea_tools/grid_tools.py b/SalishSeaTools/salishsea_tools/grid_tools.py index 75276835..cbe4451f 100644 --- a/SalishSeaTools/salishsea_tools/grid_tools.py +++ b/SalishSeaTools/salishsea_tools/grid_tools.py @@ -32,10 +32,14 @@ import scipy.sparse as sp __all__ = [ - 'calculate_H', - 'calculate_adjustment_factor', 'calculate_time_dependent_grid', - 'time_dependent_grid_U', 'time_dependent_grid_V', 'build_GEM_mask', - 'build_matrix', 'use_matrix', + "calculate_H", + "calculate_adjustment_factor", + "calculate_time_dependent_grid", + "time_dependent_grid_U", + "time_dependent_grid_V", + "build_GEM_mask", + "build_matrix", + "use_matrix", ] @@ -53,7 +57,7 @@ def calculate_H(e3t0, tmask): """ - H = np.sum(e3t0*tmask, axis=0) + H = np.sum(e3t0 * tmask, axis=0) return H @@ -70,10 +74,10 @@ def calculate_adjustment_factor(H, ssh): :returns: the adjustment factor with dimensions (time, y, x) """ - with np.errstate(divide='ignore', invalid='ignore'): + with np.errstate(divide="ignore", invalid="ignore"): one_over_H = 1 / H one_over_H = np.nan_to_num(one_over_H) - adj = (1 + ssh * one_over_H) + adj = 1 + ssh * one_over_H return adj @@ -83,7 +87,7 @@ def calculate_time_dependent_grid( ssh, input_vars, ): - """ Calculate the time dependent vertical grids and scale factors for + """Calculate the time dependent vertical grids and scale factors for variable volume in NEMO. :arg e3t0: initial vertical scale factors on T-grid. 
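For context, a minimal sketch of how these helpers fit together: calculate_H sums the masked scale factors to get the water column thickness, calculate_adjustment_factor forms 1 + ssh / H, and the time-dependent scale factors are the initial ones scaled by that factor. The toy shapes and the expand_dims step are assumptions taken from the docstrings and the U/V-grid code below; this sketch is illustrative and not part of the patch.

    import numpy as np
    from salishsea_tools import grid_tools

    # Toy inputs; shapes assumed: (depth, y, x) for grid fields, (time, y, x) for ssh
    e3t0 = np.full((3, 4, 5), 1.0)   # initial vertical scale factors on the T-grid
    tmask = np.ones((3, 4, 5))       # 1 = water, 0 = land
    ssh = 0.5 * np.ones((2, 4, 5))   # sea surface height for 2 time steps

    H = grid_tools.calculate_H(e3t0, tmask)               # water column thickness, (y, x)
    adj = grid_tools.calculate_adjustment_factor(H, ssh)  # 1 + ssh / H, (time, y, x)
    e3t_t = e3t0 * np.expand_dims(adj, axis=1)            # time-dependent e3t, (time, depth, y, x)

Since calculate_time_dependent_grid multiplies each entry of input_vars by the same adjustment factor, the last line should mirror what that function returns for an e3t input.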
@@ -115,14 +119,15 @@ def calculate_time_dependent_grid( # Time-dependent grids return_vars = {} for key in input_vars: - return_key = '{}t'.format(key[0:-1]) + return_key = "{}t".format(key[0:-1]) return_vars[return_key] = input_vars[key] * adj return return_vars -def time_dependent_grid_U(e3u0, e1u, e2u, e1t, e2t, umask, ssh, input_vars, - return_ssh=False): +def time_dependent_grid_U( + e3u0, e1u, e2u, e1t, e2t, umask, ssh, input_vars, return_ssh=False +): """Calculate time-dependent vertical grid spacing and depths on U-grid for variable volume in NEMO. @@ -171,24 +176,28 @@ def time_dependent_grid_U(e3u0, e1u, e2u, e1t, e2t, umask, ssh, input_vars, e1e2u = e1u * e2u e1e2t = e1t * e2t # Interpolate ssh to u grid - ssh_u[:, :, 0:-1] = 0.5 * umask[0, :, 0:-1] / e1e2u[:, 0:-1] * ( - e1e2t[:, 0:-1] * ssh[:, :, 0:-1] + e1e2t[:, 1:] * ssh[:, :, 1:] - ) + ssh_u[:, :, 0:-1] = ( + 0.5 + * umask[0, :, 0:-1] + / e1e2u[:, 0:-1] + * (e1e2t[:, 0:-1] * ssh[:, :, 0:-1] + e1e2t[:, 1:] * ssh[:, :, 1:]) + ) H = calculate_H(e3u0, umask) adj = calculate_adjustment_factor(H, ssh_u) adj = np.expand_dims(adj, axis=1) # Time-dependent grids return_vars = {} for key in input_vars: - return_key = '{}t'.format(key[0:-1]) + return_key = "{}t".format(key[0:-1]) return_vars[return_key] = input_vars[key] * adj if return_ssh: - return_vars['ssh_u'] = ssh_u + return_vars["ssh_u"] = ssh_u return return_vars -def time_dependent_grid_V(e3v0, e1v, e2v, e1t, e2t, vmask, ssh, input_vars, - return_ssh=False): +def time_dependent_grid_V( + e3v0, e1v, e2v, e1t, e2t, vmask, ssh, input_vars, return_ssh=False +): """Calculate time-dependent vertical grid spacing and depths on V-grid for variable volume in NEMO. @@ -237,40 +246,44 @@ def time_dependent_grid_V(e3v0, e1v, e2v, e1t, e2t, vmask, ssh, input_vars, e1e2v = e1v * e2v e1e2t = e1t * e2t # Interpolate ssh to V-grid - ssh_v[:, 0:-1, :] = 0.5 * vmask[0, 0:-1, :] / e1e2v[0:-1, :] * ( - e1e2t[0:-1, :] * ssh[:, 0:-1, :] + - e1e2t[1:, :] * ssh[:, 1:, :] - ) + ssh_v[:, 0:-1, :] = ( + 0.5 + * vmask[0, 0:-1, :] + / e1e2v[0:-1, :] + * (e1e2t[0:-1, :] * ssh[:, 0:-1, :] + e1e2t[1:, :] * ssh[:, 1:, :]) + ) H = calculate_H(e3v0, vmask) adj = calculate_adjustment_factor(H, ssh_v) adj = np.expand_dims(adj, axis=1) # Time-dependent grids return_vars = {} for key in input_vars: - return_key = '{}t'.format(key[0:-1]) + return_key = "{}t".format(key[0:-1]) return_vars[return_key] = input_vars[key] * adj if return_ssh: - return_vars['ssh_v'] = ssh_v + return_vars["ssh_v"] = ssh_v return return_vars def build_GEM_mask(grid_GEM, grid_NEMO, mask_NEMO): - """ - """ + """ """ # Evaluate each point on GEM grid mask_GEM = [] - msg = 'Building HRDPS mask' + msg = "Building HRDPS mask" for lon, lat in zip( - tqdm(grid_GEM['longitude'].values.flatten() - 360, desc=msg), - grid_GEM['latitude'].values.flatten(), + tqdm(grid_GEM["longitude"].values.flatten() - 360, desc=msg), + grid_GEM["latitude"].values.flatten(), ): # Find closest NEMO ji point try: j, i = geo_tools.find_closest_model_point( - lon, lat, grid_NEMO['longitude'], grid_NEMO['latitude'], + lon, + lat, + grid_NEMO["longitude"], + grid_NEMO["latitude"], ) except ValueError: j, i = np.nan, np.nan @@ -282,7 +295,7 @@ def build_GEM_mask(grid_GEM, grid_NEMO, mask_NEMO): mask_GEM.append(mask_NEMO[j, i].values) # Reshape - mask_GEM = np.array(mask_GEM, dtype='int').reshape(grid_GEM['longitude'].shape) + mask_GEM = np.array(mask_GEM, dtype="int").reshape(grid_GEM["longitude"].shape) return mask_GEM @@ -305,18 +318,23 @@ def build_matrix(weightsfile, 
opsfile): """ # Weights with nc.Dataset(weightsfile) as f: - s1 = f.variables['src01'][:]-1 # -1 for fortran-to-python indexing - s2 = f.variables['src02'][:]-1 - s3 = f.variables['src03'][:]-1 - s4 = f.variables['src04'][:]-1 - w1 = f.variables['wgt01'][:] - w2 = f.variables['wgt02'][:] - w3 = f.variables['wgt03'][:] - w4 = f.variables['wgt04'][:] + s1 = f.variables["src01"][:] - 1 # -1 for fortran-to-python indexing + s2 = f.variables["src02"][:] - 1 + s3 = f.variables["src03"][:] - 1 + s4 = f.variables["src04"][:] - 1 + w1 = f.variables["wgt01"][:] + w2 = f.variables["wgt02"][:] + w3 = f.variables["wgt03"][:] + w4 = f.variables["wgt04"][:] with nc.Dataset(opsfile) as f: - NO = f.dimensions['x'].size * f.dimensions['y'].size # number of operational grid points - NN, nemoshape = s1.size, s1.shape # number of NEMO grid points and shape of NEMO matrix + NO = ( + f.dimensions["x"].size * f.dimensions["y"].size + ) # number of operational grid points + NN, nemoshape = ( + s1.size, + s1.shape, + ) # number of NEMO grid points and shape of NEMO matrix # Build matrix n = np.array([x for x in range(0, NN)]) @@ -324,8 +342,8 @@ def build_matrix(weightsfile, opsfile): M2 = sp.csr_matrix((w2.flatten(), (n, s2.flatten())), (NN, NO)) M3 = sp.csr_matrix((w3.flatten(), (n, s3.flatten())), (NN, NO)) M4 = sp.csr_matrix((w4.flatten(), (n, s4.flatten())), (NN, NO)) - M = M1+M2+M3+M4 - return M,nemoshape + M = M1 + M2 + M3 + M4 + return M, nemoshape def use_matrix(opsfile, matrix, nemoshape, variable, time): @@ -354,10 +372,10 @@ def use_matrix(opsfile, matrix, nemoshape, variable, time): :rtype: :class:`~numpy:numpy.ndarray` """ with nc.Dataset(opsfile) as f: - odata = f.variables[variable][time, ...] # Load the 2D field + odata = f.variables[variable][time, ...] # Load the 2D field # Interpolate by matrix multiply - quite fast - ndata = matrix*odata.flatten() + ndata = matrix * odata.flatten() # Reshape to NEMO shaped array ndata = ndata.reshape(nemoshape) diff --git a/SalishSeaTools/salishsea_tools/gsw_calls.py b/SalishSeaTools/salishsea_tools/gsw_calls.py index 49cd7366..d12b0a90 100644 --- a/SalishSeaTools/salishsea_tools/gsw_calls.py +++ b/SalishSeaTools/salishsea_tools/gsw_calls.py @@ -15,10 +15,11 @@ def generic_gsw_caller( - gsw_function_name, input_vars, - matlab_gsw_dir='/ocean/rich/home/matlab/gsw3', + gsw_function_name, + input_vars, + matlab_gsw_dir="/ocean/rich/home/matlab/gsw3", ): - """ A generic function for calling matlab gsw functions. Only works with + """A generic function for calling matlab gsw functions. Only works with gsw functions that have a single variable as output. :arg str gsw_function_name: The name of the matlab gsw function. 
@@ -34,29 +35,30 @@ def generic_gsw_caller( # save inputs to a file for reading into matlab tmp_files = [] for count, var_data in enumerate(input_vars): - tmp_fname = 'input{}'.format(count) + tmp_fname = "input{}".format(count) tmp_files.append(tmp_fname) - np.savetxt(tmp_fname, var_data.flatten(), delimiter=',') + np.savetxt(tmp_fname, var_data.flatten(), delimiter=",") shape = input_vars[0].shape # create matlab wrapper gsw_function_name = ( - gsw_function_name if gsw_function_name.endswith('.m') - else '{}.m'.format(gsw_function_name)) - output = 'output_file' - matlab_wrapper_name = _create_matlab_wrapper(gsw_function_name, - output, - tmp_files, - matlab_gsw_dir) + gsw_function_name + if gsw_function_name.endswith(".m") + else "{}.m".format(gsw_function_name) + ) + output = "output_file" + matlab_wrapper_name = _create_matlab_wrapper( + gsw_function_name, output, tmp_files, matlab_gsw_dir + ) # create string of input arguments arg_strings = "('{}'".format(output) for tmp_fname in tmp_files: arg_strings += ",'{}'".format(tmp_fname) - arg_strings += ');exit' + arg_strings += ");exit" # create string for calling matlab - functioncall = '{}{}'.format(matlab_wrapper_name[:-2], arg_strings) + functioncall = "{}{}".format(matlab_wrapper_name[:-2], arg_strings) _run_matlab(functioncall) # load output from matlab - output_data = np.loadtxt(output, delimiter=',') + output_data = np.loadtxt(output, delimiter=",") # remove tmp files for f in tmp_files: os.remove(f) @@ -66,41 +68,42 @@ def generic_gsw_caller( def _create_matlab_wrapper( - gsw_function_name, outfile, input_files, matlab_gsw_dir, + gsw_function_name, + outfile, + input_files, + matlab_gsw_dir, ): # Create a matlab wrapper file - wrapper_file_name = 'mw_{}'.format(gsw_function_name) - f = open(wrapper_file_name, 'w') - header = 'function [] = {}({},'.format(wrapper_file_name[:-2], outfile) + wrapper_file_name = "mw_{}".format(gsw_function_name) + f = open(wrapper_file_name, "w") + header = "function [] = {}({},".format(wrapper_file_name[:-2], outfile) for input_file in input_files: header += "{},".format(input_file) - header = header[:-1] + ')\n' + header = header[:-1] + ")\n" f.write(header) # Add directories to matlab path - f.write('addpath {}\n'.format(matlab_gsw_dir)) - for subdir in ['html', 'library', 'thermodynamics_from_t', 'pdf']: - f.write('addpath {}\n'.format(os.path.join(matlab_gsw_dir, subdir))) + f.write("addpath {}\n".format(matlab_gsw_dir)) + for subdir in ["html", "library", "thermodynamics_from_t", "pdf"]: + f.write("addpath {}\n".format(os.path.join(matlab_gsw_dir, subdir))) # reading input files - input_args = '' + input_args = "" for count, input_file in enumerate(input_files): f.write("in{} = dlmread({},',');\n".format(count, input_file)) - input_args += 'in{},'.format(count) + input_args += "in{},".format(count) # call matlab gsw function - f.write('y = {}({});\n'.format(gsw_function_name[:-2], input_args[:-1])) + f.write("y = {}({});\n".format(gsw_function_name[:-2], input_args[:-1])) f.write("dlmwrite({},y,',');\n".format(outfile)) return wrapper_file_name def _run_matlab(functioncall): - cmd = shlex.split('matlab -nosplash -nodesktop -nodisplay -nojvm -r') + cmd = shlex.split("matlab -nosplash -nodesktop -nodisplay -nojvm -r") cmd.append(functioncall) - logger.debug('executing {}'.format(cmd)) + logger.debug("executing {}".format(cmd)) try: - cmd_output = sp.check_output( - cmd, stderr=sp.STDOUT, universal_newlines=True) + cmd_output = sp.check_output(cmd, stderr=sp.STDOUT, universal_newlines=True) except 
sp.CalledProcessError as e: - logger.error( - 'matlab command failed with return code {.returncode}'.format(e)) + logger.error("matlab command failed with return code {.returncode}".format(e)) cmd_output = e.output finally: for line in cmd_output.splitlines(): @@ -114,14 +117,12 @@ def _call_p_from_z(z, lat): zfile = "'zfile'" latfile = "'latfile'" for f, var in zip([zfile, latfile], [z, lat]): - np.savetxt(f[1:-1], var.flatten(), delimiter=',') + np.savetxt(f[1:-1], var.flatten(), delimiter=",") shape = z.shape - functioncall = 'mw_gsw_p_from_z({},{},{});exit'.format(fname, - zfile, - latfile) + functioncall = "mw_gsw_p_from_z({},{},{});exit".format(fname, zfile, latfile) _run_matlab(functioncall) - pressure = np.loadtxt(fname[1:-1], delimiter=',') + pressure = np.loadtxt(fname[1:-1], delimiter=",") for f in [fname, zfile, latfile]: os.remove(f[1:-1]) return pressure.reshape(shape) @@ -130,15 +131,27 @@ def _call_p_from_z(z, lat): def _call_SR_from_SP(SP): fname = "'SRout'" SPfile = "'SPfile'" - for f, var in zip([SPfile, ], [SP, ]): - np.savetxt(f[1:-1], var.flatten(), delimiter=',') + for f, var in zip( + [ + SPfile, + ], + [ + SP, + ], + ): + np.savetxt(f[1:-1], var.flatten(), delimiter=",") shape = SP.shape - functioncall = 'mw_gsw_SR_from_SP({},{});exit'.format(fname, - SPfile,) + functioncall = "mw_gsw_SR_from_SP({},{});exit".format( + fname, + SPfile, + ) _run_matlab(functioncall) - sal_ref = np.loadtxt(fname[1:-1], delimiter=',') - for f in [fname, SPfile, ]: + sal_ref = np.loadtxt(fname[1:-1], delimiter=",") + for f in [ + fname, + SPfile, + ]: os.remove(f[1:-1]) return sal_ref.reshape(shape) @@ -149,21 +162,17 @@ def _call_SA_from_SP(SP, p, long, lat): pfile = "'pfile'" longfile = "'longfile'" latfile = "'latfile'" - for f, var in zip([SPfile, pfile, longfile, latfile], - [SP, p, long, lat]): - np.savetxt(f[1:-1], var.flatten(), delimiter=',') + for f, var in zip([SPfile, pfile, longfile, latfile], [SP, p, long, lat]): + np.savetxt(f[1:-1], var.flatten(), delimiter=",") shape = SP.shape - functioncall = 'mw_gsw_SA_from_SP({},{},{},{},{});exit'.format(fname, - SPfile, - pfile, - longfile, - latfile) + functioncall = "mw_gsw_SA_from_SP({},{},{},{},{});exit".format( + fname, SPfile, pfile, longfile, latfile + ) _run_matlab(functioncall) - SA = np.loadtxt(fname[1:-1], delimiter=',') + SA = np.loadtxt(fname[1:-1], delimiter=",") - for f in [fname, SPfile, pfile, - longfile, latfile]: + for f in [fname, SPfile, pfile, longfile, latfile]: os.remove(f[1:-1]) return SA.reshape(shape) @@ -173,16 +182,13 @@ def _call_CT_from_PT(SA, PT): fname = "'CTout'" SAfile = "'SAfile'" PTfile = "'PTfile'" - for f, var in zip([SAfile, PTfile], - [SA, PT]): - np.savetxt(f[1:-1], var.flatten(), delimiter=',') + for f, var in zip([SAfile, PTfile], [SA, PT]): + np.savetxt(f[1:-1], var.flatten(), delimiter=",") shape = PT.shape - functioncall = 'mw_gsw_CT_from_pt({},{},{});exit'.format(fname, - SAfile, - PTfile) + functioncall = "mw_gsw_CT_from_pt({},{},{});exit".format(fname, SAfile, PTfile) _run_matlab(functioncall) - CT = np.loadtxt(fname[1:-1], delimiter=',') + CT = np.loadtxt(fname[1:-1], delimiter=",") for f in [fname, SAfile, PTfile]: os.remove(f[1:-1]) diff --git a/SalishSeaTools/salishsea_tools/hg_commands.py b/SalishSeaTools/salishsea_tools/hg_commands.py index 8927fc16..e864ed83 100644 --- a/SalishSeaTools/salishsea_tools/hg_commands.py +++ b/SalishSeaTools/salishsea_tools/hg_commands.py @@ -3,7 +3,9 @@ This is a utility library that is used by other Python packages and modules developed for the 
Salish Sea MEOPAR project. """ + from __future__ import absolute_import + """ Copyright 2013-2021 The Salish Sea MEOPAR contributors and The University of British Columbia @@ -24,10 +26,10 @@ __all__ = [ - 'commit', - 'default_url', - 'heads', - 'parents', + "commit", + "default_url", + "heads", + "parents", ] @@ -38,7 +40,7 @@ def commit(logfile): :arg logfile: Name of the file containing the commit message. :type logfile: str """ - cmd = ['hg', 'commit', '--logfile', logfile] + cmd = ["hg", "commit", "--logfile", logfile] subprocess.check_call(cmd) @@ -56,18 +58,17 @@ def default_url(repo=None): :returns: Output of the command or :py:obj:`None`. """ - cmd = ['hg'] + cmd = ["hg"] if repo is not None: - cmd.extend(['-R', repo]) - cmd.extend(['paths', 'default']) + cmd.extend(["-R", repo]) + cmd.extend(["paths", "default"]) try: - return subprocess.check_output( - cmd, universal_newlines=True).strip() + return subprocess.check_output(cmd, universal_newlines=True).strip() except subprocess.CalledProcessError: return None -def heads(repo, revs=['.']): +def heads(repo, revs=["."]): """Return the result of the :command:`hg -R repo heads revs` command. :arg repo: Repository root directory. @@ -81,7 +82,7 @@ def heads(repo, revs=['.']): :returns: Output of the command. :rtype: str """ - cmd = ['hg', '-R', repo, 'heads'] + revs + cmd = ["hg", "-R", repo, "heads"] + revs return subprocess.check_output(cmd, universal_newlines=True) @@ -121,13 +122,13 @@ def parents(repo=None, rev=None, file=None, verbose=False): :returns: Output of the command. :rtype: str """ - cmd = ['hg', 'parents'] + cmd = ["hg", "parents"] if repo is not None: - cmd.extend(['-R', repo]) + cmd.extend(["-R", repo]) if rev is not None: - cmd.extend(['-r', rev]) + cmd.extend(["-r", rev]) if file is not None: cmd.append(file) if verbose: - cmd.append('-v') + cmd.append("-v") return subprocess.check_output(cmd, universal_newlines=True) diff --git a/SalishSeaTools/salishsea_tools/loadDataFRP.py b/SalishSeaTools/salishsea_tools/loadDataFRP.py index 91b1b4be..5ed3c261 100644 --- a/SalishSeaTools/salishsea_tools/loadDataFRP.py +++ b/SalishSeaTools/salishsea_tools/loadDataFRP.py @@ -9,100 +9,110 @@ import netCDF4 as nc import gsw -# list CTD cnv files associated with cast numbers -cnvlist19={1:'fraser2017101.cnv', - 2:'fraser2017102.cnv', - 3:'fraser2017103.cnv', - 4:'fraser2017104.cnv', - 5:'fraser2017105.cnv', - 6:'fraser2017106.cnv', - 7:'fraser2017107.cnv', - 8:'fraser2017108.cnv', - 9:'fraser2017109.cnv', - 10:'fraser2017110.cnv', - 11:'fraser2017111.cnv', - 12:'fraser2017112.cnv', - 13:'fraser2017113.cnv', - 14.1:'fraser2017114.cnv', - 14.2:'fraser2017114.cnv', - 15:'fraser2017115.cnv', - 16:'fraser2017116.cnv', - 17:'fraser2017117.cnv', - 18:'fraser2017118.cnv', - 19:'fraser2017119.cnv', - 20:'fraser2017120.cnv', - 21:'fraser2017121.cnv', - 22:'fraser2017122.cnv', - 23:'fraser2017123.cnv', - 24:'fraser2017124.cnv'} - -cnvlist25={1:'fraser2017001.cnv', - 2:'fraser2017002.cnv', - 3:'fraser2017003.cnv', - 4:'fraser2017004.cnv', - 5:'fraser2017005.cnv', - 6:'fraser2017006.cnv', - 7:'fraser2017007.cnv', - 8:'fraser2017008.cnv', - 9:'fraser2017009.cnv', - 10:'fraser2017010.cnv', - 11:'fraser2017011.cnv', - 12:'fraser2017012.cnv', - 13:'fraser2017013.cnv', - 14.1:'fraser2017014.cnv', - 14.2:'fraser2017014.cnv', - 15:'fraser2017015.cnv', - 16:'fraser2017016.cnv', - 17:'fraser2017017.cnv', - 18:'fraser2017018.cnv', - 19:'fraser2017019.cnv', - 20:'fraser2017020.cnv', - 21:'fraser2017021.cnv', - 22:'fraser2017022.cnv', - 
23:'fraser2017023.cnv', - 24:'fraser2017024.cnv'} +# list CTD cnv files associated with cast numbers +cnvlist19 = { + 1: "fraser2017101.cnv", + 2: "fraser2017102.cnv", + 3: "fraser2017103.cnv", + 4: "fraser2017104.cnv", + 5: "fraser2017105.cnv", + 6: "fraser2017106.cnv", + 7: "fraser2017107.cnv", + 8: "fraser2017108.cnv", + 9: "fraser2017109.cnv", + 10: "fraser2017110.cnv", + 11: "fraser2017111.cnv", + 12: "fraser2017112.cnv", + 13: "fraser2017113.cnv", + 14.1: "fraser2017114.cnv", + 14.2: "fraser2017114.cnv", + 15: "fraser2017115.cnv", + 16: "fraser2017116.cnv", + 17: "fraser2017117.cnv", + 18: "fraser2017118.cnv", + 19: "fraser2017119.cnv", + 20: "fraser2017120.cnv", + 21: "fraser2017121.cnv", + 22: "fraser2017122.cnv", + 23: "fraser2017123.cnv", + 24: "fraser2017124.cnv", +} + +cnvlist25 = { + 1: "fraser2017001.cnv", + 2: "fraser2017002.cnv", + 3: "fraser2017003.cnv", + 4: "fraser2017004.cnv", + 5: "fraser2017005.cnv", + 6: "fraser2017006.cnv", + 7: "fraser2017007.cnv", + 8: "fraser2017008.cnv", + 9: "fraser2017009.cnv", + 10: "fraser2017010.cnv", + 11: "fraser2017011.cnv", + 12: "fraser2017012.cnv", + 13: "fraser2017013.cnv", + 14.1: "fraser2017014.cnv", + 14.2: "fraser2017014.cnv", + 15: "fraser2017015.cnv", + 16: "fraser2017016.cnv", + 17: "fraser2017017.cnv", + 18: "fraser2017018.cnv", + 19: "fraser2017019.cnv", + 20: "fraser2017020.cnv", + 21: "fraser2017021.cnv", + 22: "fraser2017022.cnv", + 23: "fraser2017023.cnv", + 24: "fraser2017024.cnv", +} + class Cast: - def __init__(self,fpath): - mSta,mLat,mLon,df=readcnv(fpath) - self.sta=mSta - self.lat=mLat - self.lon=mLon - self.df=df - self.source=fpath + def __init__(self, fpath): + mSta, mLat, mLon, df = readcnv(fpath) + self.sta = mSta + self.lat = mLat + self.lon = mLon + self.df = df + self.source = fpath + class zCast: - def __init__(self,updf,downdf): - self.uCast=updf - self.dCast=downdf + def __init__(self, updf, downdf): + self.uCast = updf + self.dCast = downdf + class rawCast: def __init__(self): - self.uCast=dict() - self.dCast=dict() + self.uCast = dict() + self.dCast = dict() + class dataPair: - def __init__(self,zval,varval): - self.z=zval - self.val=varval + def __init__(self, zval, varval): + self.z = zval + self.val = varval + def fmtVarName(strx): - """ transform string into one that meets python naming conventions""" - vName=re.sub('[^a-zA-Z0-9_\-\s/]','',strx.strip()) - vName=re.sub('[\s/]','_',vName) - vName=re.sub('-','_',vName) - if re.match('[0-9]',vName): - vName='_'+vName + """transform string into one that meets python naming conventions""" + vName = re.sub("[^a-zA-Z0-9_\-\s/]", "", strx.strip()) + vName = re.sub("[\s/]", "_", vName) + vName = re.sub("-", "_", vName) + if re.match("[0-9]", vName): + vName = "_" + vName return vName -#def rolling_window(a, window): + +# def rolling_window(a, window): # # source: https://www.rigtorp.se/2011/01/01/rolling-statistics-numpy.html # # use example: np.mean(rolling_window(x, 3), -1) # shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) # strides = a.strides + (a.strides[-1],) # return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) # -#def rolling_window_padded(a,window): +# def rolling_window_padded(a,window): # # extend rolling window to be same lenth as input array by duplicating first and last values # # even values not symmetric # test=rolling_window(a,window) @@ -114,211 +124,244 @@ def fmtVarName(strx): # window+=-1 # return test -def slidingWindowEval(x,func,window,axis=0): + +def slidingWindowEval(x, func, window, axis=0): # x is 
input array # func is function to carry out over window # window is window size # axis is axis to act along, in case of multiple # if window is even, results will be shifted left by 1/2 unit - x1=np.lib.stride_tricks.sliding_window_view(x, window, axis) - b=func(x1,-1) + x1 = np.lib.stride_tricks.sliding_window_view(x, window, axis) + b = func(x1, -1) # the rest of the code pads the front and back to return an array of the same shape as the original - nfront=np.floor((window-1)/2) - nback=np.floor((window-1)/2)+(window-1)%2 - inxf=[slice(None)]*np.ndim(b) - inxf[axis]=slice(0,1,1) - inxb=[slice(None)]*np.ndim(b) - inxb[axis]=slice(np.shape(b)[axis]-1,np.shape(b)[axis],1) - repsf=np.ones(np.ndim(b),dtype=int) - repsf[axis]=int(nfront) - repsb=np.ones(np.ndim(b),dtype=int) - repsb[axis]=int(nback) - x2=np.concatenate((np.tile(b[tuple(inxf)],repsf),b,np.tile(b[tuple(inxb)],repsb)),axis=axis) + nfront = np.floor((window - 1) / 2) + nback = np.floor((window - 1) / 2) + (window - 1) % 2 + inxf = [slice(None)] * np.ndim(b) + inxf[axis] = slice(0, 1, 1) + inxb = [slice(None)] * np.ndim(b) + inxb[axis] = slice(np.shape(b)[axis] - 1, np.shape(b)[axis], 1) + repsf = np.ones(np.ndim(b), dtype=int) + repsf[axis] = int(nfront) + repsb = np.ones(np.ndim(b), dtype=int) + repsb[axis] = int(nback) + x2 = np.concatenate( + (np.tile(b[tuple(inxf)], repsf), b, np.tile(b[tuple(inxb)], repsb)), axis=axis + ) return x2 -def amp(var,dim=0): - return np.nanmax(var,dim)-np.nanmin(var,dim) +def amp(var, dim=0): + return np.nanmax(var, dim) - np.nanmin(var, dim) + def turbQC(x): # turbidity sensor produced erroneous zero readings interspersed with real data when too close to surface # remove suspect values from analysis # - median filter alone was not enough - # remove a point if the max-min of the surrounding 5 point window - # is greater than 1/3 the maximum turbidity value of the cast + # remove a point if the max-min of the surrounding 5 point window + # is greater than 1/3 the maximum turbidity value of the cast # (remove data within 5 points of a large jump) - #ii1=amp(rolling_window_padded(x,5),-1)>.33*np.nanmax(x) - ii1=slidingWindowEval(x,amp,5)>.33*np.nanmax(x) # was .5 + # ii1=amp(rolling_window_padded(x,5),-1)>.33*np.nanmax(x) + ii1 = slidingWindowEval(x, amp, 5) > 0.33 * np.nanmax(x) # was .5 # remove data within 5 points of a near-zero turbidity value - #ii2=np.nanmin(rolling_window_padded(x,5),-1)<.3 - ii2=slidingWindowEval(x,np.nanmin,5)<.3 - y=np.copy(x) - y[np.logical_or(ii1,ii2,)]=np.nan - y=ssig.medfilt(y,3) + # ii2=np.nanmin(rolling_window_padded(x,5),-1)<.3 + ii2 = slidingWindowEval(x, np.nanmin, 5) < 0.3 + y = np.copy(x) + y[ + np.logical_or( + ii1, + ii2, + ) + ] = np.nan + y = ssig.medfilt(y, 3) return y + def readcnv(fpath): - alphnumlist=list(string.ascii_letters)+list(string.digits) + alphnumlist = list(string.ascii_letters) + list(string.digits) # define regexes for reading headers: - reSta=re.compile('(?<=\*\*\sStation:)\s?([0-9])+\s?') # assumes numeric station identifiers - reLat=re.compile('(?<=\*\*\sLatitude\s=)\s?([\-0-9\.]+)\s([\-\.0-9]+)\s?([NS])') - reLon=re.compile('(?<=\*\*\sLongitude\s=)\s?([\-0-9\.]+)\s([\-\.0-9]+)\s?([EW])') + reSta = re.compile( + "(?<=\*\*\sStation:)\s?([0-9])+\s?" 
+ ) # assumes numeric station identifiers + reLat = re.compile("(?<=\*\*\sLatitude\s=)\s?([\-0-9\.]+)\s([\-\.0-9]+)\s?([NS])") + reLon = re.compile("(?<=\*\*\sLongitude\s=)\s?([\-0-9\.]+)\s([\-\.0-9]+)\s?([EW])") # start_time = May 08 2002 09:39:10 - reST=re.compile('(?<=\#\sstart_time\s=).*') - #reTZ=re.compile('(?<=\*\*\s...\s\(Time\)\s=).*') - #reCr=re.compile('(?<=\*\*\sCruise:).*') - reNam=re.compile('(?<=\#\sname\s)([0-9]+)\s=\s(.*)\:\s?(.*)\s?') - + reST = re.compile("(?<=\#\sstart_time\s=).*") + # reTZ=re.compile('(?<=\*\*\s...\s\(Time\)\s=).*') + # reCr=re.compile('(?<=\*\*\sCruise:).*') + reNam = re.compile("(?<=\#\sname\s)([0-9]+)\s=\s(.*)\:\s?(.*)\s?") + # define regex for finding searching: - spStart=re.compile('^\s*[0-9]') # starts with space characters followed by digit - - headers=list() - #lineno=0 - mSta=None - mLat=None - mLon=None - with open(fpath, 'rt', encoding="ISO-8859-1") as f: + spStart = re.compile("^\s*[0-9]") # starts with space characters followed by digit + + headers = list() + # lineno=0 + mSta = None + mLat = None + mLon = None + with open(fpath, "rt", encoding="ISO-8859-1") as f: for fline in f: - if fline.startswith('**'): + if fline.startswith("**"): if reSta.search(fline): - mSta=reSta.search(fline).groups() + mSta = reSta.search(fline).groups() if reLat.search(fline): - mLat=reLat.search(fline).groups() + mLat = reLat.search(fline).groups() if reLon.search(fline): - mLon=reLon.search(fline).groups() + mLon = reLon.search(fline).groups() elif reNam.search(fline): headers.append(fmtVarName(reNam.search(fline).groups(1)[1])) - elif fline.startswith('*END*'): + elif fline.startswith("*END*"): break - #lineno+=1 - #still in with file open - df=pd.read_csv(f,delim_whitespace=True,names=headers) + # lineno+=1 + # still in with file open + df = pd.read_csv(f, delim_whitespace=True, names=headers) # file closed - - return mSta,mLat,mLon,df -def bindepth(inP,inV,edges,targets=[],prebin=False): + return mSta, mLat, mLon, df + + +def bindepth(inP, inV, edges, targets=[], prebin=False): # calculate depth-associated variables # 1st calculate bin averages of depth and variable # then use np interp to estimate at-grid-point values # edges must be monotonically increasing - if prebin==True: - newP,newV=bindepth(inP,inV,np.arange(edges[0],edges[-1],.05),prebin=False) - inP=newP - inV=newV - inP=inP[~np.isnan(inV)] - inV=inV[~np.isnan(inV)] - binned=np.digitize(inP,edges) - Pa=np.empty(len(edges)-1) - Va=np.empty(len(edges)-1) + if prebin == True: + newP, newV = bindepth( + inP, inV, np.arange(edges[0], edges[-1], 0.05), prebin=False + ) + inP = newP + inV = newV + inP = inP[~np.isnan(inV)] + inV = inV[~np.isnan(inV)] + binned = np.digitize(inP, edges) + Pa = np.empty(len(edges) - 1) + Va = np.empty(len(edges) - 1) if len(targets) == 0: - Pi=.5*(edges[:-1]+edges[1:]) + Pi = 0.5 * (edges[:-1] + edges[1:]) else: - Pi=targets[:(len(edges)-1)] - Vi=np.empty(len(edges)-1) - for jj in range(1,len(edges)): - ll=(binned==jj) #&(~np.isnan(inV)) - if np.sum(ll)>0: - Pa[jj-1]=np.mean(inP[ll]) - Va[jj-1]=np.mean(inV[ll]) + Pi = targets[: (len(edges) - 1)] + Vi = np.empty(len(edges) - 1) + for jj in range(1, len(edges)): + ll = binned == jj # &(~np.isnan(inV)) + if np.sum(ll) > 0: + Pa[jj - 1] = np.mean(inP[ll]) + Va[jj - 1] = np.mean(inV[ll]) else: - Pa[jj-1]=np.nan - Va[jj-1]=np.nan + Pa[jj - 1] = np.nan + Va[jj - 1] = np.nan # linearly extrapolate some values, but not beyond range of original data - pnew=Pa[0]-(Pa[1]-Pa[0]) - vnew=Va[0]-(Va[1]-Va[0]) - 
Pa=np.concatenate(([pnew],Pa)) - Va=np.concatenate(([vnew],Va)) - Vi=np.interp(Pi,Pa[~np.isnan(Va)],Va[~np.isnan(Va)],right=np.nan,left=np.nan) - Vi[Pi>np.max(inP)]=np.nan - Vi[Pi np.max(inP)] = np.nan + Vi[Pi < np.min(inP)] = np.nan return Pi, Vi + def cXfromX(X): - X=np.array(X) - X[np.isnan(X)]=-5 - Y=np.nan*X - iii=(X>0)&(X<100) - Y[iii]=-np.log(X[iii]/100.0)/.25 + X = np.array(X) + X[np.isnan(X)] = -5 + Y = np.nan * X + iii = (X > 0) & (X < 100) + Y[iii] = -np.log(X[iii] / 100.0) / 0.25 return Y -def turbReg(m,Cx,fl): - return np.maximum(0.0,m[0]*Cx-m[1]*fl-m[2]) + +def turbReg(m, Cx, fl): + return np.maximum(0.0, m[0] * Cx - m[1] * fl - m[2]) + def turbFit(df0): # calculate conversion factor for sb19 ctd turbidity to ALS bottle turbidity # force through (0,0) - x=df0.loc[(df0.ALS_Turb_NTU>0)&(df0.sb19Turb_uncorrected>0)]['sb19Turb_uncorrected'].values - x=x[:,np.newaxis] - y=df0.loc[(df0.ALS_Turb_NTU>0)&(df0.sb19Turb_uncorrected>0)]['ALS_Turb_NTU'] - tinv=np.linalg.lstsq(x,y,rcond=None)[0] - tcor=1.0/tinv + x = df0.loc[(df0.ALS_Turb_NTU > 0) & (df0.sb19Turb_uncorrected > 0)][ + "sb19Turb_uncorrected" + ].values + x = x[:, np.newaxis] + y = df0.loc[(df0.ALS_Turb_NTU > 0) & (df0.sb19Turb_uncorrected > 0)]["ALS_Turb_NTU"] + tinv = np.linalg.lstsq(x, y, rcond=None)[0] + tcor = 1.0 / tinv return tcor -def loadDataFRP_init(exp='all'): - if exp not in {'exp1', 'exp2', 'exp3', 'all'}: - print('option exp='+exp+' is not defined.') + +def loadDataFRP_init(exp="all"): + if exp not in {"exp1", "exp2", "exp3", "all"}: + print("option exp=" + exp + " is not defined.") raise - with open('/ocean/shared/SalishSeaCastData/FRPlume/stationsDigitizedFinal.csv','r') as fa: - df0_a=pd.read_csv(fa,header=0,na_values='None') - with open('/ocean/shared/SalishSeaCastData/FRPlume/util/stationsDigitized_ancillary.csv','r') as fb: - df0_b=pd.read_csv(fb,header=0,na_values='None') - df0=pd.merge(df0_a,df0_b,how='left',on=['Station','Date']) + with open( + "/ocean/shared/SalishSeaCastData/FRPlume/stationsDigitizedFinal.csv", "r" + ) as fa: + df0_a = pd.read_csv(fa, header=0, na_values="None") + with open( + "/ocean/shared/SalishSeaCastData/FRPlume/util/stationsDigitized_ancillary.csv", + "r", + ) as fb: + df0_b = pd.read_csv(fb, header=0, na_values="None") + df0 = pd.merge(df0_a, df0_b, how="left", on=["Station", "Date"]) # if values present, calculate correction factor for sb19 turbidity (divide sb19 turbidity by tcor) # fit true turb to observed turb # calculate here while all values present - if np.sum(df0.sb19Turb_uncorrected>0)>0: - tcor=turbFit(df0) + if np.sum(df0.sb19Turb_uncorrected > 0) > 0: + tcor = turbFit(df0) else: - tcor=np.nan - if exp=='exp1': - df0=df0.drop(df0.index[df0.Date != 20170410]) - elif exp=='exp2': - df0=df0.drop(df0.index[df0.Date != 20170531]) - elif exp=='exp3': - df0=df0.drop(df0.index[df0.Date != 20171101]) - - basedir1='/ocean/shared/SalishSeaCastData/FRPlume/ctd/20170410/' - basedir2='/ocean/shared/SalishSeaCastData/FRPlume/ctd/20170531/' - basedir3='/ocean/shared/SalishSeaCastData/FRPlume/ctd/20171101/' - dir19='19-4561/4_derive' - dir25='25-0363/4_derive' - dir19T10='19-4561/4b_deriveTEOS10' - dir25T10='25-0363/4a_deriveTEOS10' - - clist=[] - if (exp=='exp1' or exp=='all'): - clist = clist + list(range(1,10)) - if (exp=='exp2' or exp=='all'): - clist = clist + [10,11,12,13,14.1,14.2,15,16,17,18] - if (exp=='exp3' or exp=='all'): - clist = clist + list(range(19,25)) - - fpath19=dict() - fpath25=dict() + tcor = np.nan + if exp == "exp1": + df0 = df0.drop(df0.index[df0.Date != 
20170410]) + elif exp == "exp2": + df0 = df0.drop(df0.index[df0.Date != 20170531]) + elif exp == "exp3": + df0 = df0.drop(df0.index[df0.Date != 20171101]) + + basedir1 = "/ocean/shared/SalishSeaCastData/FRPlume/ctd/20170410/" + basedir2 = "/ocean/shared/SalishSeaCastData/FRPlume/ctd/20170531/" + basedir3 = "/ocean/shared/SalishSeaCastData/FRPlume/ctd/20171101/" + dir19 = "19-4561/4_derive" + dir25 = "25-0363/4_derive" + dir19T10 = "19-4561/4b_deriveTEOS10" + dir25T10 = "25-0363/4a_deriveTEOS10" + + clist = [] + if exp == "exp1" or exp == "all": + clist = clist + list(range(1, 10)) + if exp == "exp2" or exp == "all": + clist = clist + [10, 11, 12, 13, 14.1, 14.2, 15, 16, 17, 18] + if exp == "exp3" or exp == "all": + clist = clist + list(range(19, 25)) + + fpath19 = dict() + fpath25 = dict() for ii in clist: - if ii<10: - fpath19[ii]=os.path.join(basedir1,dir19T10,cnvlist19[ii]) - fpath25[ii]=os.path.join(basedir1,dir25T10,cnvlist25[ii]) - elif ii<19: - fpath19[ii]=os.path.join(basedir2,dir19T10,cnvlist19[ii]) - fpath25[ii]=os.path.join(basedir2,dir25T10,cnvlist25[ii]) + if ii < 10: + fpath19[ii] = os.path.join(basedir1, dir19T10, cnvlist19[ii]) + fpath25[ii] = os.path.join(basedir1, dir25T10, cnvlist25[ii]) + elif ii < 19: + fpath19[ii] = os.path.join(basedir2, dir19T10, cnvlist19[ii]) + fpath25[ii] = os.path.join(basedir2, dir25T10, cnvlist25[ii]) else: - fpath19[ii]=os.path.join(basedir3,dir19T10,cnvlist19[ii]) - fpath25[ii]=os.path.join(basedir3,dir25T10,cnvlist25[ii]) - - cast19=dict() - cast25=dict() + fpath19[ii] = os.path.join(basedir3, dir19T10, cnvlist19[ii]) + fpath25[ii] = os.path.join(basedir3, dir25T10, cnvlist25[ii]) + + cast19 = dict() + cast25 = dict() for ii in clist: - cast19[ii]=Cast(fpath19[ii]) - cast25[ii]=Cast(fpath25[ii]) + cast19[ii] = Cast(fpath19[ii]) + cast25[ii] = Cast(fpath25[ii]) return df0, clist, tcor, cast19, cast25 -def loadDataFRP(exp='all',sel='narrow',dp=1.0,form='binned',vert='P', - meshPath='/data/eolson/results/MEOPAR/NEMO-forcing-new/grid/mesh_mask201702.nc'): + +def loadDataFRP( + exp="all", + sel="narrow", + dp=1.0, + form="binned", + vert="P", + meshPath="/data/eolson/results/MEOPAR/NEMO-forcing-new/grid/mesh_mask201702.nc", +): # exp determines which sampling date to load (or all) # sel determines whether to use narrow data selection, which is a more conservative estimate, # or 'wide' data selection, which includes more near-surface data but can include @@ -327,79 +370,89 @@ def loadDataFRP(exp='all',sel='narrow',dp=1.0,form='binned',vert='P', # form can be 'binned','raw' or 'SSCgrid' and determines if binned and how # vert determines vertical variable: 'P' or 'Z' # meshPath is SSC mesh file location for binning to model grid (form='SSCgrid') - if exp not in {'exp1', 'exp2', 'exp3', 'all'}: - print('option exp='+exp+' is not defined.') + if exp not in {"exp1", "exp2", "exp3", "all"}: + print("option exp=" + exp + " is not defined.") raise - if sel == 'narrow': - prebin=False - elif sel == 'wide': - prebin=True + if sel == "narrow": + prebin = False + elif sel == "wide": + prebin = True else: - print('option sel='+sel+' is not defined.') + print("option sel=" + sel + " is not defined.") raise df0, clist, tcor, cast19, cast25 = loadDataFRP_init(exp=exp) - parDZ=.78 - xmisDZ=.36 - turbDZ=.67 - pshiftdict={'gsw_ctA0':0.0,'gsw_srA0':0.0,'xmiss':xmisDZ,'seaTurbMtr':turbDZ,'par':parDZ, - 'wetStar':0.0,'sbeox0ML_L':0.0,'seaTurbMtrnoQC':turbDZ,'turb_uncor':turbDZ,'turb':turbDZ} + parDZ = 0.78 + xmisDZ = 0.36 + turbDZ = 0.67 + pshiftdict = { + "gsw_ctA0": 
0.0, + "gsw_srA0": 0.0, + "xmiss": xmisDZ, + "seaTurbMtr": turbDZ, + "par": parDZ, + "wetStar": 0.0, + "sbeox0ML_L": 0.0, + "seaTurbMtrnoQC": turbDZ, + "turb_uncor": turbDZ, + "turb": turbDZ, + } # for SSC grid version, load model grid variables - if form=='SSCgrid': - with nc.Dataset(meshPath,'r') as mesh: - tmask=mesh.variables['tmask'][0,:,:,:] - gdept=mesh.variables['gdept_0'][0,:,:,:] - gdepw=mesh.variables['gdepw_0'][0,:,:,:] - nav_lat=mesh.variables['nav_lat'][:,:] - nav_lon=mesh.variables['nav_lon'][:,:] - - zCasts=dict() + if form == "SSCgrid": + with nc.Dataset(meshPath, "r") as mesh: + tmask = mesh.variables["tmask"][0, :, :, :] + gdept = mesh.variables["gdept_0"][0, :, :, :] + gdepw = mesh.variables["gdepw_0"][0, :, :, :] + nav_lat = mesh.variables["nav_lat"][:, :] + nav_lon = mesh.variables["nav_lon"][:, :] + + zCasts = dict() for nn in clist: - ip=np.argmax(cast25[nn].df['prSM'].values) - ilag=df0.loc[df0.Station==nn,'ishift_sub19'].values[0] - pS_pr=df0.loc[df0.Station==nn,'pS_pr'].values[0] - pE_pr=df0.loc[df0.Station==nn,'pE_pr'].values[0] - pS_tur=df0.loc[df0.Station==nn,'pStart25'].values[0] - pE_tur=df0.loc[df0.Station==nn,'pEnd25'].values[0] - lat=df0.loc[df0.Station==nn]['LatDecDeg'].values[0] - lon=df0.loc[df0.Station==nn]['LonDecDeg'].values[0] - if sel=='narrow': - pS=pS_tur - pE=pE_tur - elif sel=='wide': - pS=pS_pr - pE=pE_pr - - cast19[nn].df['seaTurbMtrnoQC']=cast19[nn].df['seaTurbMtr'] - cast19[nn].df['turb_uncor']=turbQC(cast19[nn].df['seaTurbMtr']) - cast19[nn].df['turb']=cast19[nn].df['turb_uncor']*1.0/tcor - - if vert=='Z': + ip = np.argmax(cast25[nn].df["prSM"].values) + ilag = df0.loc[df0.Station == nn, "ishift_sub19"].values[0] + pS_pr = df0.loc[df0.Station == nn, "pS_pr"].values[0] + pE_pr = df0.loc[df0.Station == nn, "pE_pr"].values[0] + pS_tur = df0.loc[df0.Station == nn, "pStart25"].values[0] + pE_tur = df0.loc[df0.Station == nn, "pEnd25"].values[0] + lat = df0.loc[df0.Station == nn]["LatDecDeg"].values[0] + lon = df0.loc[df0.Station == nn]["LonDecDeg"].values[0] + if sel == "narrow": + pS = pS_tur + pE = pE_tur + elif sel == "wide": + pS = pS_pr + pE = pE_pr + + cast19[nn].df["seaTurbMtrnoQC"] = cast19[nn].df["seaTurbMtr"] + cast19[nn].df["turb_uncor"] = turbQC(cast19[nn].df["seaTurbMtr"]) + cast19[nn].df["turb"] = cast19[nn].df["turb_uncor"] * 1.0 / tcor + + if vert == "Z": # calc Z from p - cast25[nn].df['Z']=-1*gsw.z_from_p(cast25[nn].df['prSM'].values,lat) - cast19[nn].df['Z']=-1*gsw.z_from_p(cast19[nn].df['prdM'].values,lat) - zvar25='Z' + cast25[nn].df["Z"] = -1 * gsw.z_from_p(cast25[nn].df["prSM"].values, lat) + cast19[nn].df["Z"] = -1 * gsw.z_from_p(cast19[nn].df["prdM"].values, lat) + zvar25 = "Z" else: - zvar25='prSM' - pmax=cast25[nn].df.loc[ip,zvar25] - if form=='binned' or form=='raw': - edges=np.arange(dp/2,pmax+dp,dp) - targets=[] # use default value of 1/2-way between edges - elif form=='SSCgrid': - jj, ii=geo_tools.find_closest_model_point(lon,lat, nav_lon, nav_lat) - edges=gdepw[:,jj,ii] # model w grid - targets=gdept[:,jj,ii] # model T grid - edges=edges[edges 30) max_day_index = len(days) grid_heights = grid_t.deptht_bounds.values[:, 1] - grid_t.deptht_bounds.values[:, 0] depths = grid_t.deptht.values min_depth_index = np.argmax(depths > min_depth) - if(max_depth and max_depth < max(depths)): + if max_depth and max_depth < max(depths): max_depth_index = np.argmax(depths > max_depth) else: max_depth_index = len(depths) - tracer_quantity_array = ((grid_t[tracer_name].values)*(grid_heights.reshape((1, 40, 1, 1)))) - 
total_tracer_at_depth = tracer_quantity_array[min_day_index:max_day_index, min_depth_index:max_depth_index, :, :].sum() - mean_tracer = total_tracer_at_depth/sum(grid_heights[min_depth_index:max_depth_index])/(max_day_index - min_day_index) - mean_tracer = mean_tracer/15 # number of non-zero grid elements in 5x5 model - return(mean_tracer) + tracer_quantity_array = (grid_t[tracer_name].values) * ( + grid_heights.reshape((1, 40, 1, 1)) + ) + total_tracer_at_depth = tracer_quantity_array[ + min_day_index:max_day_index, min_depth_index:max_depth_index, :, : + ].sum() + mean_tracer = ( + total_tracer_at_depth + / sum(grid_heights[min_depth_index:max_depth_index]) + / (max_day_index - min_day_index) + ) + mean_tracer = mean_tracer / 15 # number of non-zero grid elements in 5x5 model + return mean_tracer def mean_NH4_at_depth(grid_t): - return(mean_tracer_at_depth(grid_t, "NH4")) + return mean_tracer_at_depth(grid_t, "NH4") def mean_NO3_at_depth(grid_t): - return(mean_tracer_at_depth(grid_t, "NO3")) + return mean_tracer_at_depth(grid_t, "NO3") + def mean_NO3_at_20m(grid_t): - return(mean_tracer_at_depth(grid_t, "NO3", 15, 25)) + return mean_tracer_at_depth(grid_t, "NO3", 15, 25) + def mean_DON_at_depth(grid_t): - return(mean_tracer_at_depth(grid_t, "DOC")) + return mean_tracer_at_depth(grid_t, "DOC") def mean_PON_at_depth(grid_t): - return(mean_tracer_at_depth(grid_t, "POC")) + return mean_tracer_at_depth(grid_t, "POC") def time_of_peak_PHY2(grid_t): t = np.array([float(x) for x in grid_t.time_centered.values]) - days = (t[:] - t[0])/10**9/3600/24 + days = (t[:] - t[0]) / 10**9 / 3600 / 24 grid_heights = grid_t.deptht_bounds.values[:, 1] - grid_t.deptht_bounds.values[:, 0] - phy2_quantity_array = ((grid_t["PHY2"].values)*(grid_heights.reshape((1, 40, 1, 1)))) + phy2_quantity_array = (grid_t["PHY2"].values) * ( + grid_heights.reshape((1, 40, 1, 1)) + ) total_phy2 = phy2_quantity_array.sum((1, 2, 3)) bloom_time = days[int(np.argmax(total_phy2))] - return(bloom_time) + return bloom_time def time_surface_NO3_drops_below_4(grid_t): t = np.array([float(x) for x in grid_t.time_centered.values]) - days = (t[:] - t[0])/10**9/3600/24 + days = (t[:] - t[0]) / 10**9 / 3600 / 24 grid_heights = grid_t.deptht_bounds.values[:, 1] - grid_t.deptht_bounds.values[:, 0] - mean_surface_NO3 = np.sum(((grid_t.variables["NO3"][:, :, 1, 1]*grid_heights.reshape((1, 40)))[:, :10]/sum(grid_heights[:10])), axis = 1) + mean_surface_NO3 = np.sum( + ( + (grid_t.variables["NO3"][:, :, 1, 1] * grid_heights.reshape((1, 40)))[ + :, :10 + ] + / sum(grid_heights[:10]) + ), + axis=1, + ) bloom_time = days[int(np.argmax(mean_surface_NO3 < 4))] - return(bloom_time) + return bloom_time def peak_3_day_biomass(grid_t): t = np.array([float(x) for x in grid_t.time_centered.values]) - days = (t[:] - t[0])/10**9/3600/24 + days = (t[:] - t[0]) / 10**9 / 3600 / 24 grid_heights = grid_t.deptht_bounds.values[:, 1] - grid_t.deptht_bounds.values[:, 0] reshaped_grid_heights = grid_heights.reshape((1, 40, 1, 1)) N = np.argmax(days > 3) # time steps for 3 days - - - + primary_producers = ["PHY", "PHY2", "MYRI"] - depth_integrated = np.zeros(grid_t.dims['time_counter']) + depth_integrated = np.zeros(grid_t.dims["time_counter"]) for tracer in primary_producers: - depth_integrated = depth_integrated + np.sum((grid_t[tracer].values)*reshaped_grid_heights, axis = (1, 2, 3)) + depth_integrated = depth_integrated + np.sum( + (grid_t[tracer].values) * reshaped_grid_heights, axis=(1, 2, 3) + ) - time_averaged = np.convolve(depth_integrated, np.ones((N,))/N, 
mode='valid') - return(time_averaged.max()) + time_averaged = np.convolve(depth_integrated, np.ones((N,)) / N, mode="valid") + return time_averaged.max() diff --git a/SalishSeaTools/salishsea_tools/namelist.py b/SalishSeaTools/salishsea_tools/namelist.py index 47b3f254..63f5160e 100644 --- a/SalishSeaTools/salishsea_tools/namelist.py +++ b/SalishSeaTools/salishsea_tools/namelist.py @@ -26,6 +26,7 @@ GNU Lesser General Public License, Version 3 (https://www.gnu.org/copyleft/lesser.html) """ + QUOTE_CHARS = ["'", '"'] @@ -33,6 +34,7 @@ class Token(object): """ Base class for all token types. """ + def __str__(self): name = self.__class__.__name__ if hasattr(self, "value"): @@ -97,9 +99,7 @@ def auto_token(value): """ value = value.strip() if value.startswith("&"): - return ( - GroupEndToken() if value[1:] == 'end' - else GroupStartToken(value[1:])) + return GroupEndToken() if value[1:] == "end" else GroupStartToken(value[1:]) elif value.lower() == ".true.": return BooleanToken(True) elif value.lower() == ".false.": @@ -220,7 +220,7 @@ def group_generator(tokens): current_assignment.append(token) -def parse_assignment(assignment, group): +def parse_assignment(assignment, group): """ Parses all tokens for one assignment. Will write the result to the passed group dictionary. @@ -234,11 +234,12 @@ def parse_assignment(assignment, group): if isinstance(assignment[1], AssignmentToken): values = assignment[2:] array_assignment = False - elif all(( - isinstance(assignment[1], ArrayIndexToken), - isinstance(assignment[2], AssignmentToken), - - )): + elif all( + ( + isinstance(assignment[1], ArrayIndexToken), + isinstance(assignment[2], AssignmentToken), + ) + ): array_index = assignment[1].value - 1 values = assignment[3:] array_assignment = True @@ -259,7 +260,7 @@ def parse_assignment(assignment, group): raise IndexError(msg) group[assignment[0].value] = [values] except IndexError: - msg = 'Array elements must be asigned in order' + msg = "Array elements must be asigned in order" raise IndexError(msg) diff --git a/SalishSeaTools/salishsea_tools/nc_tools.py b/SalishSeaTools/salishsea_tools/nc_tools.py index caadc4d1..305cabac 100644 --- a/SalishSeaTools/salishsea_tools/nc_tools.py +++ b/SalishSeaTools/salishsea_tools/nc_tools.py @@ -40,7 +40,7 @@ from salishsea_tools import hg_commands as hg -def get_hindcast_prefix(date, res='h', version='201905'): +def get_hindcast_prefix(date, res="h", version="201905"): """Construct hindcast results prefix given the date, resolution and version e.g., /results/SalishSea/nowcast-green.201905/ddmmmyy/SalishSea_1h_YYYYMMDD_YYYYMMDD @@ -58,14 +58,19 @@ def get_hindcast_prefix(date, res='h', version='201905'): """ # Make NEMO hindcast path - path, datestr = f'SalishSea/nowcast-green.{version}', date.strftime('%d%b%y').lower() - for root in ['/results', '/results2']: + path, datestr = ( + f"SalishSea/nowcast-green.{version}", + date.strftime("%d%b%y").lower(), + ) + for root in ["/results", "/results2"]: testpath = os.path.join(root, path, datestr) if os.path.exists(testpath): path = testpath break else: - raise ValueError(f"No hindcast {version} record found for the specified date {date.strftime('%Y-%b-%d')}") + raise ValueError( + f"No hindcast {version} record found for the specified date {date.strftime('%Y-%b-%d')}" + ) prefix = os.path.join(path, f"SalishSea_1{res}_{date.strftime('%Y%m%d_%Y%m%d')}") return prefix @@ -83,14 +88,16 @@ def get_GEM_path(date): """ # Make GEM path - path, datestr = '/results/forcing/atmospheric/GEM2.5', date.strftime('y%Ym%md%d') - for 
config, prefix in zip(['operational', 'gemlam'], ['ops', 'gemlam']): - testpath = os.path.join(path, config, f'{prefix}_{datestr}.nc') + path, datestr = "/results/forcing/atmospheric/GEM2.5", date.strftime("y%Ym%md%d") + for config, prefix in zip(["operational", "gemlam"], ["ops", "gemlam"]): + testpath = os.path.join(path, config, f"{prefix}_{datestr}.nc") if os.path.exists(testpath): path = testpath break else: - raise ValueError(f"No GEM2.5 record found for the specified date {date.strftime('%Y-%b-%d')}") + raise ValueError( + f"No GEM2.5 record found for the specified date {date.strftime('%Y-%b-%d')}" + ) return path @@ -107,11 +114,13 @@ def get_WW3_path(date): """ # Make WW3 path - path = '/opp/wwatch3/nowcast' - datestr = [date.strftime(fmt) for fmt in ('%d%b%y', '%Y%m%d_%Y%m%d')] - path = os.path.join(path, datestr[0].lower(), f'SoG_ww3_fields_{datestr[1]}.nc') + path = "/opp/wwatch3/nowcast" + datestr = [date.strftime(fmt) for fmt in ("%d%b%y", "%Y%m%d_%Y%m%d")] + path = os.path.join(path, datestr[0].lower(), f"SoG_ww3_fields_{datestr[1]}.nc") if not os.path.exists(path): - raise ValueError(f"No WW3 record found for the specified date {date.strftime('%Y-%b-%d')}") + raise ValueError( + f"No WW3 record found for the specified date {date.strftime('%Y-%b-%d')}" + ) return path @@ -145,8 +154,8 @@ def dataset_from_path(path, *args, **kwargs): try: return nc.Dataset(str(path), *args, **kwargs) except RuntimeError as e: - if str(e) == 'No such file or directory': - raise IOError('No such file or directory') + if str(e) == "No such file or directory": + raise IOError("No such file or directory") else: raise @@ -157,9 +166,9 @@ def show_dataset_attrs(dataset): :arg dataset: netcdf dataset object :type dataset: :py:class:`netCDF4.Dataset` """ - print('file format: {}'.format(dataset.file_format)) + print("file format: {}".format(dataset.file_format)) for attr in dataset.ncattrs(): - print('{}: {}'.format(attr, dataset.getncattr(attr))) + print("{}: {}".format(attr, dataset.getncattr(attr))) def show_dimensions(dataset): @@ -201,7 +210,7 @@ def show_variable_attrs(dataset, *vars): print(var) -def time_origin(dataset, time_var='time_counter'): +def time_origin(dataset, time_var="time_counter"): """Return the time_var.time_origin value. :arg dataset: netcdf dataset object @@ -218,30 +227,34 @@ def time_origin(dataset, time_var='time_counter'): time_counter = dataset.variables[time_var] except KeyError: raise KeyError( - 'dataset does not have {time_var} variable'.format( - time_var=time_var)) + "dataset does not have {time_var} variable".format(time_var=time_var) + ) try: # netCDF4 dataset time_orig = time_counter.time_origin.title() except AttributeError: try: # xarray dataset - time_orig = time_counter.attrs['time_origin'].title() + time_orig = time_counter.attrs["time_origin"].title() except KeyError: raise AttributeError( - 'NetCDF: ' - '{time_var} variable does not have ' - 'time_origin attribute'.format(time_var=time_var)) + "NetCDF: " + "{time_var} variable does not have " + "time_origin attribute".format(time_var=time_var) + ) value = arrow.get( time_orig, - ['YYYY-MMM-DD HH:mm:ss', - 'DD-MMM-YYYY HH:mm:ss', - 'DD-MMM-YYYY HH:mm', - 'YYYY-MM-DD HH:mm:ss']) + [ + "YYYY-MMM-DD HH:mm:ss", + "DD-MMM-YYYY HH:mm:ss", + "DD-MMM-YYYY HH:mm", + "YYYY-MM-DD HH:mm:ss", + ], + ) return value -def timestamp(dataset, tindex, time_var='time_counter'): +def timestamp(dataset, tindex, time_var="time_counter"): """Return the time stamp of the tindex time_counter value(s) in dataset. 
The time stamp is calculated by adding the time_counter[tindex] value @@ -270,15 +283,14 @@ def timestamp(dataset, tindex, time_var='time_counter'): try: results.append(time_orig + timedelta(seconds=time_counter[i].item())) except IndexError: - raise IndexError( - 'time_counter variable has no tindex={}'.format(tindex)) + raise IndexError("time_counter variable has no tindex={}".format(tindex)) if len(results) > 1: return results else: return results[0] -def get_datetimes(dataset, time_var='time_counter'): +def get_datetimes(dataset, time_var="time_counter"): """Return the datetime array for a dataset This is a wrapper around nc_tools.timestamp that automatically @@ -296,9 +308,11 @@ def get_datetimes(dataset, time_var='time_counter'): """ # Get arrow objects - time_stamps = timestamp(dataset, - np.arange(dataset.variables['time_counter'].shape[0]), - time_var=time_var) + time_stamps = timestamp( + dataset, + np.arange(dataset.variables["time_counter"].shape[0]), + time_var=time_var, + ) # Get datetime.datetime objects datetimes = np.array([time_stamp.datetime for time_stamp in time_stamps]) @@ -317,13 +331,13 @@ def xarraytime_to_datetime(xarraytime): :rtype: :py:class:`numpy.ndarray` of :py:class:`datetime.datetime` """ - datetime_obj = xarraytime.values.astype('datetime64[s]').astype(datetime) + datetime_obj = xarraytime.values.astype("datetime64[s]").astype(datetime) return datetime_obj def ssh_timeseries_at_point( - grid_T, j, i, datetimes=False, time_var='time_counter', ssh_var='sossheig' + grid_T, j, i, datetimes=False, time_var="time_counter", ssh_var="sossheig" ): """Return the sea surface height and time counter values at a single grid point from a NEMO tracer results dataset. @@ -358,7 +372,7 @@ def ssh_timeseries_at_point( time = timestamp(grid_T, range(len(ssh)), time_var=time_var) if datetimes: time = np.array([a.datetime for a in time]) - ssh_ts = namedtuple('ssh_ts', 'ssh, time') + ssh_ts = namedtuple("ssh_ts", "ssh, time") return ssh_ts(ssh, np.array(time)) @@ -391,12 +405,12 @@ def uv_wind_timeseries_at_point(grid_weather, j, i, datetimes=False): values. :rtype: :py:class:`collections.namedtuple` """ - u_wind = grid_weather.variables['u_wind'][:, j, i] - v_wind = grid_weather.variables['v_wind'][:, j, i] + u_wind = grid_weather.variables["u_wind"][:, j, i] + v_wind = grid_weather.variables["v_wind"][:, j, i] time = timestamp(grid_weather, range(len(u_wind))) if datetimes: time = np.array([a.datetime for a in time]) - wind_ts = namedtuple('wind_ts', 'u, v, time') + wind_ts = namedtuple("wind_ts", "u, v, time") return wind_ts(u_wind, v_wind, np.array(time)) @@ -405,7 +419,7 @@ def init_dataset_attrs( title, notebook_name, nc_filepath, - comment='', + comment="", quiet=False, ): """Initialize the required global attributes of the netCDF dataset. @@ -446,22 +460,33 @@ def init_dataset_attrs( :type quiet: Boolean """ reqd_attrs = ( - ('Conventions', 'CF-1.6'), - ('title', title), - ('institution', ('Dept of Earth, Ocean & Atmospheric Sciences, ' - 'University of British Columbia')), - ('source', _notebook_hg_url(notebook_name)), - ('references', _nc_file_hg_url(nc_filepath)), - ('history', ( - '[{:%Y-%m-%d %H:%M:%S}] Created netCDF4 zlib=True dataset.' 
- .format(datetime.now()))), - ('comment', comment), + ("Conventions", "CF-1.6"), + ("title", title), + ( + "institution", + ( + "Dept of Earth, Ocean & Atmospheric Sciences, " + "University of British Columbia" + ), + ), + ("source", _notebook_hg_url(notebook_name)), + ("references", _nc_file_hg_url(nc_filepath)), + ( + "history", + ( + "[{:%Y-%m-%d %H:%M:%S}] Created netCDF4 zlib=True dataset.".format( + datetime.now() + ) + ), + ), + ("comment", comment), ) for name, value in reqd_attrs: if name in dataset.ncattrs(): if not quiet: - print('Existing attribute value found, not overwriting: {}' - .format(name)) + print( + "Existing attribute value found, not overwriting: {}".format(name) + ) else: dataset.setncattr(name, value) if not quiet: @@ -487,19 +512,25 @@ def _notebook_hg_url(notebook_name): :rtype: str """ if not notebook_name: - return 'REQUIRED' + return "REQUIRED" default_url = hg.default_url() try: - bitbucket, repo_path = default_url.partition('bitbucket.org')[1:] + bitbucket, repo_path = default_url.partition("bitbucket.org")[1:] except AttributeError: - return 'REQUIRED' + return "REQUIRED" repo = os.path.split(repo_path)[-1] local_path = os.getcwd().partition(repo)[-1] - if not notebook_name.endswith('.ipynb'): - notebook_name += '.ipynb' + if not notebook_name.endswith(".ipynb"): + notebook_name += ".ipynb" url = os.path.join( - 'https://', bitbucket, repo_path[1:], 'src', 'tip', - local_path[1:], notebook_name) + "https://", + bitbucket, + repo_path[1:], + "src", + "tip", + local_path[1:], + notebook_name, + ) return url @@ -517,19 +548,18 @@ def _nc_file_hg_url(nc_filepath): :returns: The Bitbucket URL for the nc_filepath netCDF file :rtype: str """ - rel_path = ''.join(nc_filepath.rpartition('../')[:2]) + rel_path = "".join(nc_filepath.rpartition("../")[:2]) try: repo_path = nc_filepath.split(rel_path)[1] except ValueError: - return 'REQUIRED' - repo, filepath = repo_path.split('/', 1) + return "REQUIRED" + repo, filepath = repo_path.split("/", 1) default_url = hg.default_url(os.path.join(rel_path, repo)) try: - bitbucket, repo_path = default_url.partition('bitbucket.org')[1:] + bitbucket, repo_path = default_url.partition("bitbucket.org")[1:] except AttributeError: - return 'REQUIRED' - url = os.path.join( - 'https://', bitbucket, repo_path[1:], 'src', 'tip', filepath) + return "REQUIRED" + url = os.path.join("https://", bitbucket, repo_path[1:], "src", "tip", filepath) return url @@ -541,31 +571,43 @@ def check_dataset_attrs(dataset): :type dataset: :py:class:`netCDF4.Dataset` """ reqd_dataset_attrs = ( - 'Conventions', 'title', 'institution', 'source', 'references', - 'history', 'comment') - reqd_variable_attrs = ('units', 'long_name') + "Conventions", + "title", + "institution", + "source", + "references", + "history", + "comment", + ) + reqd_variable_attrs = ("units", "long_name") for attr in reqd_dataset_attrs: if attr not in dataset.ncattrs(): - print('Missing required dataset attribute: {}'.format(attr)) + print("Missing required dataset attribute: {}".format(attr)) continue - if attr != 'comment': + if attr != "comment": value = dataset.getncattr(attr) - if value in ('', 'REQUIRED'): - print('Missing value for dataset attribute: {}'.format(attr)) + if value in ("", "REQUIRED"): + print("Missing value for dataset attribute: {}".format(attr)) for var_name, var in dataset.variables.items(): for attr in reqd_variable_attrs: if attr not in var.ncattrs(): - print('Missing required variable attribute for {}: {}' - .format(var_name, attr)) + print( + "Missing required 
variable attribute for {}: {}".format( + var_name, attr + ) + ) continue value = var.getncattr(attr) if not value: - print('Missing value for variable attribute for {}: {}' - .format(var_name, attr)) + print( + "Missing value for variable attribute for {}: {}".format( + var_name, attr + ) + ) def generate_pressure_file(filename, p_file, t_file, alt_file, day): - """ Generates a file with CGRF pressure corrected to sea level. + """Generates a file with CGRF pressure corrected to sea level. :arg filename: full path name where the corrected pressure should be saved :type filename: string @@ -584,14 +626,14 @@ def generate_pressure_file(filename, p_file, t_file, alt_file, day): """ # load data f = nc.Dataset(p_file) - press = f.variables['atmpres'] + press = f.variables["atmpres"] f = nc.Dataset(t_file) - temp = f.variables['tair'] - time = f.variables['time_counter'] - lon = f.variables['nav_lon'] - lat = f.variables['nav_lat'] + temp = f.variables["tair"] + time = f.variables["time_counter"] + lon = f.variables["nav_lon"] + lat = f.variables["nav_lat"] f = nc.Dataset(alt_file) - alt = f.variables['alt'] + alt = f.variables["alt"] # correct pressure press_corr = np.zeros(press.shape) @@ -599,28 +641,30 @@ def generate_pressure_file(filename, p_file, t_file, alt_file, day): press_corr[k, :, :] = _slp(alt, press[k, :, :], temp[k, :, :]) # Create netcdf - slp_file = nc.Dataset(filename, 'w', zlib=True) - description = 'corrected sea level pressure' + slp_file = nc.Dataset(filename, "w", zlib=True) + description = "corrected sea level pressure" # dataset attributes init_dataset_attrs( slp_file, title=( - 'CGRF {} forcing dataset for {}' - .format(description, day.format('YYYY-MM-DD'))), - notebook_name='', - nc_filepath='', + "CGRF {} forcing dataset for {}".format( + description, day.format("YYYY-MM-DD") + ) + ), + notebook_name="", + nc_filepath="", comment=( - 'Processed and adjusted from ' - 'goapp.ocean.dal.ca::canadian_GDPS_reforecasts_v1 files.'), + "Processed and adjusted from " + "goapp.ocean.dal.ca::canadian_GDPS_reforecasts_v1 files." 
+ ), quiet=True, ) # dimensions - slp_file.createDimension('time_counter', 0) - slp_file.createDimension('y', press_corr.shape[1]) - slp_file.createDimension('x', press_corr.shape[2]) + slp_file.createDimension("time_counter", 0) + slp_file.createDimension("y", press_corr.shape[1]) + slp_file.createDimension("x", press_corr.shape[2]) # time - time_counter = slp_file.createVariable( - 'time_counter', 'double', ('time_counter',)) + time_counter = slp_file.createVariable("time_counter", "double", ("time_counter",)) time_counter.calendar = time.calendar time_counter.long_name = time.long_name time_counter.title = time.title @@ -628,14 +672,14 @@ def generate_pressure_file(filename, p_file, t_file, alt_file, day): time_counter[:] = time[:] time_counter.valid_range = time.valid_range # lat/lon variables - nav_lat = slp_file.createVariable('nav_lat', 'float32', ('y', 'x')) + nav_lat = slp_file.createVariable("nav_lat", "float32", ("y", "x")) nav_lat.long_name = lat.long_name nav_lat.units = lat.units nav_lat.valid_max = lat.valid_max nav_lat.valid_min = lat.valid_min nav_lat.nav_model = lat.nav_model nav_lat[:] = lat - nav_lon = slp_file.createVariable('nav_lon', 'float32', ('y', 'x')) + nav_lon = slp_file.createVariable("nav_lon", "float32", ("y", "x")) nav_lon.long_name = lon.long_name nav_lon.units = lon.units nav_lon.valid_max = lon.valid_max @@ -643,9 +687,8 @@ def generate_pressure_file(filename, p_file, t_file, alt_file, day): nav_lon.nav_model = lon.nav_model nav_lon[:] = lon # Pressure - atmpres = slp_file.createVariable( - 'atmpres', 'float32', ('time_counter', 'y', 'x')) - atmpres.long_name = 'Sea Level Pressure' + atmpres = slp_file.createVariable("atmpres", "float32", ("time_counter", "y", "x")) + atmpres.long_name = "Sea Level Pressure" atmpres.units = press.units atmpres.valid_min = press.valid_min atmpres.valid_max = press.valid_max @@ -657,7 +700,7 @@ def generate_pressure_file(filename, p_file, t_file, alt_file, day): def generate_pressure_file_ops(filename, p_file, t_file, alt_file, day): - """ Generates a file with CGRF pressure corrected to sea level. + """Generates a file with CGRF pressure corrected to sea level. 
:arg filename: full path name where the corrected pressure should be saved :type filename: string @@ -676,16 +719,16 @@ def generate_pressure_file_ops(filename, p_file, t_file, alt_file, day): """ # load data f = nc.Dataset(p_file) - press = f.variables['atmpres'] + press = f.variables["atmpres"] f = nc.Dataset(t_file) - temp = f.variables['tair'] - time = f.variables['time_counter'] - lon = f.variables['nav_lon'] - lat = f.variables['nav_lat'] + temp = f.variables["tair"] + time = f.variables["time_counter"] + lon = f.variables["nav_lon"] + lat = f.variables["nav_lat"] f = nc.Dataset(alt_file) - alt = f.variables['HGT_surface'] - lat_a = f.variables['latitude'] - lon_a = f.variables['longitude'] + alt = f.variables["HGT_surface"] + lat_a = f.variables["latitude"] + lon_a = f.variables["longitude"] alt, lon_a, lat_a = _truncate_height(alt, lon_a, lat_a, lon, lat) @@ -695,44 +738,42 @@ def generate_pressure_file_ops(filename, p_file, t_file, alt_file, day): press_corr[k, :, :] = _slp(alt, press[k, :, :], temp[k, :, :]) # Create netcdf - slp_file = nc.Dataset(filename, 'w', zlib=True) - description = 'corrected sea level pressure' + slp_file = nc.Dataset(filename, "w", zlib=True) + description = "corrected sea level pressure" # dataset attributes init_dataset_attrs( slp_file, title=( - 'GRIB2 {} forcing dataset for {}' - .format(description, day.format('YYYY-MM-DD'))), - notebook_name='', - nc_filepath='', - comment=( - 'Processed and adjusted from ' - 'GEM 2.5km operational model'), + "GRIB2 {} forcing dataset for {}".format( + description, day.format("YYYY-MM-DD") + ) + ), + notebook_name="", + nc_filepath="", + comment=("Processed and adjusted from " "GEM 2.5km operational model"), quiet=True, ) # dimensions - slp_file.createDimension('time_counter', 0) - slp_file.createDimension('y', press_corr.shape[1]) - slp_file.createDimension('x', press_corr.shape[2]) + slp_file.createDimension("time_counter", 0) + slp_file.createDimension("y", press_corr.shape[1]) + slp_file.createDimension("x", press_corr.shape[2]) # time - time_counter = slp_file.createVariable( - 'time_counter', 'double', ('time_counter',)) + time_counter = slp_file.createVariable("time_counter", "double", ("time_counter",)) time_counter.long_name = time.long_name time_counter.units = time.units time_counter[:] = time[:] # lat/lon variables - nav_lat = slp_file.createVariable('nav_lat', 'float32', ('y', 'x')) + nav_lat = slp_file.createVariable("nav_lat", "float32", ("y", "x")) nav_lat.long_name = lat.long_name nav_lat.units = lat.units nav_lat[:] = lat - nav_lon = slp_file.createVariable('nav_lon', 'float32', ('y', 'x')) + nav_lon = slp_file.createVariable("nav_lon", "float32", ("y", "x")) nav_lon.long_name = lon.long_name nav_lon.units = lon.units nav_lon[:] = lon # Pressure - atmpres = slp_file.createVariable( - 'atmpres', 'float32', ('time_counter', 'y', 'x')) - atmpres.long_name = 'Sea Level Pressure' + atmpres = slp_file.createVariable("atmpres", "float32", ("time_counter", "y", "x")) + atmpres.long_name = "Sea Level Pressure" atmpres.units = press.units atmpres[:] = press_corr[:] @@ -745,32 +786,40 @@ def _slp(Z, P, T): gam = 0.0098 # lapse rate(deg/m) p0 = 101000 # average sea surface heigh in Pa - ps = P * (gam * (Z / T) + 1)**(g / gam / R) + ps = P * (gam * (Z / T) + 1) ** (g / gam / R) return ps def _truncate_height(alt1, lon1, lat1, lon2, lat2): - """ Truncates the height file over our smaller domain. + """Truncates the height file over our smaller domain. 
alt1, lon1, lat1, are the height, longitude and latitude of the larger domain. lon2, lat2 are the longitude and latitude of the smaller domain. - returns h,lons,lats, the height, longiutde and latitude over the smaller domain. """ + returns h,lons,lats, the height, longiutde and latitude over the smaller domain.""" # bottom left (i,j) - i = np.where(np.logical_and(np.abs(lon1 - lon2[0, 0]) < 10**(-5), - np.abs(lat1 - lat2[0, 0]) < 10**(-5))) + i = np.where( + np.logical_and( + np.abs(lon1 - lon2[0, 0]) < 10 ** (-5), + np.abs(lat1 - lat2[0, 0]) < 10 ** (-5), + ) + ) i_st = i[1] j_st = i[0] # top right - i = np.where(np.logical_and(np.abs(lon1 - lon2[-1, -1]) < 10**(-5), - np.abs(lat1 - lat2[-1, -1]) < 10**(-5))) + i = np.where( + np.logical_and( + np.abs(lon1 - lon2[-1, -1]) < 10 ** (-5), + np.abs(lat1 - lat2[-1, -1]) < 10 ** (-5), + ) + ) i_ed = i[1] j_ed = i[0] - h_small = alt1[0, j_st:j_ed + 1, i_st:i_ed + 1] - lat_small = lat1[j_st:j_ed + 1, i_st:i_ed + 1] - lon_small = lon1[j_st:j_ed + 1, i_st:i_ed + 1] + h_small = alt1[0, j_st : j_ed + 1, i_st : i_ed + 1] + lat_small = lat1[j_st : j_ed + 1, i_st : i_ed + 1] + lon_small = lon1[j_st : j_ed + 1, i_st : i_ed + 1] return h_small, lon_small, lat_small @@ -797,7 +846,7 @@ def combine_subdomain(filenames, outfilename): shapes = _define_shapes(filenames) # Initialize - new = nc.Dataset(outfilename, 'w') + new = nc.Dataset(outfilename, "w") _initialize_dimensions(new, nc.Dataset(filenames[0, 0])) newvars = _initialize_variables(new, nc.Dataset(filenames[0, 0])) @@ -836,14 +885,14 @@ def _define_shapes(filenames): for i in np.arange(filenames.shape[1]): name = filenames[j, i] f = nc.Dataset(name) - x = f.dimensions['x'].__len__() - y = f.dimensions['y'].__len__() + x = f.dimensions["x"].__len__() + y = f.dimensions["y"].__len__() shapes[name] = {} - shapes[name]['iss'] = iss - shapes[name]['iee'] = iss+x - shapes[name]['jss'] = jss - shapes[name]['jee'] = jss+y - iss = iss+x + shapes[name]["iss"] = iss + shapes[name]["iee"] = iss + x + shapes[name]["jss"] = jss + shapes[name]["jee"] = jss + y + iss = iss + x jss = jss + y return shapes @@ -862,7 +911,7 @@ def _initialize_dimensions(newfile, oldfile): """ for dimname in oldfile.dimensions: dim = oldfile.dimensions[dimname] - if dimname == 'x' or dimname == 'y': + if dimname == "x" or dimname == "y": newdim = newfile.createDimension(dimname) else: newdim = newfile.createDimension(dimname, size=dim.__len__()) @@ -911,11 +960,11 @@ def _concatentate_variables(filenames, shapes, variables): newvar = variables[varname] f = nc.Dataset(name) oldvar = f.variables[varname] - x1 = shapes[name]['iss'] - x2 = shapes[name]['iee'] - y1 = shapes[name]['jss'] - y2 = shapes[name]['jee'] - if 'x' in newvar.dimensions: + x1 = shapes[name]["iss"] + x2 = shapes[name]["iee"] + y1 = shapes[name]["jss"] + y2 = shapes[name]["jee"] + if "x" in newvar.dimensions: newvar[..., y1:y2, x1:x2] = oldvar[..., :, :] @@ -988,9 +1037,9 @@ def __init__(self, files): # Open the first dataset and set a few class variables d0 = self._dsmgr[0] -# self.description = d0.description + # self.description = d0.description self.file_format = d0.file_format - self.filepath = files + self.filepath = files # Find the time dimension name for dim in d0.dimensions: @@ -1012,7 +1061,9 @@ def __init__(self, files): for vname in vars0: if vars0[vname].dimensions[0] == timedimname: # We concatenate this variable - self.variables[vname] = self.scVariable(vars0[vname], vname, self._dsmgr, fi, li) + self.variables[vname] = self.scVariable( + vars0[vname], 
vname, self._dsmgr, fi, li + ) else: # Passthrough this variable to the first file self.variables[vname] = vars0[vname] @@ -1033,10 +1084,11 @@ class scDatasetManager(object): """ Manages datasets by opening/closing them on demand """ + def __init__(self, files): - self._files = files + self._files = files self._MAXOPEN = getrlimit(RLIMIT_NOFILE)[0] // 5 - self._dslist = [(-1, None)] * self._MAXOPEN + self._dslist = [(-1, None)] * self._MAXOPEN def __getitem__(self, di): """ @@ -1053,7 +1105,7 @@ def __getitem__(self, di): # Repurpose slot si for the requested dataset ds.close() # Now open the requested dataset and store it in slot si - ds = nc.Dataset(self._files[di], 'r') + ds = nc.Dataset(self._files[di], "r") self._dslist[si] = (di, ds) return ds @@ -1069,17 +1121,18 @@ class scVariable(object): - We aim to have correct indexing, and set a few class variables such as shape and dimensions correctly. Attribute handling, etc is not implemented. """ + def __init__(self, v0, vname, datasets, fi, li): self.ds = datasets self._fi = fi self._li = li # Set a few class variables - self.name = vname + self.name = vname self.dimensions = v0.dimensions - self.dtype = v0.dtype - self.ndim = v0.ndim - self.shape = (len(self._fi), ) + v0.shape[1:] + self.dtype = v0.dtype + self.ndim = v0.ndim + self.shape = (len(self._fi),) + v0.shape[1:] def __getitem__(self, initems): """ @@ -1087,23 +1140,23 @@ def __getitem__(self, initems): """ # Make the input iterable if not isinstance(initems, tuple): - initems = initems, + initems = (initems,) # Convert any ellipsis to slice - items = [slice(None,None,None)]*self.ndim + items = [slice(None, None, None)] * self.ndim for i, item in enumerate(initems): if item is not Ellipsis: items[i] = item else: for j, item in enumerate(reversed(initems)): if item is not Ellipsis: - items[self.ndim-j-1] = item + items[self.ndim - j - 1] = item else: break break # Find the time indices - ti = items[0] # global time indices to extract, may be int or slice + ti = items[0] # global time indices to extract, may be int or slice fi = self._fi[ti] # index of each file (dataset) to draw from li = self._li[ti] # local time index for each dataset @@ -1116,30 +1169,42 @@ def __getitem__(self, initems): if self.ndim == 3: out = self.ds[fi].variables[self.name][li, items[1], items[2]] if self.ndim == 4: - out = self.ds[fi].variables[self.name][li, items[1], items[2], items[3]] + out = self.ds[fi].variables[self.name][ + li, items[1], items[2], items[3] + ] return out # If we need to concatenate, then we need to determine the output # array size. This approach is an ugly hack but it works. 
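As a minimal sketch of the index bookkeeping this concatenation relies on, assuming each file contributes a known number of records along the time dimension, the global-to-local mapping can be illustrated with plain arrays; `counts`, `fi`, and `li` below are hypothetical stand-ins for the lists that scDataset.__init__ builds, not the actual implementation:

    import numpy as np

    counts = [3, 2, 4]  # assumed records per file, for illustration only
    # fi: which file each global time index lives in
    fi = np.concatenate([np.full(c, n) for n, c in enumerate(counts)])
    # li: the local time index within that file
    li = np.concatenate([np.arange(c) for c in counts])

    ti = slice(2, 7)  # a global time slice requested by a caller
    print(fi[ti])     # -> [0 1 1 2 2]  file to read each record from
    print(li[ti])     # -> [2 0 1 0 1]  record index within that file

Indexing `fi` and `li` with the caller's time selection is what lets __getitem__ read each requested record from the right dataset and fill the output array sequentially.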
sizo = [1] * self.ndim # assume one in each dimension - rdim = [] # list of dimensions to remove + rdim = [] # list of dimensions to remove for ii, item in enumerate(items): if type(item) is int or type(item) is np.int64: rdim += [ii] - else: # update output size at this dim if not an integer index - tmp = [None] * self.shape[ii] # build a dummy array - sizo[ii] = len(tmp[item]) # index the dummy array, record length - out = np.zeros(sizo, self.dtype) # allocate output array with matching data type - out = np.squeeze(out, axis=tuple(rdim)) # remove unwanted singleton dimensions + else: # update output size at this dim if not an integer index + tmp = [None] * self.shape[ii] # build a dummy array + sizo[ii] = len(tmp[item]) # index the dummy array, record length + out = np.zeros( + sizo, self.dtype + ) # allocate output array with matching data type + out = np.squeeze( + out, axis=tuple(rdim) + ) # remove unwanted singleton dimensions # Now we read each time index sequentially and fill the output array for ii in range(len(fi)): if self.ndim == 1: out[ii] = self.ds[fi[ii]].variables[self.name][li[ii]] if self.ndim == 2: - out[ii, ...] = self.ds[fi[ii]].variables[self.name][li[ii], items[1]] + out[ii, ...] = self.ds[fi[ii]].variables[self.name][ + li[ii], items[1] + ] if self.ndim == 3: - out[ii, ...] = self.ds[fi[ii]].variables[self.name][li[ii], items[1], items[2]] + out[ii, ...] = self.ds[fi[ii]].variables[self.name][ + li[ii], items[1], items[2] + ] if self.ndim == 4: - out[ii, ...] = self.ds[fi[ii]].variables[self.name][li[ii], items[1], items[2], items[3]] + out[ii, ...] = self.ds[fi[ii]].variables[self.name][ + li[ii], items[1], items[2], items[3] + ] return out diff --git a/SalishSeaTools/salishsea_tools/onc_sog_adcps.py b/SalishSeaTools/salishsea_tools/onc_sog_adcps.py index 7872d855..feea7276 100644 --- a/SalishSeaTools/salishsea_tools/onc_sog_adcps.py +++ b/SalishSeaTools/salishsea_tools/onc_sog_adcps.py @@ -20,7 +20,7 @@ import arrow -adcp = namedtuple('ADCP', 'device_id, sensor_id') +adcp = namedtuple("ADCP", "device_id, sensor_id") adcps = { # Keys are instrument serial numbers 8580: adcp(device_id=65, sensor_id=95), # rdi adcp 150 khz wh @@ -32,49 +32,64 @@ } -deployment = namedtuple('Deployment', 'id, start, end, serial_no, site_id') +deployment = namedtuple("Deployment", "id, start, end, serial_no, site_id") deployments = { # Keys are the same as in :py:data:`~salishsea_tools.places.PLACES` - 'Central node': { - 'location id': 4, - 'history': [ + "Central node": { + "location id": 4, + "history": [ deployment( - id='VIP-14', + id="VIP-14", start=arrow.get(2016, 5, 3), - end=arrow.now().to('Canada/Pacific'), - serial_no=8580, site_id=1000661), + end=arrow.now().to("Canada/Pacific"), + serial_no=8580, + site_id=1000661, + ), deployment( - id='VIP-13', - start=arrow.get(2015, 8, 31), end=arrow.get(2016, 5, 3), - serial_no=8580, site_id=1000479), + id="VIP-13", + start=arrow.get(2015, 8, 31), + end=arrow.get(2016, 5, 3), + serial_no=8580, + site_id=1000479, + ), ], }, - 'East node': { - 'location id': 3, - 'history': [ + "East node": { + "location id": 3, + "history": [ deployment( - id='VIP-14', + id="VIP-14", start=arrow.get(2016, 5, 1), - end=arrow.now().to('Canada/Pacific'), - serial_no=8497, site_id=1000670), + end=arrow.now().to("Canada/Pacific"), + serial_no=8497, + site_id=1000670, + ), deployment( - id='VIP-13', - start=arrow.get(2015, 8, 27), end=arrow.get(2016, 5, 1), - serial_no=8497, site_id=1000475), - ] + id="VIP-13", + start=arrow.get(2015, 8, 27), + 
end=arrow.get(2016, 5, 1), + serial_no=8497, + site_id=1000475, + ), + ], }, - 'Delta BBL node': { - 'location id': 14, - 'history': [ + "Delta BBL node": { + "location id": 14, + "history": [ deployment( - id='BBL-SG-05', + id="BBL-SG-05", start=arrow.get(2016, 5, 1), - end=arrow.now().to('Canada/Pacific'), - serial_no=17955, site_id=1000668), + end=arrow.now().to("Canada/Pacific"), + serial_no=17955, + site_id=1000668, + ), deployment( - id='BBL-SG-04', - start=arrow.get(2015, 8, 30), end=arrow.get(2016, 5, 1), - serial_no=17955, site_id=1000474), - ] + id="BBL-SG-04", + start=arrow.get(2015, 8, 30), + end=arrow.get(2016, 5, 1), + serial_no=17955, + site_id=1000474, + ), + ], }, } diff --git a/SalishSeaTools/salishsea_tools/places.py b/SalishSeaTools/salishsea_tools/places.py index 292d59ad..b0303e64 100644 --- a/SalishSeaTools/salishsea_tools/places.py +++ b/SalishSeaTools/salishsea_tools/places.py @@ -36,528 +36,523 @@ #: presentation of Salish Sea NEMO model results. PLACES = { # Tide gauge stations - 'Campbell River': { + "Campbell River": { # deg E, deg N - 'lon lat': (-125.24, 50.04), + "lon lat": (-125.24, 50.04), # Canadian Hydrographic Service (CHS) or NOAA - 'stn number': 8074, + "stn number": 8074, # m above chart datum - 'mean sea lvl': 2.916, + "mean sea lvl": 2.916, # m above chart datum - 'hist max sea lvl': 5.35, + "hist max sea lvl": 5.35, # indices of nearest weather forcing grid point # j is the latitude (y) direction, i is the longitude (x) direction - 'wind grid ji': (190, 102), + "wind grid ji": (190, 102), # indices of nearest NEMO model grid point # j is the latitude (y) direction, i is the longitude (x) direction - 'NEMO grid ji': (747, 125), + "NEMO grid ji": (747, 125), # indices of nearest wave model grid point # j is the latitude (y) direction, i is the longitude (x) direction - 'ww3 grid ji': (453, 109) - }, - 'Cherry Point': { - 'lon lat': (-122.766667, 48.866667), - 'stn number': 9449424, - 'mean sea lvl': 3.543, - 'hist max sea lvl': 5.846, - 'wind grid ji': (122, 166), - 'NEMO grid ji': (343, 342), - 'ww3 grid ji': (193, 462), - - }, - 'Friday Harbor': { - 'lon lat': (-123.016667, 48.55), - 'stn number': 9449880, - 'mean sea lvl': 2.561, - 'hist max sea lvl': 4.572, - 'wind grid ji': (108, 155), - 'NEMO grid ji': (300, 267), - 'ww3 grid ji': (124, 427), - }, - 'Halfmoon Bay': { - 'lon lat': (-123.912, 49.511), - 'stn number': 7830, - 'NEMO grid ji': (549, 254), - 'wind grid ji': (158, 136), - 'ww3 grid ji': (331, 297), - 'mean sea lvl': 3.14, - 'hist max sea lvl': 5.61, # copied from Point Atkinson - }, - 'Nanaimo': { - 'lon lat': (-123.93, 49.16), - 'stn number': 7917, - 'mean sea lvl': 3.08, - 'hist max sea lvl': 5.47, - 'wind grid ji': (142, 133), - 'NEMO grid ji': (484, 208), # current a little different - 'ww3 grid ji': (261, 298), - - }, - 'Neah Bay': { - 'lon lat': (-124.6, 48.4), - 'stn number': 9443090, - 'mean sea lvl': 1.925, - 'hist max sea lvl': 4.359, - 'wind grid ji': (111, 105), - 'NEMO grid ji': (384, 15), - 'ww3 grid ji': (89, 200), - }, - 'New Westminster': { - 'lon lat': (-122.90535, 49.203683), - 'stn number': 7654, - 'mean sea lvl': 1.3, # from Marlene Jefferies via 20mar18 email from Michael Dunphy - 'hist max sea lvl': 4.66, - 'NEMO grid ji': (423, 363), - 'wind grid ji': (138, 164), + "ww3 grid ji": (453, 109), + }, + "Cherry Point": { + "lon lat": (-122.766667, 48.866667), + "stn number": 9449424, + "mean sea lvl": 3.543, + "hist max sea lvl": 5.846, + "wind grid ji": (122, 166), + "NEMO grid ji": (343, 342), + "ww3 grid ji": 
(193, 462), + }, + "Friday Harbor": { + "lon lat": (-123.016667, 48.55), + "stn number": 9449880, + "mean sea lvl": 2.561, + "hist max sea lvl": 4.572, + "wind grid ji": (108, 155), + "NEMO grid ji": (300, 267), + "ww3 grid ji": (124, 427), + }, + "Halfmoon Bay": { + "lon lat": (-123.912, 49.511), + "stn number": 7830, + "NEMO grid ji": (549, 254), + "wind grid ji": (158, 136), + "ww3 grid ji": (331, 297), + "mean sea lvl": 3.14, + "hist max sea lvl": 5.61, # copied from Point Atkinson + }, + "Nanaimo": { + "lon lat": (-123.93, 49.16), + "stn number": 7917, + "mean sea lvl": 3.08, + "hist max sea lvl": 5.47, + "wind grid ji": (142, 133), + "NEMO grid ji": (484, 208), # current a little different + "ww3 grid ji": (261, 298), + }, + "Neah Bay": { + "lon lat": (-124.6, 48.4), + "stn number": 9443090, + "mean sea lvl": 1.925, + "hist max sea lvl": 4.359, + "wind grid ji": (111, 105), + "NEMO grid ji": (384, 15), + "ww3 grid ji": (89, 200), + }, + "New Westminster": { + "lon lat": (-122.90535, 49.203683), + "stn number": 7654, + "mean sea lvl": 1.3, # from Marlene Jefferies via 20mar18 email from Michael Dunphy + "hist max sea lvl": 4.66, + "NEMO grid ji": (423, 363), + "wind grid ji": (138, 164), # no nearby waves }, - 'Patricia Bay': { - 'lon lat': (-123.4515, 48.6536), - 'stn number': 7277, - 'mean sea lvl': 2.256, - 'hist max sea lvl': 4.38, - 'NEMO grid ji': (351, 214), - 'wind grid ji': (115, 143), - 'ww3 grid ji': (145, 363), - }, - 'Point Atkinson': { - 'lon lat': (-123.25, 49.33), - 'stn number': 7795, - 'mean sea lvl': 3.09, - 'hist max sea lvl': 5.61, - 'wind grid ji': (146, 155), - 'NEMO grid ji': (468, 329), - 'ww3 grid ji': (296, 393), - }, - 'Port Renfrew': { - 'lon lat': (-124.421, 48.555), - 'stn number': 8525, - 'mean sea lvl': 1.937, - 'hist max sea lvl': 4.359, # from Neah Bay - 'NEMO grid ji': (401, 61), - 'wind grid ji': (117, 112), - 'ww3 grid ji': (123, 226), - }, - 'Sandy Cove': { - 'lon lat': (-123.23, 49.34), - 'stn number': 7786, - 'NEMO grid ji': (468, 333), - 'wind grid ji': (146, 155), - 'ww3 grid ji': (294, 396), - 'mean sea lvl': 3.1, # from Marlene Jefferies via 20mar18 email from Michael Dunphy - 'hist max sea lvl': 5.61, # from Pt. Atkinson - }, - 'Squamish': { - 'lon lat': (-123.155, 49.694), - 'stn number': 7811, - 'NEMO grid ji': (532, 389), - 'wind grid ji': (162, 160), - 'ww3 grid ji': (370, 404), - 'mean sea lvl': 3.14, - 'hist max sea lvl': 5.61 # from Pt. 
Atkkinson - }, - 'Victoria': { - 'lon lat': (-123.3707, 48.424666), - 'stn number': 7120, - 'mean sea lvl': 1.8810, - 'hist max sea lvl': 3.76, - 'wind grid ji': (104, 144), - 'NEMO grid ji': (302, 196), - 'ww3 grid ji': (90, 374), - }, - 'Woodwards Landing': { - 'lon lat': (-123.0754, 49.1251), - 'stn number': 7610, - 'hist max sea lvl': 4.66, # based on New West - 'mean sea lvl': 1.84, # from Marlene Jefferies via 20mar18 email from Michael Dunphy - 'NEMO grid ji': (414, 329), - 'wind grid ji': (135, 138), - }, - 'Boundary Bay': { - 'lon lat': (-122.925, 49.0), - 'stn number': None, - 'hist max sea lvl': 5.61, # based on Point Atk - 'mean sea lvl': 3.09, # based on Point Atk - 'NEMO grid ji': (380, 335), - 'wind grid ji': (129, 162), - 'ww3 grid ji': (222, 439), + "Patricia Bay": { + "lon lat": (-123.4515, 48.6536), + "stn number": 7277, + "mean sea lvl": 2.256, + "hist max sea lvl": 4.38, + "NEMO grid ji": (351, 214), + "wind grid ji": (115, 143), + "ww3 grid ji": (145, 363), + }, + "Point Atkinson": { + "lon lat": (-123.25, 49.33), + "stn number": 7795, + "mean sea lvl": 3.09, + "hist max sea lvl": 5.61, + "wind grid ji": (146, 155), + "NEMO grid ji": (468, 329), + "ww3 grid ji": (296, 393), + }, + "Port Renfrew": { + "lon lat": (-124.421, 48.555), + "stn number": 8525, + "mean sea lvl": 1.937, + "hist max sea lvl": 4.359, # from Neah Bay + "NEMO grid ji": (401, 61), + "wind grid ji": (117, 112), + "ww3 grid ji": (123, 226), + }, + "Sandy Cove": { + "lon lat": (-123.23, 49.34), + "stn number": 7786, + "NEMO grid ji": (468, 333), + "wind grid ji": (146, 155), + "ww3 grid ji": (294, 396), + "mean sea lvl": 3.1, # from Marlene Jefferies via 20mar18 email from Michael Dunphy + "hist max sea lvl": 5.61, # from Pt. Atkinson + }, + "Squamish": { + "lon lat": (-123.155, 49.694), + "stn number": 7811, + "NEMO grid ji": (532, 389), + "wind grid ji": (162, 160), + "ww3 grid ji": (370, 404), + "mean sea lvl": 3.14, + "hist max sea lvl": 5.61, # from Pt. 
Atkkinson + }, + "Victoria": { + "lon lat": (-123.3707, 48.424666), + "stn number": 7120, + "mean sea lvl": 1.8810, + "hist max sea lvl": 3.76, + "wind grid ji": (104, 144), + "NEMO grid ji": (302, 196), + "ww3 grid ji": (90, 374), + }, + "Woodwards Landing": { + "lon lat": (-123.0754, 49.1251), + "stn number": 7610, + "hist max sea lvl": 4.66, # based on New West + "mean sea lvl": 1.84, # from Marlene Jefferies via 20mar18 email from Michael Dunphy + "NEMO grid ji": (414, 329), + "wind grid ji": (135, 138), + }, + "Boundary Bay": { + "lon lat": (-122.925, 49.0), + "stn number": None, + "hist max sea lvl": 5.61, # based on Point Atk + "mean sea lvl": 3.09, # based on Point Atk + "NEMO grid ji": (380, 335), + "wind grid ji": (129, 162), + "ww3 grid ji": (222, 439), }, - # VHFR FVCOM model tide guage stations - 'Calamity Point': { - 'lon lat': (-123.1276, 49.31262), - 'stn number': 7724, - 'mean sea lvl': 3.001, # same as Vancouver Harbour; from Marlene Jefferies via 20mar18 email from Michael Dunphy - 'NEMO grid ji': None, - 'wind grid ji': (456, 344), - 'ww3 grid ji': None, - }, - 'Vancouver Harbour': { - 'lon lat': (-123.1069, 49.28937), - 'stn number': 7735, - 'mean sea lvl': 3.001, # from Marlene Jefferies via 20mar18 email from Michael Dunphy - 'NEMO grid ji': None, - 'wind grid ji': (143, 159), - 'ww3 grid ji': None, - }, - 'Port Moody': { - 'lon lat': (-122.8658, 49.28814), - 'stn number': 7755, - 'mean sea lvl': 3.143, # from Marlene Jefferies via 20mar18 email from Michael Dunphy - 'NEMO grid ji': None, - 'wind grid ji': (142, 166), - 'ww3 grid ji': None, - }, - 'Indian Arm Head': { - 'lon lat': (-122.8864, 49.4615), - 'stn number': 7774, - 'mean sea lvl': 3.052, # from Marlene Jefferies via 20mar18 email from Michael Dunphy - 'NEMO grid ji': None, - 'wind grid ji': (150, 167), - 'ww3 grid ji': None, + "Calamity Point": { + "lon lat": (-123.1276, 49.31262), + "stn number": 7724, + "mean sea lvl": 3.001, # same as Vancouver Harbour; from Marlene Jefferies via 20mar18 email from Michael Dunphy + "NEMO grid ji": None, + "wind grid ji": (456, 344), + "ww3 grid ji": None, + }, + "Vancouver Harbour": { + "lon lat": (-123.1069, 49.28937), + "stn number": 7735, + "mean sea lvl": 3.001, # from Marlene Jefferies via 20mar18 email from Michael Dunphy + "NEMO grid ji": None, + "wind grid ji": (143, 159), + "ww3 grid ji": None, + }, + "Port Moody": { + "lon lat": (-122.8658, 49.28814), + "stn number": 7755, + "mean sea lvl": 3.143, # from Marlene Jefferies via 20mar18 email from Michael Dunphy + "NEMO grid ji": None, + "wind grid ji": (142, 166), + "ww3 grid ji": None, + }, + "Indian Arm Head": { + "lon lat": (-122.8864, 49.4615), + "stn number": 7774, + "mean sea lvl": 3.052, # from Marlene Jefferies via 20mar18 email from Michael Dunphy + "NEMO grid ji": None, + "wind grid ji": (150, 167), + "ww3 grid ji": None, }, - # VHFR FVCOM model HADCP station - '2nd Narrows Rail Bridge': { - 'lon lat': (-123.0247222, 49.2938889), - 'stn number': 3160171, # AIS MMSI (Maritime Mobile Service Identity) - 'mean sea lvl': None, - 'NEMO grid ji': None, - 'wind grid ji': (143, 161), - 'ww3 grid ji': None, + "2nd Narrows Rail Bridge": { + "lon lat": (-123.0247222, 49.2938889), + "stn number": 3160171, # AIS MMSI (Maritime Mobile Service Identity) + "mean sea lvl": None, + "NEMO grid ji": None, + "wind grid ji": (143, 161), + "ww3 grid ji": None, }, - # Ferry terminals - 'Tsawwassen': { - 'lon lat': (-123.132722, 49.006165), - 'stn number': None, - 'mean sea lvl': None, - 'NEMO grid ji': (396, 305), - 'wind 
grid ji': (130, 155), - 'ww3 grid ji': None, - 'in berth radius': 0.0015, - }, - 'Duke Pt.': { - 'lon lat': (-123.89095676900132, 49.16340592936349), - 'stn number': None, - 'mean sea lvl': None, - 'NEMO grid ji': (481, 213), - 'wind grid ji': (142, 134), - 'ww3 grid ji': None, - 'in berth radius': 0.002, - }, - 'Horseshoe Bay': { - 'lon lat': (-123.2728, 49.3742), - 'stn number': None, - 'mean sea lvl': None, - 'NEMO grid ji': (478, 331), - 'wind grid ji': (148, 154), - 'ww3 grid ji': None, - }, - 'Departure Bay': { - 'lon lat': (-123.8909, 49.1632), - 'stn number': None, - 'mean sea lvl': None, - 'NEMO grid ji': (481, 213), - 'wind grid ji': (142, 134), - 'ww3 grid ji': None, - }, - 'Swartz Bay': { - 'lon lat': (-123.4102, 48.6882), - 'stn number': None, - 'mean sea lvl': None, - 'NEMO grid ji': (354, 225), - 'wind grid ji': (117, 144), - 'ww3 grid ji': None, + "Tsawwassen": { + "lon lat": (-123.132722, 49.006165), + "stn number": None, + "mean sea lvl": None, + "NEMO grid ji": (396, 305), + "wind grid ji": (130, 155), + "ww3 grid ji": None, + "in berth radius": 0.0015, + }, + "Duke Pt.": { + "lon lat": (-123.89095676900132, 49.16340592936349), + "stn number": None, + "mean sea lvl": None, + "NEMO grid ji": (481, 213), + "wind grid ji": (142, 134), + "ww3 grid ji": None, + "in berth radius": 0.002, + }, + "Horseshoe Bay": { + "lon lat": (-123.2728, 49.3742), + "stn number": None, + "mean sea lvl": None, + "NEMO grid ji": (478, 331), + "wind grid ji": (148, 154), + "ww3 grid ji": None, + }, + "Departure Bay": { + "lon lat": (-123.8909, 49.1632), + "stn number": None, + "mean sea lvl": None, + "NEMO grid ji": (481, 213), + "wind grid ji": (142, 134), + "ww3 grid ji": None, + }, + "Swartz Bay": { + "lon lat": (-123.4102, 48.6882), + "stn number": None, + "mean sea lvl": None, + "NEMO grid ji": (354, 225), + "wind grid ji": (117, 144), + "ww3 grid ji": None, }, - # Cities - 'Vancouver': { - 'lon lat': (-123.1207, 49.2827), + "Vancouver": { + "lon lat": (-123.1207, 49.2827), }, - 'Hope': { - 'lon lat': (-121.4419, 49.3858), + "Hope": { + "lon lat": (-121.4419, 49.3858), }, - # Provinces and states - 'British Columbia': { - 'lon lat': (-123.6, 49.9), + "British Columbia": { + "lon lat": (-123.6, 49.9), }, - 'Washington State': { - 'lon lat': (-123.8, 47.8), + "Washington State": { + "lon lat": (-123.8, 47.8), }, - # Bodies of water - 'Pacific Ocean': { - 'lon lat': (-125.6, 48.1), + "Pacific Ocean": { + "lon lat": (-125.6, 48.1), }, - 'Juan de Fuca Strait': { - 'lon lat': (-124.7, 48.47), + "Juan de Fuca Strait": { + "lon lat": (-124.7, 48.47), }, - 'Puget Sound': { - 'lon lat': (-122.67, 48), + "Puget Sound": { + "lon lat": (-122.67, 48), }, - 'Strait of Georgia': { - 'lon lat': (-123.8, 49.3), + "Strait of Georgia": { + "lon lat": (-123.8, 49.3), }, - 'Central SJDF': { - 'lon lat': (-123.9534, 48.281677), - 'NEMO grid ji': (315,95), - 'GEM2.5 grid ji': (101, 124), + "Central SJDF": { + "lon lat": (-123.9534, 48.281677), + "NEMO grid ji": (315, 95), + "GEM2.5 grid ji": (101, 124), }, # if you have a better location in mind for Baynes Sound, please update! 
# if not, I will after I hear from Debbie/Evie -EO - 'Baynes Sound': { - 'lon lat': (-124.86022, 49.60356), - 'NEMO grid ji': (635, 126), + "Baynes Sound": { + "lon lat": (-124.86022, 49.60356), + "NEMO grid ji": (635, 126), }, # STRATOGEM STATION S3(lat,lon)=(49 7.5 N, 123 33.5 W) - 'S3': { - 'lon lat': (-123.558, 49.125), - 'NEMO grid ji': (450, 258), - 'GEM2.5 grid ji': (138, 144), + "S3": { + "lon lat": (-123.558, 49.125), + "NEMO grid ji": (450, 258), + "GEM2.5 grid ji": (138, 144), }, # Hakai STATION QU39 (lat,lon)=(50.0307 N, 125.0992 W) - 'QU39': { - 'lon lat': (-125.0992, 50.0307), - 'NEMO grid ji': (736, 144), - 'GEM2.5 grid ji': (189, 106), + "QU39": { + "lon lat": (-125.0992, 50.0307), + "NEMO grid ji": (736, 144), + "GEM2.5 grid ji": (189, 106), }, # SJDF station for bloom timing - 'SJDF': { - 'lon lat': (-124.07, 48.31), - 'NEMO grid ji': (329, 81), - 'GEM2.5 grid ji': (103, 121), + "SJDF": { + "lon lat": (-124.07, 48.31), + "NEMO grid ji": (329, 81), + "GEM2.5 grid ji": (103, 121), }, # Tereza's cluster stations, aligned with Vector Stations where possible. - 'Cluster_1': { - 'NEMO grid ji': (241, 212), - 'lon lat': (48.215, -123.099), - 'Vector Stn': '64' - }, - 'Cluster_2': { - 'NEMO grid ji': (294, 127), - 'lon lat': (48.261, -123.717), - 'Vector Stn': '69' - }, - 'Cluster_3': { - 'NEMO grid ji': (376, 291), - 'lon lat': (48.899, -123.138), - 'Vector Stn': '45' - }, - 'Cluster_4': { - 'NEMO grid ji': (282, 305), - 'lon lat': (48.555, -122.750), - 'Vector Stn': '53' - }, - 'Cluster_5': { - 'NEMO grid ji': (344, 271), - 'lon lat': (48.735, -123.135), - 'Vector Stn': '57' - }, - 'Cluster_6': { - 'NEMO grid ji': (320, 68), - 'lon lat': (48.249, -124.110), - 'Vector Stn': '73' - }, - 'Cluster_7': { - 'NEMO grid ji': (504, 246), - 'lon lat': (49.317, -123.801), - 'Vector Stn': '27' - }, - 'Cluster_8': { - 'NEMO grid ji': (646, 168), - 'lon lat': (49.726, -124.679), - 'Vector Stn': '12' - }, - 'Cluster_9': { - 'NEMO grid ji': (423, 300), - 'lon lat': (49.101, -123.249), + "Cluster_1": { + "NEMO grid ji": (241, 212), + "lon lat": (48.215, -123.099), + "Vector Stn": "64", + }, + "Cluster_2": { + "NEMO grid ji": (294, 127), + "lon lat": (48.261, -123.717), + "Vector Stn": "69", + }, + "Cluster_3": { + "NEMO grid ji": (376, 291), + "lon lat": (48.899, -123.138), + "Vector Stn": "45", + }, + "Cluster_4": { + "NEMO grid ji": (282, 305), + "lon lat": (48.555, -122.750), + "Vector Stn": "53", + }, + "Cluster_5": { + "NEMO grid ji": (344, 271), + "lon lat": (48.735, -123.135), + "Vector Stn": "57", + }, + "Cluster_6": { + "NEMO grid ji": (320, 68), + "lon lat": (48.249, -124.110), + "Vector Stn": "73", + }, + "Cluster_7": { + "NEMO grid ji": (504, 246), + "lon lat": (49.317, -123.801), + "Vector Stn": "27", + }, + "Cluster_8": { + "NEMO grid ji": (646, 168), + "lon lat": (49.726, -124.679), + "Vector Stn": "12", + }, + "Cluster_9": { + "NEMO grid ji": (423, 300), + "lon lat": (49.101, -123.249), }, - # VENUS - 'Central node': { + "Central node": { # location from Ocean Networks Canada (ONC) website - 'lon lat': (-123.425825, 49.040066666), + "lon lat": (-123.425825, 49.040066666), # depth in metres from ONC website - 'depth': 294, + "depth": 294, # corresponding python vertical grid index - 'NEMO grid k': 34, + "NEMO grid k": 34, # NEMO python grid indices: j in y direction, i in x direction - 'NEMO grid ji': (424, 266), + "NEMO grid ji": (424, 266), # HRDPS python grid indices: j in y direction, i in x direction - 'wind grid ji': (133, 147), + "wind grid ji": (133, 147), # ONC data web 
services API station code - 'ONC stationCode': 'SCVIP', + "ONC stationCode": "SCVIP", }, - 'Delta BBL node': { + "Delta BBL node": { # ONC's description is "Delta/Lower Slope/Bottom Boundary Layer" - 'lon lat': (-123.339633, 49.074766), - 'depth': 143, - 'NEMO grid k': 28, - 'NEMO grid ji': (424, 283), - 'wind grid ji': (134, 150), - 'ONC stationCode': 'LSBBL', - }, - 'Delta DDL node': { + "lon lat": (-123.339633, 49.074766), + "depth": 143, + "NEMO grid k": 28, + "NEMO grid ji": (424, 283), + "wind grid ji": (134, 150), + "ONC stationCode": "LSBBL", + }, + "Delta DDL node": { # ONC's description is "Delta/Upper Slope/Delta Dynamics Laboratory" - 'lon lat': (-123.32972, 49.08495), - 'depth': 107, - 'NEMO grid k': 27, - 'NEMO grid ji': (426, 286), - 'wind grid ji': (135, 150), - 'ONC stationCode': 'USDDL', - }, - 'East node': { - 'lon lat': (-123.316836666, 49.04316), - 'depth': 164, - 'NEMO grid k': 29, - 'NEMO grid ji': (417, 283), - 'wind grid ji': (133, 150), - 'ONC stationCode': 'SEVIP', + "lon lat": (-123.32972, 49.08495), + "depth": 107, + "NEMO grid k": 27, + "NEMO grid ji": (426, 286), + "wind grid ji": (135, 150), + "ONC stationCode": "USDDL", + }, + "East node": { + "lon lat": (-123.316836666, 49.04316), + "depth": 164, + "NEMO grid k": 29, + "NEMO grid ji": (417, 283), + "wind grid ji": (133, 150), + "ONC stationCode": "SEVIP", }, - # Lightstations - 'Ballenas Islands': { - 'lon lat': (-124.160, 49.350), - 'NEMO grid ji': (536, 197), - 'wind grid ji': (152, 127), - }, - 'Discovery Island': { - 'lon lat': (-123.226, 48.425), - 'NEMO grid ji': (291, 219), - 'wind grid ji': (104, 148), - }, - 'Entrance Island': { - 'lon lat': (-123.811, 49.209), - 'NEMO grid ji': (484, 231), - 'wind grid ji': (143, 137), - }, - 'Race Rocks': { - 'lon lat': (-123.531, 48.298), - 'NEMO grid ji': (288, 159), - 'wind grid ji': (99, 137), - }, - 'Sand Heads': { - 'lon lat': (-123.30, 49.10), - 'stn number': 7594, # Marlene's coordinates for Tide Station are slightly different. Leaving as is. - 'NEMO grid ji': (426, 293), # match Domain file - 'mean sea lvl': 2.875, - 'hist max sea lvl': 5.61-3.09+2.875, # based on Point Atk. - 'GEM2.5 grid ji': (135, 151), - 'wind grid ji': (135, 151), - 'ww3 grid ji': (246, 385), - }, - 'Saturna Island': { - 'lon lat': (-123.045, 48.784), - 'NEMO grid ji': (347, 290), - 'wind grid ji': (119, 156), - }, - 'Sisters Islet': { - 'lon lat': (-124.43, 49.49), - 'NEMO grid ji': (582, 175), - 'GEM2.5 grid ji': (160, 120), - 'wind grid ji': (160, 120), + "Ballenas Islands": { + "lon lat": (-124.160, 49.350), + "NEMO grid ji": (536, 197), + "wind grid ji": (152, 127), + }, + "Discovery Island": { + "lon lat": (-123.226, 48.425), + "NEMO grid ji": (291, 219), + "wind grid ji": (104, 148), + }, + "Entrance Island": { + "lon lat": (-123.811, 49.209), + "NEMO grid ji": (484, 231), + "wind grid ji": (143, 137), + }, + "Race Rocks": { + "lon lat": (-123.531, 48.298), + "NEMO grid ji": (288, 159), + "wind grid ji": (99, 137), + }, + "Sand Heads": { + "lon lat": (-123.30, 49.10), + "stn number": 7594, # Marlene's coordinates for Tide Station are slightly different. Leaving as is. + "NEMO grid ji": (426, 293), # match Domain file + "mean sea lvl": 2.875, + "hist max sea lvl": 5.61 - 3.09 + 2.875, # based on Point Atk. 
+ "GEM2.5 grid ji": (135, 151), + "wind grid ji": (135, 151), + "ww3 grid ji": (246, 385), + }, + "Saturna Island": { + "lon lat": (-123.045, 48.784), + "NEMO grid ji": (347, 290), + "wind grid ji": (119, 156), + }, + "Sisters Islet": { + "lon lat": (-124.43, 49.49), + "NEMO grid ji": (582, 175), + "GEM2.5 grid ji": (160, 120), + "wind grid ji": (160, 120), }, - # Wind stations - 'Esquimalt': { - 'lon lat': (-123.439, 48.432), - 'NEMO grid ji': (307, 189), - 'wind grid ji': (105, 141), + "Esquimalt": { + "lon lat": (-123.439, 48.432), + "NEMO grid ji": (307, 189), + "wind grid ji": (105, 141), }, - 'Pam Rocks': { - 'lon lat': (-123.299, 49.488), - 'NEMO grid ji': (502, 341), - 'wind grid ji': (153, 154), + "Pam Rocks": { + "lon lat": (-123.299, 49.488), + "NEMO grid ji": (502, 341), + "wind grid ji": (153, 154), }, - # Wave buoys - 'Halibut Bank': { - 'lon lat': (-123.72, 49.34), - 'NEMO grid ji': (503, 261), - 'GEM2.5 grid ji': (149, 141), - 'wind grid ji': (149, 141), - 'EC buoy number': 46146, - }, - 'Sentry Shoal': { - 'lon lat': (-125.0, 49.92), - 'NEMO grid ji': (707, 145), - 'GEM2.5 grid ji': (183, 107), - 'wind grid ji': (183, 107), - 'EC buoy number': 46131, + "Halibut Bank": { + "lon lat": (-123.72, 49.34), + "NEMO grid ji": (503, 261), + "GEM2.5 grid ji": (149, 141), + "wind grid ji": (149, 141), + "EC buoy number": 46146, + }, + "Sentry Shoal": { + "lon lat": (-125.0, 49.92), + "NEMO grid ji": (707, 145), + "GEM2.5 grid ji": (183, 107), + "wind grid ji": (183, 107), + "EC buoy number": 46131, }, - # Seasonal Chl sensor at town dock - 'Egmont': { - 'lon lat': (-123.93, 49.75), - 'NEMO grid ji': (598,282), + "Egmont": { + "lon lat": (-123.93, 49.75), + "NEMO grid ji": (598, 282), }, - # Airports - 'Comox Airport': { - 'lon lat': (-124.900, 49.717), - 'NEMO grid ji': (660, 134), - 'wind grid ji' : (173, 108), + "Comox Airport": { + "lon lat": (-124.900, 49.717), + "NEMO grid ji": (660, 134), + "wind grid ji": (173, 108), }, - 'Squamish Airport': { - 'lon lat': (-123.161, 49.783), - 'wind grid ji' : (166, 161), + "Squamish Airport": { + "lon lat": (-123.161, 49.783), + "wind grid ji": (166, 161), }, - 'YVR': { - 'lon lat': (-123.184, 49.195), - 'GEM2.5 grid ji' : (139, 155), - 'wind grid ji' : (139, 155), + "YVR": { + "lon lat": (-123.184, 49.195), + "GEM2.5 grid ji": (139, 155), + "wind grid ji": (139, 155), }, # ORCA Buoys - 'Hoodsport': { - 'lon lat': (-123.1126, 47.4218), - 'NEMO grid ji': (89, 114), - }, - 'Twanoh': { - 'lon lat': (-123.0083, 47.3750), - 'NEMO grid ji': (72, 123), - }, - 'DabobBay': { - 'lon lat': (-122.8029, 47.8031), - 'NEMO grid ji': (141, 205), - }, - 'PointWells': { - 'lon lat': (-122.3972, 47.7612), - 'NEMO grid ji': (104, 259), - }, - 'CarrInlet': { - 'lon lat': (-122.7300, 47.2800), - 'NEMO grid ji': (34, 152), - }, - 'Hansville': { - 'lon lat': (-122.627, 47.90733), - 'NEMO grid ji': (148, 243), - }, - 'Dockton': { - 'lon lat': (-122.45722, 47.37611), - 'NEMO grid ji': (34, 204), - }, - 'PointWilliams': { - 'lon lat': (-122.40612, 47.53716), - 'NEMO grid ji': (62, 231), - } + "Hoodsport": { + "lon lat": (-123.1126, 47.4218), + "NEMO grid ji": (89, 114), + }, + "Twanoh": { + "lon lat": (-123.0083, 47.3750), + "NEMO grid ji": (72, 123), + }, + "DabobBay": { + "lon lat": (-122.8029, 47.8031), + "NEMO grid ji": (141, 205), + }, + "PointWells": { + "lon lat": (-122.3972, 47.7612), + "NEMO grid ji": (104, 259), + }, + "CarrInlet": { + "lon lat": (-122.7300, 47.2800), + "NEMO grid ji": (34, 152), + }, + "Hansville": { + "lon lat": (-122.627, 47.90733), + 
"NEMO grid ji": (148, 243), + }, + "Dockton": { + "lon lat": (-122.45722, 47.37611), + "NEMO grid ji": (34, 204), + }, + "PointWilliams": { + "lon lat": (-122.40612, 47.53716), + "NEMO grid ji": (62, 231), + }, } # Aliases: -PLACES['Sandheads'] = PLACES['Sand Heads'] +PLACES["Sandheads"] = PLACES["Sand Heads"] #: Names of tide gauge sites, #: ordered from south and west to north and east. #: These names are keys of the :py:data:`~salishsea_tools.places.PLACES` dict. TIDE_GAUGE_SITES = ( - 'Neah Bay', 'Victoria', 'Cherry Point', 'Point Atkinson', 'Nanaimo', - 'Campbell River', + "Neah Bay", + "Victoria", + "Cherry Point", + "Point Atkinson", + "Nanaimo", + "Campbell River", ) #: Other tide sites, no wind data at these (just to keep the number of arrows under control) SUPP_TIDE_SITES = ( - 'Friday Harbor', 'Halfmoon Bay', 'Patricia Bay', 'Port Renfrew', - 'Squamish', 'Boundary Bay', 'Sand Heads', + "Friday Harbor", + "Halfmoon Bay", + "Patricia Bay", + "Port Renfrew", + "Squamish", + "Boundary Bay", + "Sand Heads", ) @@ -569,41 +564,78 @@ def DispGeoLocs(): """ from mpl_toolkits.basemap import Basemap from matplotlib import pyplot as plt - places2=PLACES.copy() - places2.pop('Sandheads') - places2.pop('Departure Bay') - width = 300000; lon_0 = -124.3; lat_0 = 49 - fig=plt.figure(figsize=(20,20)) - m = Basemap(width=width,height=width,projection='aeqd', resolution='h', - lat_0=lat_0,lon_0=lon_0) + + places2 = PLACES.copy() + places2.pop("Sandheads") + places2.pop("Departure Bay") + width = 300000 + lon_0 = -124.3 + lat_0 = 49 + fig = plt.figure(figsize=(20, 20)) + m = Basemap( + width=width, + height=width, + projection="aeqd", + resolution="h", + lat_0=lat_0, + lon_0=lon_0, + ) m.drawmapboundary() m.drawcoastlines(linewidth=0.5) m.drawrivers() - m.drawparallels(range(40,60,2)) - m.drawmeridians(range(-130,-110,2)) - #plt.title('EC River Stations') + m.drawparallels(range(40, 60, 2)) + m.drawmeridians(range(-130, -110, 2)) + # plt.title('EC River Stations') # map stations: for pl in places2.keys(): - if 'lon lat' in places2[pl].keys(): - lon,lat=places2[pl]['lon lat'] - if (47 25% of Jervis # watershed Jervis = 0.25 -prop_dict['jervis'] = { - 'SkwawkaHunaechin': { - 'prop': Jervis * 0.20, 'i': 692, 'j': 332, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Loquilts': { - 'prop': Jervis * 0.04, 'i': 674, 'j': 347, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Potato': { - 'prop': Jervis * 0.04, 'i': 666, 'j': 349, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Deserted': { - 'prop': Jervis * 0.10, 'i': 653, 'j': 353, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Stakawus': { - 'prop': Jervis * 0.04, 'i': 651, 'j': 346, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Crabapple': { - 'prop': Jervis * 0.04, 'i': 665, 'j': 342, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Osgood': { - 'prop': Jervis * 0.04, 'i': 652, 'j': 323, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lausmann': { - 'prop': Jervis * 0.03, 'i': 690, 'j': 332, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Slane': { - 'prop': Jervis * 0.03, 'i': 687, 'j': 331, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Smanit': { - 'prop': Jervis * 0.04, 'i': 681, 'j': 334, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Glacial': { - 'prop': Jervis * 0.05, 'i': 649, 'j': 310, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Seshal': { - 'prop': Jervis * 0.05, 'i': 652, 'j': 318, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Brittain': { - 'prop': Jervis * 0.10, 'i': 652, 'j': 301, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'VancouverHigh': { - 'prop': Jervis * 0.10, 'i': 628, 'j': 312, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Perketts': { - 'prop': Jervis 
* 0.05, 'i': 619, 'j': 307, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Treat': { - 'prop': Jervis * 0.05, 'i': 612, 'j': 302, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sechelt': { - 'prop': 0.17, 'i': 593, 'j': 285, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Powell': { - 'prop': 0.32, 'i': 667, 'j': 203, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lois': { - 'prop': 0.10, 'i': 629, 'j': 227, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Haslam': { - 'prop': 0.02, 'i': 633, 'j': 219, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chapman': { - 'prop': 0.02, 'i': 522, 'j': 273, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lapan': { - 'prop': 0.02, 'i': 620, 'j': 283, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nelson': { - 'prop': 0.02, 'i': 604, 'j': 262, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Wakefield': { - 'prop': 0.02, 'i': 534, 'j': 264, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Halfmoon': { - 'prop': 0.02, 'i': 549, 'j': 254, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'MyersKleindaleAnderson': { - 'prop': 0.04, 'i': 571, 'j': 248, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["jervis"] = { + "SkwawkaHunaechin": { + "prop": Jervis * 0.20, + "i": 692, + "j": 332, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Loquilts": { + "prop": Jervis * 0.04, + "i": 674, + "j": 347, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Potato": { + "prop": Jervis * 0.04, + "i": 666, + "j": 349, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Deserted": { + "prop": Jervis * 0.10, + "i": 653, + "j": 353, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stakawus": { + "prop": Jervis * 0.04, + "i": 651, + "j": 346, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Crabapple": { + "prop": Jervis * 0.04, + "i": 665, + "j": 342, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Osgood": { + "prop": Jervis * 0.04, + "i": 652, + "j": 323, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lausmann": { + "prop": Jervis * 0.03, + "i": 690, + "j": 332, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Slane": { + "prop": Jervis * 0.03, + "i": 687, + "j": 331, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Smanit": { + "prop": Jervis * 0.04, + "i": 681, + "j": 334, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Glacial": { + "prop": Jervis * 0.05, + "i": 649, + "j": 310, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Seshal": { + "prop": Jervis * 0.05, + "i": 652, + "j": 318, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Brittain": { + "prop": Jervis * 0.10, + "i": 652, + "j": 301, + "di": 1, + "dj": 1, + "depth": 1, + }, + "VancouverHigh": { + "prop": Jervis * 0.10, + "i": 628, + "j": 312, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Perketts": { + "prop": Jervis * 0.05, + "i": 619, + "j": 307, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Treat": { + "prop": Jervis * 0.05, + "i": 612, + "j": 302, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sechelt": { + "prop": 0.17, + "i": 593, + "j": 285, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Powell": { + "prop": 0.32, + "i": 667, + "j": 203, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lois": { + "prop": 0.10, + "i": 629, + "j": 227, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Haslam": { + "prop": 0.02, + "i": 633, + "j": 219, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chapman": { + "prop": 0.02, + "i": 522, + "j": 273, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lapan": { + "prop": 0.02, + "i": 620, + "j": 283, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nelson": { + "prop": 0.02, + "i": 604, + "j": 262, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Wakefield": { + "prop": 0.02, + "i": 534, + "j": 264, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Halfmoon": { + "prop": 0.02, + "i": 549, + 
"j": 254, + "di": 1, + "dj": 1, + "depth": 1, + }, + "MyersKleindaleAnderson": { + "prop": 0.04, + "i": 571, + "j": 248, + "di": 1, + "dj": 1, + "depth": 1, + }, +} -prop_dict['toba'] = { - 'Toba': { - 'prop': 0.50, 'i': 775, 'j': 311, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Theodosia': { - 'prop': 0.12, 'i': 713, 'j': 197, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Quatam': { - 'prop': 0.09, 'i': 794, 'j': 211, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Brem': { - 'prop': 0.09, 'i': 785, 'j': 260, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tahumming': { - 'prop': 0.08, 'i': 777, 'j': 309, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Racine': { - 'prop': 0.04, 'i': 770, 'j': 272, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Homfray': { - 'prop': 0.03, 'i': 754, 'j': 245, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Forbes': { - 'prop': 0.03, 'i': 742, 'j': 247, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chusan': { - 'prop': 0.02, 'i': 773, 'j': 307, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["toba"] = { + "Toba": { + "prop": 0.50, + "i": 775, + "j": 311, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Theodosia": { + "prop": 0.12, + "i": 713, + "j": 197, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Quatam": { + "prop": 0.09, + "i": 794, + "j": 211, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Brem": { + "prop": 0.09, + "i": 785, + "j": 260, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tahumming": { + "prop": 0.08, + "i": 777, + "j": 309, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Racine": { + "prop": 0.04, + "i": 770, + "j": 272, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Homfray": { + "prop": 0.03, + "i": 754, + "j": 245, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Forbes": { + "prop": 0.03, + "i": 742, + "j": 247, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chusan": { + "prop": 0.02, + "i": 773, + "j": 307, + "di": 1, + "dj": 1, + "depth": 1, + }, +} -prop_dict['bute'] = { - 'Homathko': { - 'prop': 0.58, 'i': 896, 'j': 293, 'di': 1, 'dj': 3, 'depth': 2, - }, - 'Southgate': { - 'prop': 0.35, 'i': 885, 'j': 297, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Orford': { - 'prop': 0.07, 'i': 830, 'j': 250, 'di': 1, 'dj': 1, 'depth': 1, - }} - -prop_dict['evi_s'] = { - 'Cowichan1': { - 'prop': 0.5*0.22, 'i': 384, 'j': 201, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Cowichan2': { - 'prop': 0.5*0.22, 'i': 383, 'j': 200, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chemanius1': { - 'prop': 0.5 * 0.13, 'i': 414, 'j': 211, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chemanius2': { - 'prop': 0.5 * 0.13, 'i': 417, 'j': 211, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nanaimo1': { - 'prop': 0.67 * 0.14, 'i': 477, 'j': 207, 'di': 1, 'dj': 2, 'depth': 1, - }, - 'Nanaimo2': { - 'prop': 0.33 * 0.14, 'i': 477, 'j': 211, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'NorNanaimo': { - 'prop': 0.02, 'i': 486, 'j': 208, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Goldstream': { - 'prop': 0.08, 'i': 329, 'j': 182, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nanoose': { - 'prop': 0.02, 'i': 521, 'j': 183, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Englishman': { - 'prop': 0.05, 'i': 542, 'j': 175, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'FrenchCreek': { - 'prop': 0.01, 'i': 551, 'j': 168, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'LittleQualicum': { - 'prop': 0.05, 'i': 564, 'j': 149, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Qualicum': { - 'prop': 0.02, 'i': 578, 'j': 138, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'SouthDenman': { - 'prop': 0.05, 'i': 602, 'j': 122, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tsable': { - 'prop': 0.03, 'i': 616, 'j': 120, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Trent': { - 'prop': 0.01, 'i': 649, 'j': 
121, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Puntledge': { - 'prop': 0.14, 'i': 656, 'j': 119, 'di': 1, 'dj': 2, 'depth': 1, - }, - 'BlackCreek': { - 'prop': 0.03, 'i': 701, 'j': 123, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["bute"] = { + "Homathko": { + "prop": 0.58, + "i": 896, + "j": 293, + "di": 1, + "dj": 3, + "depth": 2, + }, + "Southgate": { + "prop": 0.35, + "i": 885, + "j": 297, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Orford": { + "prop": 0.07, + "i": 830, + "j": 250, + "di": 1, + "dj": 1, + "depth": 1, + }, +} +prop_dict["evi_s"] = { + "Cowichan1": { + "prop": 0.5 * 0.22, + "i": 384, + "j": 201, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Cowichan2": { + "prop": 0.5 * 0.22, + "i": 383, + "j": 200, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chemanius1": { + "prop": 0.5 * 0.13, + "i": 414, + "j": 211, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chemanius2": { + "prop": 0.5 * 0.13, + "i": 417, + "j": 211, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nanaimo1": { + "prop": 0.67 * 0.14, + "i": 477, + "j": 207, + "di": 1, + "dj": 2, + "depth": 1, + }, + "Nanaimo2": { + "prop": 0.33 * 0.14, + "i": 477, + "j": 211, + "di": 1, + "dj": 1, + "depth": 1, + }, + "NorNanaimo": { + "prop": 0.02, + "i": 486, + "j": 208, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Goldstream": { + "prop": 0.08, + "i": 329, + "j": 182, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nanoose": { + "prop": 0.02, + "i": 521, + "j": 183, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Englishman": { + "prop": 0.05, + "i": 542, + "j": 175, + "di": 1, + "dj": 1, + "depth": 1, + }, + "FrenchCreek": { + "prop": 0.01, + "i": 551, + "j": 168, + "di": 1, + "dj": 1, + "depth": 1, + }, + "LittleQualicum": { + "prop": 0.05, + "i": 564, + "j": 149, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Qualicum": { + "prop": 0.02, + "i": 578, + "j": 138, + "di": 1, + "dj": 1, + "depth": 1, + }, + "SouthDenman": { + "prop": 0.05, + "i": 602, + "j": 122, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tsable": { + "prop": 0.03, + "i": 616, + "j": 120, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Trent": { + "prop": 0.01, + "i": 649, + "j": 121, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Puntledge": { + "prop": 0.14, + "i": 656, + "j": 119, + "di": 1, + "dj": 2, + "depth": 1, + }, + "BlackCreek": { + "prop": 0.03, + "i": 701, + "j": 123, + "di": 1, + "dj": 1, + "depth": 1, + }, +} diff --git a/SalishSeaTools/salishsea_tools/river_202101.py b/SalishSeaTools/salishsea_tools/river_202101.py index 9b34e0a4..a69d9054 100644 --- a/SalishSeaTools/salishsea_tools/river_202101.py +++ b/SalishSeaTools/salishsea_tools/river_202101.py @@ -22,45 +22,296 @@ prop_dict = {} # dictionary of rivers in Howe watershed # depth at First Narrows, Fig 4.5 Isachsen thesis -prop_dict['howe'] = { - 'Squamish': { - 'prop': 0.706947808647832, 'i': 534, 'j': 384, 'di': 1, 'dj': 2, 'depth': 2, - }, - 'Jericho': {'prop': 0.002474317330267412, 'i': 453, 'j': 329, 'di': 1, 'dj': 1, 'depth': 1}, - 'FalseCreek': {'prop': 0.005054676831831999, 'i': 450, 'j': 337, 'di': 1, 'dj': 1, 'depth': 1}, - 'FirstNarrows': {'prop': 0.1294681892089998, 'i': 457, 'j': 343, 'di': 1, 'dj': 1, 'depth': 8}, - 'Capilano': {'prop': 0.00918474034551144, 'i': 458, 'j': 343, 'di': 1, 'dj': 1, 'depth': 1}, - 'Lawson': {'prop': 0.0013338988915802515, 'i': 461, 'j': 341, 'di': 1, 'dj': 1, 'depth': 1}, - 'Marr': {'prop': 0.0011794866070598039, 'i': 464, 'j': 338, 'di': 1, 'dj': 1, 'depth': 1}, - 'Rodgers': {'prop':0.0009074233240685963, 'i': 465, 'j': 337, 'di': 2, 'dj': 1, 'depth': 1}, - 'Westmount': {'prop': 
0.00044693984618303996, 'i': 467, 'j': 336, 'di': 1, 'dj': 1, 'depth': 1}, - 'Cypress': {'prop': 0.0029598788514702654, 'i': 469, 'j': 331, 'di': 1, 'dj': 1, 'depth': 1}, - 'Eagle': {'prop': 0.0018306227466038597, 'i': 474, 'j': 329, 'di': 1, 'dj': 1, 'depth': 1}, - 'Whyte': {'prop': 0.0010827463806132587, 'i': 478, 'j': 332, 'di': 1, 'dj': 1, 'depth': 1}, - 'Disbrow': {'prop': 0.0005525355241273846, 'i': 481, 'j': 337, 'di': 1, 'dj': 1, 'depth': 1}, - 'Sclufield': {'prop': 0.00044835374180033564, 'i': 482, 'j': 339, 'di': 1, 'dj': 1, 'depth': 1}, - 'Turpin': {'prop': 0.0014176163952359158, 'i': 485, 'j': 343, 'di': 1, 'dj': 1, 'depth': 1}, - 'Harvey': {'prop': 0.0038900733365332023, 'i': 492, 'j': 346, 'di': 1, 'dj': 1, 'depth': 1}, - 'Deeks': {'prop': 0.0032407975859592725, 'i': 501, 'j': 350, 'di': 1, 'dj': 1, 'depth': 1}, - 'Kallahn': {'prop': 0.0024854796640881673, 'i': 510, 'j': 358, 'di': 1, 'dj': 1, 'depth': 1}, - 'Furry': {'prop': 0.012349261983695549, 'i': 515, 'j': 364, 'di': 1, 'dj': 1, 'depth': 1}, - 'Britannia': {'prop': 0.006394156890322628, 'i': 523, 'j': 373, 'di': 1, 'dj': 1, 'depth': 1}, - 'Gonzalos': {'prop': 0.002790583455188811, 'i': 527, 'j': 381, 'di': 1, 'dj': 1, 'depth': 1}, - 'Shannon': {'prop': 0.0033375378124058178, 'i': 529, 'j': 386, 'di': 1, 'dj': 1, 'depth': 1}, - 'Stawanus': {'prop': 0.010745606691447047, 'i': 530, 'j': 387, 'di': 1, 'dj': 1, 'depth': 1}, - 'Woodfibre': {'prop': 0.013573397926038377, 'i': 533, 'j': 370, 'di': 1, 'dj': 2, 'depth': 1}, - 'Foulger': {'prop': 0.0032147521403775104, 'i': 530, 'j': 367, 'di': 1, 'dj': 1, 'depth': 1}, - 'Ellesmere': {'prop': 0.0028538366801730904, 'i': 521, 'j': 361, 'di': 1, 'dj': 1, 'depth': 1}, - 'Potlatch': {'prop': 0.006500199061619803, 'i': 522, 'j': 350, 'di': 1, 'dj': 1, 'depth': 1}, - 'McNab': {'prop': 0.013525027812815104, 'i': 522, 'j': 338, 'di': 1, 'dj': 1, 'depth': 1}, - 'Bain': {'prop': 0.0024817588861479156, 'i': 522, 'j': 326, 'di': 1, 'dj': 1, 'depth': 1}, - 'Rainy': {'prop': 0.013266433745967606, 'i': 522, 'j': 319, 'di': 1, 'dj': 1, 'depth': 1}, - 'McNair': {'prop': 0.010051681605590097, 'i': 520, 'j': 316, 'di': 1, 'dj': 1, 'depth': 1}, - 'Twin': {'prop': 0.003638920825566209, 'i': 513, 'j': 313, 'di': 1, 'dj': 1, 'depth': 1}, - 'Langdale': {'prop': 0.005008167107578853, 'i': 504, 'j': 309, 'di': 1, 'dj': 1, 'depth': 1}, - 'Chester': {'prop': 0.004572836088569398, 'i': 501, 'j': 291, 'di': 1, 'dj': 1, 'depth': 1}, - 'Roberts': {'prop': 0.009084279341124641, 'i': 512, 'j': 282, 'di': 1, 'dj': 1, 'depth': 1}, - 'Rume': {'prop': 0.0017059766856054263, 'i': 517, 'j': 279, 'di': 1, 'dj': 1, 'depth': 1}} +prop_dict["howe"] = { + "Squamish": { + "prop": 0.706947808647832, + "i": 534, + "j": 384, + "di": 1, + "dj": 2, + "depth": 2, + }, + "Jericho": { + "prop": 0.002474317330267412, + "i": 453, + "j": 329, + "di": 1, + "dj": 1, + "depth": 1, + }, + "FalseCreek": { + "prop": 0.005054676831831999, + "i": 450, + "j": 337, + "di": 1, + "dj": 1, + "depth": 1, + }, + "FirstNarrows": { + "prop": 0.1294681892089998, + "i": 457, + "j": 343, + "di": 1, + "dj": 1, + "depth": 8, + }, + "Capilano": { + "prop": 0.00918474034551144, + "i": 458, + "j": 343, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lawson": { + "prop": 0.0013338988915802515, + "i": 461, + "j": 341, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Marr": { + "prop": 0.0011794866070598039, + "i": 464, + "j": 338, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Rodgers": { + "prop": 0.0009074233240685963, + "i": 465, + "j": 337, + "di": 2, + "dj": 1, + 
"depth": 1, + }, + "Westmount": { + "prop": 0.00044693984618303996, + "i": 467, + "j": 336, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Cypress": { + "prop": 0.0029598788514702654, + "i": 469, + "j": 331, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Eagle": { + "prop": 0.0018306227466038597, + "i": 474, + "j": 329, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Whyte": { + "prop": 0.0010827463806132587, + "i": 478, + "j": 332, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Disbrow": { + "prop": 0.0005525355241273846, + "i": 481, + "j": 337, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sclufield": { + "prop": 0.00044835374180033564, + "i": 482, + "j": 339, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Turpin": { + "prop": 0.0014176163952359158, + "i": 485, + "j": 343, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Harvey": { + "prop": 0.0038900733365332023, + "i": 492, + "j": 346, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Deeks": { + "prop": 0.0032407975859592725, + "i": 501, + "j": 350, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Kallahn": { + "prop": 0.0024854796640881673, + "i": 510, + "j": 358, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Furry": { + "prop": 0.012349261983695549, + "i": 515, + "j": 364, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Britannia": { + "prop": 0.006394156890322628, + "i": 523, + "j": 373, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Gonzalos": { + "prop": 0.002790583455188811, + "i": 527, + "j": 381, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Shannon": { + "prop": 0.0033375378124058178, + "i": 529, + "j": 386, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stawanus": { + "prop": 0.010745606691447047, + "i": 530, + "j": 387, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Woodfibre": { + "prop": 0.013573397926038377, + "i": 533, + "j": 370, + "di": 1, + "dj": 2, + "depth": 1, + }, + "Foulger": { + "prop": 0.0032147521403775104, + "i": 530, + "j": 367, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Ellesmere": { + "prop": 0.0028538366801730904, + "i": 521, + "j": 361, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Potlatch": { + "prop": 0.006500199061619803, + "i": 522, + "j": 350, + "di": 1, + "dj": 1, + "depth": 1, + }, + "McNab": { + "prop": 0.013525027812815104, + "i": 522, + "j": 338, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Bain": { + "prop": 0.0024817588861479156, + "i": 522, + "j": 326, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Rainy": { + "prop": 0.013266433745967606, + "i": 522, + "j": 319, + "di": 1, + "dj": 1, + "depth": 1, + }, + "McNair": { + "prop": 0.010051681605590097, + "i": 520, + "j": 316, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Twin": { + "prop": 0.003638920825566209, + "i": 513, + "j": 313, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Langdale": { + "prop": 0.005008167107578853, + "i": 504, + "j": 309, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chester": { + "prop": 0.004572836088569398, + "i": 501, + "j": 291, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Roberts": { + "prop": 0.009084279341124641, + "i": 512, + "j": 282, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Rume": { + "prop": 0.0017059766856054263, + "i": 517, + "j": 279, + "di": 1, + "dj": 1, + "depth": 1, + }, +} # Assume that 50% of the area of the JdF watershed defined by Morrison # et al (2011) is on north side of JdF (Canada side) @@ -69,88 +320,224 @@ # al (2011) is on south side of JdF (US side) USFlux = 0.50 # dictionary of rivers in Juan de Fuca watershed -prop_dict['jdf'] = { - 'SanJuan': { - 'prop': 0.33 * CAFlux, 'i': 401, 'j': 63, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Gordon': { - 
'prop': 0.14 * CAFlux, 'i': 404, 'j': 64, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Loss': { - 'prop': 0.05 * CAFlux, 'i': 376, 'j': 72, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Jordan': { - 'prop': 0.05 * CAFlux, 'i': 349, 'j': 97, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Muir': { - 'prop': 0.05 * CAFlux, 'i': 327, 'j': 120, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tugwell': { - 'prop': 0.05 * CAFlux, 'i': 325, 'j': 121, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sooke': { - 'prop': 0.33 * CAFlux, 'i': 316, 'j': 144, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Elwha': { - 'prop': 0.60 * 0.50 * USFlux, 'i': 261, 'j': 134, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tumwater': { - 'prop': 0.60 * 0.01 * USFlux, 'i': 248, 'j': 151, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Valley': { - 'prop': 0.60 * 0.01 * USFlux, 'i': 247, 'j': 152, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Ennis': { - 'prop': 0.60 * 0.02 * USFlux, 'i': 244, 'j': 156, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Morse': { - 'prop': 0.60 * 0.07 * USFlux, 'i': 240, 'j': 164, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Bagley': { - 'prop': 0.60 * 0.02 * USFlux, 'i': 239, 'j': 165, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Siebert': { - 'prop': 0.60 * 0.02 * USFlux, 'i': 235, 'j': 174, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'McDonald': { - 'prop': 0.60 * 0.03 * USFlux, 'i': 233, 'j': 183, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'DungenessMatriotti': { - 'prop': 0.60 * 0.30 * USFlux + 0.60 * 0.02 * USFlux, 'i': 232, 'j': 200, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Coville': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 263, 'j': 128, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Salt': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 275, 'j': 116, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Field': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 281, 'j': 100, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lyre': { - 'prop': 0.40 * 0.20 * USFlux, 'i': 283, 'j': 98, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'EastWestTwin': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 293, 'j': 81, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Deep': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 299, 'j': 72, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Pysht': { - 'prop': 0.40 * 0.10 * USFlux, 'i': 311, 'j': 63, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Clallom': { - 'prop': 0.40 * 0.10 * USFlux, 'i': 333, 'j': 45, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Hoko': { - 'prop': 0.40 * 0.20 * USFlux, 'i': 346, 'j': 35, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sekiu': { - 'prop': 0.40 * 0.10 * USFlux, 'i': 349, 'j': 31, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sail': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 373, 'j': 17, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["jdf"] = { + "SanJuan": { + "prop": 0.33 * CAFlux, + "i": 401, + "j": 63, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Gordon": { + "prop": 0.14 * CAFlux, + "i": 404, + "j": 64, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Loss": { + "prop": 0.05 * CAFlux, + "i": 376, + "j": 72, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Jordan": { + "prop": 0.05 * CAFlux, + "i": 349, + "j": 97, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Muir": { + "prop": 0.05 * CAFlux, + "i": 327, + "j": 120, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tugwell": { + "prop": 0.05 * CAFlux, + "i": 325, + "j": 121, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sooke": { + "prop": 0.33 * CAFlux, + "i": 316, + "j": 144, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Elwha": { + "prop": 0.60 * 0.50 * USFlux, + "i": 261, + "j": 134, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tumwater": { + "prop": 0.60 * 0.01 * USFlux, + "i": 248, + "j": 151, + "di": 1, + "dj": 1, + "depth": 1, + }, + 
"Valley": { + "prop": 0.60 * 0.01 * USFlux, + "i": 247, + "j": 152, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Ennis": { + "prop": 0.60 * 0.02 * USFlux, + "i": 244, + "j": 156, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Morse": { + "prop": 0.60 * 0.07 * USFlux, + "i": 240, + "j": 164, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Bagley": { + "prop": 0.60 * 0.02 * USFlux, + "i": 239, + "j": 165, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Siebert": { + "prop": 0.60 * 0.02 * USFlux, + "i": 235, + "j": 174, + "di": 1, + "dj": 1, + "depth": 1, + }, + "McDonald": { + "prop": 0.60 * 0.03 * USFlux, + "i": 233, + "j": 183, + "di": 1, + "dj": 1, + "depth": 1, + }, + "DungenessMatriotti": { + "prop": 0.60 * 0.30 * USFlux + 0.60 * 0.02 * USFlux, + "i": 232, + "j": 200, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Coville": { + "prop": 0.40 * 0.05 * USFlux, + "i": 263, + "j": 128, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Salt": { + "prop": 0.40 * 0.05 * USFlux, + "i": 275, + "j": 116, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Field": { + "prop": 0.40 * 0.05 * USFlux, + "i": 281, + "j": 100, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lyre": { + "prop": 0.40 * 0.20 * USFlux, + "i": 283, + "j": 98, + "di": 1, + "dj": 1, + "depth": 1, + }, + "EastWestTwin": { + "prop": 0.40 * 0.05 * USFlux, + "i": 293, + "j": 81, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Deep": { + "prop": 0.40 * 0.05 * USFlux, + "i": 299, + "j": 72, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Pysht": { + "prop": 0.40 * 0.10 * USFlux, + "i": 311, + "j": 63, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Clallom": { + "prop": 0.40 * 0.10 * USFlux, + "i": 333, + "j": 45, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Hoko": { + "prop": 0.40 * 0.20 * USFlux, + "i": 346, + "j": 35, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sekiu": { + "prop": 0.40 * 0.10 * USFlux, + "i": 349, + "j": 31, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sail": { + "prop": 0.40 * 0.05 * USFlux, + "i": 373, + "j": 17, + "di": 1, + "dj": 1, + "depth": 1, + }, +} # WRIA17 10% of Puget Sound Watershed WRIA17 = 0.10 @@ -172,466 +559,1166 @@ WRIA9 = 0.10 # WRIA8 10% of Puget Sound Watershed WRIA8 = 0.10 -prop_dict['puget'] = { - 'Johnson': { - 'prop': 0.05 * WRIA17, 'i': 208, 'j': 202, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Jimmycomelately': { - 'prop': 0.05 * WRIA17, 'i': 199, 'j': 203, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'SalmonSnow': { - 'prop': 0.25 * WRIA17, 'i': 182, 'j': 220, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chimacum': { - 'prop': 0.20 * WRIA17, 'i': 184, 'j': 240, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Thorndike': { - 'prop': 0.05 * WRIA17, 'i': 136, 'j': 214, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Torboo': { - 'prop': 0.05 * WRIA17, 'i': 149, 'j': 208, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'LittleBigQuilcene': { - 'prop': 0.35 * WRIA17, 'i': 148, 'j': 200, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Dosewalips': { - 'prop': 0.20 * WRIA16, 'i': 124, 'j': 177, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Duckabush': { - 'prop': 0.14 * WRIA16, 'i': 119, 'j': 167, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Fulton': { - 'prop': 0.02 * WRIA16, 'i': 117, 'j': 156, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Waketick': { - 'prop': 0.02 * WRIA16, 'i': 108, 'j': 141, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'HammaHamma': { - 'prop': 0.14 * WRIA16, 'i': 107, 'j': 139, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Jorsted': { - 'prop': 0.02 * WRIA16, 'i': 104, 'j': 135, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Eagle': { - 'prop': 0.02 * WRIA16, 'i': 98, 'j': 126, 'di': 1, 'dj': 1, 'depth': 1, - }, - 
'Lilliwaup': { - 'prop': 0.02 * WRIA16, 'i': 95, 'j': 119, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Finch': { - 'prop': 0.02 * WRIA16, 'i': 87, 'j': 108, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Skokomish': { - 'prop': 0.40 * WRIA16, 'i': 73, 'j': 103, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Rendsland': { - 'prop': 0.025 * WRIA15, 'i': 82, 'j': 107, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tahuya': { - 'prop': 0.20 * WRIA15, 'i': 72, 'j': 116, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Mission': { - 'prop': 0.05 * WRIA15, 'i': 73, 'j': 149, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Union': { - 'prop': 0.10 * WRIA15, 'i': 73, 'j': 153, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Coulter': { - 'prop': 0.05 * WRIA15, 'i': 64, 'j': 154, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Minter': { - 'prop': 0.05 * WRIA15, 'i': 46, 'j': 168, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Burley': { - 'prop': 0.05 * WRIA15, 'i': 46, 'j': 179, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Olalla': { - 'prop': 0.05 * WRIA15, 'i': 48, 'j': 198, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Blackjack': { - 'prop': 0.05 * WRIA15, 'i': 79, 'j': 198, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'ClearBarker': { - 'prop': 0.075 * WRIA15, 'i': 82, 'j': 203, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'BigValley': { - 'prop': 0.1 * WRIA15, 'i': 109, 'j': 220, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'BigBear': { - 'prop': 0.05 * WRIA15, 'i': 112, 'j': 189, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Swaback': { - 'prop': 0.025 * WRIA15, 'i': 112, 'j': 185, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Stavis': { - 'prop': 0.025 * WRIA15, 'i': 113, 'j': 175, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Anderson': { - 'prop': 0.05 * WRIA15, 'i': 107, 'j': 150, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Dewatta': { - 'prop': 0.05 * WRIA15, 'i': 93, 'j': 124, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sherwood': { - 'prop': 0.15 * WRIA14, 'i': 60, 'j': 150, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'DeerJohnsGoldboroughMill': { - 'prop': 0.50 * WRIA14, 'i': 34, 'j': 111, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Skookum': { - 'prop': 0.10 * WRIA14, 'i':29, 'j': 95, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'KennedySchneider': { - 'prop': 0.15 * WRIA14, 'i': 24, 'j': 80, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'PerryMcClane': { - 'prop': 0.10 * WRIA14 + 0.10 * WRIA13, 'i': 11, 'j': 85, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Deschutes': { - 'prop': 0.70 * WRIA13, 'i': 3, 'j': 100, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Woodward': { - 'prop': 0.10 * WRIA13, 'i': 13, 'j': 119, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Woodland': { - 'prop': 0.10 * WRIA13, 'i': 10, 'j': 118, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chambers': { - 'prop': 1.0 * WRIA12, 'i': 6, 'j': 162, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'NisquallyMcAllister': { - 'prop': 1.0 * WRIA11, 'i': 1, 'j': 135, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Puyallup': { - 'prop': 0.995 * WRIA10, 'i': 11, 'j': 194, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Hylebas': { - 'prop': 0.005 * WRIA10, 'i': 13, 'j': 199, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Duwamish1': { - 'prop': 0.50 * WRIA9, 'i': 67, 'j': 243, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Duwamish2': { - 'prop': 0.50 * WRIA9, 'i': 68, 'j': 246, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'CedarSammamish': { - 'prop': 1.0 * WRIA8, 'i': 89, 'j': 246, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["puget"] = { + "Johnson": { + "prop": 0.05 * WRIA17, + "i": 208, + "j": 202, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Jimmycomelately": { + "prop": 0.05 * WRIA17, + "i": 199, + "j": 203, + "di": 1, + "dj": 1, + "depth": 1, + }, + "SalmonSnow": { + "prop": 0.25 * 
WRIA17, + "i": 182, + "j": 220, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chimacum": { + "prop": 0.20 * WRIA17, + "i": 184, + "j": 240, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Thorndike": { + "prop": 0.05 * WRIA17, + "i": 136, + "j": 214, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Torboo": { + "prop": 0.05 * WRIA17, + "i": 149, + "j": 208, + "di": 1, + "dj": 1, + "depth": 1, + }, + "LittleBigQuilcene": { + "prop": 0.35 * WRIA17, + "i": 148, + "j": 200, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Dosewalips": { + "prop": 0.20 * WRIA16, + "i": 124, + "j": 177, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Duckabush": { + "prop": 0.14 * WRIA16, + "i": 119, + "j": 167, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Fulton": { + "prop": 0.02 * WRIA16, + "i": 117, + "j": 156, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Waketick": { + "prop": 0.02 * WRIA16, + "i": 108, + "j": 141, + "di": 1, + "dj": 1, + "depth": 1, + }, + "HammaHamma": { + "prop": 0.14 * WRIA16, + "i": 107, + "j": 139, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Jorsted": { + "prop": 0.02 * WRIA16, + "i": 104, + "j": 135, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Eagle": { + "prop": 0.02 * WRIA16, + "i": 98, + "j": 126, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lilliwaup": { + "prop": 0.02 * WRIA16, + "i": 95, + "j": 119, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Finch": { + "prop": 0.02 * WRIA16, + "i": 87, + "j": 108, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Skokomish": { + "prop": 0.40 * WRIA16, + "i": 73, + "j": 103, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Rendsland": { + "prop": 0.025 * WRIA15, + "i": 82, + "j": 107, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tahuya": { + "prop": 0.20 * WRIA15, + "i": 72, + "j": 116, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Mission": { + "prop": 0.05 * WRIA15, + "i": 73, + "j": 149, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Union": { + "prop": 0.10 * WRIA15, + "i": 73, + "j": 153, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Coulter": { + "prop": 0.05 * WRIA15, + "i": 64, + "j": 154, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Minter": { + "prop": 0.05 * WRIA15, + "i": 46, + "j": 168, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Burley": { + "prop": 0.05 * WRIA15, + "i": 46, + "j": 179, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Olalla": { + "prop": 0.05 * WRIA15, + "i": 48, + "j": 198, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Blackjack": { + "prop": 0.05 * WRIA15, + "i": 79, + "j": 198, + "di": 1, + "dj": 1, + "depth": 1, + }, + "ClearBarker": { + "prop": 0.075 * WRIA15, + "i": 82, + "j": 203, + "di": 1, + "dj": 1, + "depth": 1, + }, + "BigValley": { + "prop": 0.1 * WRIA15, + "i": 109, + "j": 220, + "di": 1, + "dj": 1, + "depth": 1, + }, + "BigBear": { + "prop": 0.05 * WRIA15, + "i": 112, + "j": 189, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Swaback": { + "prop": 0.025 * WRIA15, + "i": 112, + "j": 185, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stavis": { + "prop": 0.025 * WRIA15, + "i": 113, + "j": 175, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Anderson": { + "prop": 0.05 * WRIA15, + "i": 107, + "j": 150, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Dewatta": { + "prop": 0.05 * WRIA15, + "i": 93, + "j": 124, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sherwood": { + "prop": 0.15 * WRIA14, + "i": 60, + "j": 150, + "di": 1, + "dj": 1, + "depth": 1, + }, + "DeerJohnsGoldboroughMill": { + "prop": 0.50 * WRIA14, + "i": 34, + "j": 111, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Skookum": { + "prop": 0.10 * WRIA14, + "i": 29, + "j": 95, + "di": 1, + "dj": 1, + 
"depth": 1, + }, + "KennedySchneider": { + "prop": 0.15 * WRIA14, + "i": 24, + "j": 80, + "di": 1, + "dj": 1, + "depth": 1, + }, + "PerryMcClane": { + "prop": 0.10 * WRIA14 + 0.10 * WRIA13, + "i": 11, + "j": 85, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Deschutes": { + "prop": 0.70 * WRIA13, + "i": 3, + "j": 100, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Woodward": { + "prop": 0.10 * WRIA13, + "i": 13, + "j": 119, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Woodland": { + "prop": 0.10 * WRIA13, + "i": 10, + "j": 118, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chambers": { + "prop": 1.0 * WRIA12, + "i": 6, + "j": 162, + "di": 1, + "dj": 1, + "depth": 1, + }, + "NisquallyMcAllister": { + "prop": 1.0 * WRIA11, + "i": 1, + "j": 135, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Puyallup": { + "prop": 0.995 * WRIA10, + "i": 11, + "j": 194, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Hylebas": { + "prop": 0.005 * WRIA10, + "i": 13, + "j": 199, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Duwamish1": { + "prop": 0.50 * WRIA9, + "i": 67, + "j": 243, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Duwamish2": { + "prop": 0.50 * WRIA9, + "i": 68, + "j": 246, + "di": 1, + "dj": 1, + "depth": 1, + }, + "CedarSammamish": { + "prop": 1.0 * WRIA8, + "i": 89, + "j": 246, + "di": 1, + "dj": 1, + "depth": 1, + }, +} WRIA4 = 0.33 WRIA3 = 0.17 WRIA5 = 0.17 WRIA7 = 0.33 -prop_dict['skagit'] = { - 'Skagit1': { - 'prop': 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), 'i': 205, 'j': 330, 'di': 1, 'dj': 1, 'depth': 2, - }, - 'Skagit2': { - 'prop': 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), 'i': 229, 'j': 319, 'di': 1, 'dj': 1, 'depth': 2, - }, - 'Samish': { - 'prop': WRIA3 * 0.20, 'i': 263, 'j': 347, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'JoeLeary': { - 'prop': WRIA3 * 0.05, 'i': 257, 'j': 339, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Stillaguamish1': { - 'prop': 0.7 * WRIA5 * 1.0, 'i': 186, 'j': 319, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Stillaguamish2': { - 'prop': 0.1 * WRIA5 * 1.0, 'i': 193, 'j': 318, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Stillaguamish3': { - 'prop': 0.2 * WRIA5 * 1.0, 'i': 197, 'j': 319, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'SnohomishAllenQuilceda': { - 'prop': WRIA7 * 0.98, 'i': 143, 'j': 321, 'di': 1, 'dj': 1, 'depth': 2, - }, - 'Tulalip': { - 'prop': WRIA7 * 0.01, 'i': 155, 'j': 312, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Mission': { - 'prop': WRIA7 * 0.01, 'i': 153, 'j': 313, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["skagit"] = { + "Skagit1": { + "prop": 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), + "i": 205, + "j": 330, + "di": 1, + "dj": 1, + "depth": 2, + }, + "Skagit2": { + "prop": 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), + "i": 229, + "j": 319, + "di": 1, + "dj": 1, + "depth": 2, + }, + "Samish": { + "prop": WRIA3 * 0.20, + "i": 263, + "j": 347, + "di": 1, + "dj": 1, + "depth": 3, + }, + "JoeLeary": { + "prop": WRIA3 * 0.05, + "i": 257, + "j": 339, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stillaguamish1": { + "prop": 0.7 * WRIA5 * 1.0, + "i": 186, + "j": 319, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stillaguamish2": { + "prop": 0.1 * WRIA5 * 1.0, + "i": 193, + "j": 318, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stillaguamish3": { + "prop": 0.2 * WRIA5 * 1.0, + "i": 197, + "j": 319, + "di": 1, + "dj": 1, + "depth": 1, + }, + "SnohomishAllenQuilceda": { + "prop": WRIA7 * 0.98, + "i": 143, + "j": 321, + "di": 1, + "dj": 1, + "depth": 2, + }, + "Tulalip": { + "prop": WRIA7 * 0.01, + "i": 155, + "j": 312, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Mission": { + "prop": WRIA7 * 0.01, + "i": 153, + "j": 313, + 
"di": 1, + "dj": 1, + "depth": 1, + }, +} WRIA1 = 0.016 Nooksack_cor = WRIA1 * 0.75 / 2040 Fraser = 1 - WRIA1 - Nooksack_cor * (116 + 149 + 60 + 31) -prop_dict['fraser'] = { - 'Serpentine': { - 'prop': 116 * Nooksack_cor, 'i': 392, 'j': 354, - 'di': 1, 'dj': 1, 'depth': 1 - }, - 'Nicomekl': { - 'prop': 149 * Nooksack_cor, 'i': 388, 'j': 350, - 'di': 1, 'dj': 1, 'depth': 1 - }, - 'LittleCampbell': { - 'prop': 60 * Nooksack_cor, 'i': 373, 'j': 357, - 'di': 1, 'dj': 1, 'depth': 1 - }, - 'Colebrook': { - 'prop': 31 * Nooksack_cor, 'i': 395, 'j': 346, - 'di': 1, 'dj': 1, 'depth': 1 - }, - 'Dakota': { - 'prop': WRIA1 * 0.06, 'i': 363, 'j': 357, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Terrel': { - 'prop': WRIA1 * 0.04, 'i': 353, 'j': 350, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nooksack': { - 'prop': WRIA1 * 0.75, 'i': 312, 'j': 354, - 'di': 1, 'dj': 4, 'depth': 1, - }, - 'Squallum': { - 'prop': WRIA1 * 0.05, 'i': 305, 'j': 365, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lakethingo': { - 'prop': WRIA1 * 0.06, 'i': 302, 'j': 367, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chuckanut': { - 'prop': WRIA1 * 0.04, 'i': 298, 'j': 361, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Fraser': { - 'prop': Fraser, 'i': 500, 'j': 394, - 'di': 1, 'dj': 1, 'depth': 3, - }} +prop_dict["fraser"] = { + "Serpentine": { + "prop": 116 * Nooksack_cor, + "i": 392, + "j": 354, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nicomekl": { + "prop": 149 * Nooksack_cor, + "i": 388, + "j": 350, + "di": 1, + "dj": 1, + "depth": 1, + }, + "LittleCampbell": { + "prop": 60 * Nooksack_cor, + "i": 373, + "j": 357, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Colebrook": { + "prop": 31 * Nooksack_cor, + "i": 395, + "j": 346, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Dakota": { + "prop": WRIA1 * 0.06, + "i": 363, + "j": 357, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Terrel": { + "prop": WRIA1 * 0.04, + "i": 353, + "j": 350, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nooksack": { + "prop": WRIA1 * 0.75, + "i": 312, + "j": 354, + "di": 1, + "dj": 4, + "depth": 1, + }, + "Squallum": { + "prop": WRIA1 * 0.05, + "i": 305, + "j": 365, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lakethingo": { + "prop": WRIA1 * 0.06, + "i": 302, + "j": 367, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chuckanut": { + "prop": WRIA1 * 0.04, + "i": 298, + "j": 361, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Fraser": { + "prop": Fraser, + "i": 500, + "j": 394, + "di": 1, + "dj": 1, + "depth": 3, + }, +} totalarea = 9709.0 -prop_dict['evi_n'] = { - 'Oyster': { - 'prop': 363 / totalarea, 'i': 706, 'j': 123, 'di': 1, 'dj': 1, 'depth': 1}, - 'Qunisam': { - 'prop': 1470 / totalarea, 'i': 750, 'j': 123, 'di': 2, 'dj': 1, 'depth': 1}, - 'Snowden': { - 'prop': 139 / totalarea, 'i': 771, 'j': 116, 'di': 1, 'dj': 1, 'depth': 1}, - 'Menzies': { - 'prop': 31 / totalarea, 'i': 776, 'j': 115, 'di': 1, 'dj': 1, 'depth': 1}, - 'Creek1': { - 'prop': 23 / totalarea, 'i': 788, 'j': 123, 'di': 1, 'dj': 1, 'depth': 1}, - 'Creek2': { - 'prop': 16 / totalarea, 'i': 796, 'j': 126, 'di': 1, 'dj': 1, 'depth': 1}, - 'Creek3': { - 'prop': 23 / totalarea, 'i': 799, 'j': 127, 'di': 1, 'dj': 1, 'depth': 1}, - 'Elk': { - 'prop': 23 / totalarea, 'i': 808, 'j': 126, 'di': 1, 'dj': 1, 'depth': 1}, - 'Slab': { - 'prop': 12 / totalarea, 'i': 813, 'j': 128, 'di': 1, 'dj': 1, 'depth': 1}, - 'Pye': { - 'prop': 109 / totalarea, 'i': 826, 'j': 121, 'di': 1, 'dj': 1, 'depth': 1}, - 'BearPoint': { - 'prop': 12 / totalarea, 'i': 839, 'j': 108, 'di': 1, 'dj': 1, 'depth': 1}, - 'AmordeCosmos': { - 'prop': 229 / 
totalarea, 'i': 843, 'j': 96, 'di': 1, 'dj': 1, 'depth': 1}, - 'Humpback': { - 'prop': 10 / totalarea, 'i': 844, 'j': 94, 'di': 1, 'dj': 1, 'depth': 1}, - 'Palmer': { - 'prop': 14 / totalarea, 'i': 845, 'j': 92, 'di': 1, 'dj': 1, 'depth': 1}, - 'Hkusam': { - 'prop': 14 / totalarea, 'i': 849, 'j': 87, 'di': 1, 'dj': 1, 'depth': 1}, - 'CampPoint': { - 'prop': 28 / totalarea, 'i': 857, 'j': 78, 'di': 1, 'dj': 1, 'depth': 1}, - 'SalmonSayward': { - 'prop': (1210 + 14) / totalarea, 'i': 864, 'j': 64, 'di': 1, 'dj': 1, 'depth': 1}, - 'Kelsey': { - 'prop': 7 / totalarea, 'i': 878, 'j': 58, 'di': 1, 'dj': 1, 'depth': 1}, - 'unmarked': { - 'prop': 7 / totalarea, 'i': 884, 'j': 53, 'di': 1, 'dj': 1, 'depth': 1}, - 'Newcastle': { - 'prop': 34 / totalarea, - 'i': 890, 'j': 47, - 'di': 1, 'dj': 1, 'depth': 1}, - 'Windy': { - 'prop': 10 / totalarea, 'i': 891, 'j': 46, - 'di': 1, 'dj': 1, 'depth': 1}} +prop_dict["evi_n"] = { + "Oyster": { + "prop": 363 / totalarea, + "i": 706, + "j": 123, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Qunisam": { + "prop": 1470 / totalarea, + "i": 750, + "j": 123, + "di": 2, + "dj": 1, + "depth": 1, + }, + "Snowden": { + "prop": 139 / totalarea, + "i": 771, + "j": 116, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Menzies": { + "prop": 31 / totalarea, + "i": 776, + "j": 115, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Creek1": { + "prop": 23 / totalarea, + "i": 788, + "j": 123, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Creek2": { + "prop": 16 / totalarea, + "i": 796, + "j": 126, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Creek3": { + "prop": 23 / totalarea, + "i": 799, + "j": 127, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Elk": {"prop": 23 / totalarea, "i": 808, "j": 126, "di": 1, "dj": 1, "depth": 1}, + "Slab": {"prop": 12 / totalarea, "i": 813, "j": 128, "di": 1, "dj": 1, "depth": 1}, + "Pye": {"prop": 109 / totalarea, "i": 826, "j": 121, "di": 1, "dj": 1, "depth": 1}, + "BearPoint": { + "prop": 12 / totalarea, + "i": 839, + "j": 108, + "di": 1, + "dj": 1, + "depth": 1, + }, + "AmordeCosmos": { + "prop": 229 / totalarea, + "i": 843, + "j": 96, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Humpback": { + "prop": 10 / totalarea, + "i": 844, + "j": 94, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Palmer": {"prop": 14 / totalarea, "i": 845, "j": 92, "di": 1, "dj": 1, "depth": 1}, + "Hkusam": {"prop": 14 / totalarea, "i": 849, "j": 87, "di": 1, "dj": 1, "depth": 1}, + "CampPoint": { + "prop": 28 / totalarea, + "i": 857, + "j": 78, + "di": 1, + "dj": 1, + "depth": 1, + }, + "SalmonSayward": { + "prop": (1210 + 14) / totalarea, + "i": 864, + "j": 64, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Kelsey": {"prop": 7 / totalarea, "i": 878, "j": 58, "di": 1, "dj": 1, "depth": 1}, + "unmarked": { + "prop": 7 / totalarea, + "i": 884, + "j": 53, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Newcastle": { + "prop": 34 / totalarea, + "i": 890, + "j": 47, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Windy": {"prop": 10 / totalarea, "i": 891, "j": 46, "di": 1, "dj": 1, "depth": 1}, +} # Jervis Inlet only area = 1400km2 (Trites 1955) ==> 25% of Jervis # watershed Jervis = 0.25 -prop_dict['jervis'] = { - 'SkwawkaHunaechin': { - 'prop': Jervis * 0.20, 'i': 692, 'j': 332, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Loquilts': { - 'prop': Jervis * 0.04, 'i': 674, 'j': 347, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Potato': { - 'prop': Jervis * 0.04, 'i': 666, 'j': 349, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Deserted': { - 'prop': Jervis * 0.10, 'i': 653, 'j': 353, 'di': 1, 'dj': 1, 'depth': 1, - }, - 
'Stakawus': { - 'prop': Jervis * 0.04, 'i': 651, 'j': 346, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Crabapple': { - 'prop': Jervis * 0.04, 'i': 665, 'j': 342, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Osgood': { - 'prop': Jervis * 0.04, 'i': 652, 'j': 323, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lausmann': { - 'prop': Jervis * 0.03, 'i': 690, 'j': 332, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Slane': { - 'prop': Jervis * 0.03, 'i': 687, 'j': 331, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Smanit': { - 'prop': Jervis * 0.04, 'i': 681, 'j': 334, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Glacial': { - 'prop': Jervis * 0.05, 'i': 649, 'j': 310, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Seshal': { - 'prop': Jervis * 0.05, 'i': 652, 'j': 318, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Brittain': { - 'prop': Jervis * 0.10, 'i': 652, 'j': 301, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'VancouverHigh': { - 'prop': Jervis * 0.10, 'i': 628, 'j': 312, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Perketts': { - 'prop': Jervis * 0.05, 'i': 619, 'j': 307, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Treat': { - 'prop': Jervis * 0.05, 'i': 612, 'j': 302, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sechelt': { - 'prop': 0.17, 'i': 593, 'j': 285, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Powell': { - 'prop': 0.32, 'i': 667, 'j': 203, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lois': { - 'prop': 0.10, 'i': 629, 'j': 227, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Haslam': { - 'prop': 0.02, 'i': 633, 'j': 219, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chapman': { - 'prop': 0.016, 'i': 522, 'j': 273, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lapan': { - 'prop': 0.02, 'i': 620, 'j': 283, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nelson': { - 'prop': 0.02, 'i': 604, 'j': 262, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Wakefield': { - 'prop': 0.02, 'i': 534, 'j': 264, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Halfmoon': { - 'prop': 0.02, 'i': 549, 'j': 254, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'MyersKleindaleAnderson': { - 'prop': 0.04, 'i': 571, 'j': 248, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Wilson': { - 'prop': 0.004, 'i': 521, 'j': 274, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["jervis"] = { + "SkwawkaHunaechin": { + "prop": Jervis * 0.20, + "i": 692, + "j": 332, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Loquilts": { + "prop": Jervis * 0.04, + "i": 674, + "j": 347, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Potato": { + "prop": Jervis * 0.04, + "i": 666, + "j": 349, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Deserted": { + "prop": Jervis * 0.10, + "i": 653, + "j": 353, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stakawus": { + "prop": Jervis * 0.04, + "i": 651, + "j": 346, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Crabapple": { + "prop": Jervis * 0.04, + "i": 665, + "j": 342, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Osgood": { + "prop": Jervis * 0.04, + "i": 652, + "j": 323, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lausmann": { + "prop": Jervis * 0.03, + "i": 690, + "j": 332, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Slane": { + "prop": Jervis * 0.03, + "i": 687, + "j": 331, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Smanit": { + "prop": Jervis * 0.04, + "i": 681, + "j": 334, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Glacial": { + "prop": Jervis * 0.05, + "i": 649, + "j": 310, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Seshal": { + "prop": Jervis * 0.05, + "i": 652, + "j": 318, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Brittain": { + "prop": Jervis * 0.10, + "i": 652, + "j": 301, + "di": 1, + "dj": 1, + "depth": 1, + }, + "VancouverHigh": { + "prop": Jervis * 0.10, + "i": 
628, + "j": 312, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Perketts": { + "prop": Jervis * 0.05, + "i": 619, + "j": 307, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Treat": { + "prop": Jervis * 0.05, + "i": 612, + "j": 302, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sechelt": { + "prop": 0.17, + "i": 593, + "j": 285, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Powell": { + "prop": 0.32, + "i": 667, + "j": 203, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lois": { + "prop": 0.10, + "i": 629, + "j": 227, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Haslam": { + "prop": 0.02, + "i": 633, + "j": 219, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chapman": { + "prop": 0.016, + "i": 522, + "j": 273, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lapan": { + "prop": 0.02, + "i": 620, + "j": 283, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nelson": { + "prop": 0.02, + "i": 604, + "j": 262, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Wakefield": { + "prop": 0.02, + "i": 534, + "j": 264, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Halfmoon": { + "prop": 0.02, + "i": 549, + "j": 254, + "di": 1, + "dj": 1, + "depth": 1, + }, + "MyersKleindaleAnderson": { + "prop": 0.04, + "i": 571, + "j": 248, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Wilson": { + "prop": 0.004, + "i": 521, + "j": 274, + "di": 1, + "dj": 1, + "depth": 1, + }, +} # Wilson area = 24.58 km2, Jervis ws is 5785 km2 so 0.004 (take out of Chapman) -prop_dict['toba'] = { - 'Toba': { - 'prop': 0.50, 'i': 775, 'j': 311, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Theodosia': { - 'prop': 0.12, 'i': 713, 'j': 197, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Quatam': { - 'prop': 0.09, 'i': 794, 'j': 211, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Brem': { - 'prop': 0.09, 'i': 785, 'j': 260, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tahumming': { - 'prop': 0.08, 'i': 777, 'j': 309, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Racine': { - 'prop': 0.04, 'i': 770, 'j': 272, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Homfray': { - 'prop': 0.03, 'i': 754, 'j': 245, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Forbes': { - 'prop': 0.03, 'i': 742, 'j': 247, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chusan': { - 'prop': 0.02, 'i': 773, 'j': 307, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["toba"] = { + "Toba": { + "prop": 0.50, + "i": 775, + "j": 311, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Theodosia": { + "prop": 0.12, + "i": 713, + "j": 197, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Quatam": { + "prop": 0.09, + "i": 794, + "j": 211, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Brem": { + "prop": 0.09, + "i": 785, + "j": 260, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tahumming": { + "prop": 0.08, + "i": 777, + "j": 309, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Racine": { + "prop": 0.04, + "i": 770, + "j": 272, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Homfray": { + "prop": 0.03, + "i": 754, + "j": 245, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Forbes": { + "prop": 0.03, + "i": 742, + "j": 247, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chusan": { + "prop": 0.02, + "i": 773, + "j": 307, + "di": 1, + "dj": 1, + "depth": 1, + }, +} -prop_dict['bute'] = { - 'Homathko': { - 'prop': 0.58, 'i': 896, 'j': 293, 'di': 1, 'dj': 3, 'depth': 2, - }, - 'Southgate': { - 'prop': 0.35, 'i': 885, 'j': 297, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Orford': { - 'prop': 0.07, 'i': 830, 'j': 250, 'di': 1, 'dj': 1, 'depth': 1, - }} - -prop_dict['evi_s'] = { - 'Cowichan1': { - 'prop': 0.5*0.22, 'i': 384, 'j': 201, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Cowichan2': { - 'prop': 0.5*0.22, 'i': 383, 'j': 200, 
'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chemanius1': { - 'prop': 0.5 * 0.13, 'i': 414, 'j': 211, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chemanius2': { - 'prop': 0.5 * 0.13, 'i': 417, 'j': 211, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nanaimo1': { - 'prop': 0.67 * 0.14, 'i': 477, 'j': 207, 'di': 1, 'dj': 2, 'depth': 1, - }, - 'Nanaimo2': { - 'prop': 0.33 * 0.14, 'i': 477, 'j': 211, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'NorNanaimo': { - 'prop': 0.02, 'i': 486, 'j': 208, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Goldstream': { - 'prop': 0.08, 'i': 329, 'j': 182, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nanoose': { - 'prop': 0.02, 'i': 521, 'j': 183, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Englishman': { - 'prop': 0.05, 'i': 542, 'j': 175, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'FrenchCreek': { - 'prop': 0.01, 'i': 551, 'j': 168, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'LittleQualicum': { - 'prop': 0.05, 'i': 564, 'j': 149, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Qualicum': { - 'prop': 0.02, 'i': 578, 'j': 138, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'SouthDenman': { - 'prop': 0.05, 'i': 602, 'j': 122, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tsable': { - 'prop': 0.03, 'i': 616, 'j': 120, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Trent': { - 'prop': 0.01, 'i': 649, 'j': 121, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Puntledge': { - 'prop': 0.14, 'i': 656, 'j': 119, 'di': 1, 'dj': 2, 'depth': 1, - }, - 'BlackCreek': { - 'prop': 0.03, 'i': 701, 'j': 123, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["bute"] = { + "Homathko": { + "prop": 0.58, + "i": 896, + "j": 293, + "di": 1, + "dj": 3, + "depth": 2, + }, + "Southgate": { + "prop": 0.35, + "i": 885, + "j": 297, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Orford": { + "prop": 0.07, + "i": 830, + "j": 250, + "di": 1, + "dj": 1, + "depth": 1, + }, +} +prop_dict["evi_s"] = { + "Cowichan1": { + "prop": 0.5 * 0.22, + "i": 384, + "j": 201, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Cowichan2": { + "prop": 0.5 * 0.22, + "i": 383, + "j": 200, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chemanius1": { + "prop": 0.5 * 0.13, + "i": 414, + "j": 211, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chemanius2": { + "prop": 0.5 * 0.13, + "i": 417, + "j": 211, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nanaimo1": { + "prop": 0.67 * 0.14, + "i": 477, + "j": 207, + "di": 1, + "dj": 2, + "depth": 1, + }, + "Nanaimo2": { + "prop": 0.33 * 0.14, + "i": 477, + "j": 211, + "di": 1, + "dj": 1, + "depth": 1, + }, + "NorNanaimo": { + "prop": 0.02, + "i": 486, + "j": 208, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Goldstream": { + "prop": 0.08, + "i": 329, + "j": 182, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nanoose": { + "prop": 0.02, + "i": 521, + "j": 183, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Englishman": { + "prop": 0.05, + "i": 542, + "j": 175, + "di": 1, + "dj": 1, + "depth": 1, + }, + "FrenchCreek": { + "prop": 0.01, + "i": 551, + "j": 168, + "di": 1, + "dj": 1, + "depth": 1, + }, + "LittleQualicum": { + "prop": 0.05, + "i": 564, + "j": 149, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Qualicum": { + "prop": 0.02, + "i": 578, + "j": 138, + "di": 1, + "dj": 1, + "depth": 1, + }, + "SouthDenman": { + "prop": 0.05, + "i": 602, + "j": 122, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tsable": { + "prop": 0.03, + "i": 616, + "j": 120, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Trent": { + "prop": 0.01, + "i": 649, + "j": 121, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Puntledge": { + "prop": 0.14, + "i": 656, + "j": 119, + "di": 1, + "dj": 2, + "depth": 1, + }, + "BlackCreek": { + "prop": 0.03, + 
"i": 701, + "j": 123, + "di": 1, + "dj": 1, + "depth": 1, + }, +} diff --git a/SalishSeaTools/salishsea_tools/river_202108.py b/SalishSeaTools/salishsea_tools/river_202108.py index 15f68f4a..fd460467 100644 --- a/SalishSeaTools/salishsea_tools/river_202108.py +++ b/SalishSeaTools/salishsea_tools/river_202108.py @@ -22,45 +22,296 @@ prop_dict = {} # dictionary of rivers in Howe watershed # depth at First Narrows, Fig 4.5 Isachsen thesis -prop_dict['howe'] = { - 'Squamish': { - 'prop': 0.706947808647832, 'i': 534, 'j': 384, 'di': 1, 'dj': 2, 'depth': 2, - }, - 'Jericho': {'prop': 0.002474317330267412, 'i': 453, 'j': 329, 'di': 1, 'dj': 1, 'depth': 1}, - 'FalseCreek': {'prop': 0.005054676831831999, 'i': 450, 'j': 337, 'di': 1, 'dj': 1, 'depth': 1}, - 'FirstNarrows': {'prop': 0.1294681892089998, 'i': 457, 'j': 343, 'di': 1, 'dj': 1, 'depth': 8}, - 'Capilano': {'prop': 0.00918474034551144, 'i': 458, 'j': 343, 'di': 1, 'dj': 1, 'depth': 1}, - 'Lawson': {'prop': 0.0013338988915802515, 'i': 461, 'j': 341, 'di': 1, 'dj': 1, 'depth': 1}, - 'Marr': {'prop': 0.0011794866070598039, 'i': 464, 'j': 338, 'di': 1, 'dj': 1, 'depth': 1}, - 'Rodgers': {'prop':0.0009074233240685963, 'i': 465, 'j': 337, 'di': 2, 'dj': 1, 'depth': 1}, - 'Westmount': {'prop': 0.00044693984618303996, 'i': 467, 'j': 336, 'di': 1, 'dj': 1, 'depth': 1}, - 'Cypress': {'prop': 0.0029598788514702654, 'i': 469, 'j': 331, 'di': 1, 'dj': 1, 'depth': 1}, - 'Eagle': {'prop': 0.0018306227466038597, 'i': 474, 'j': 329, 'di': 1, 'dj': 1, 'depth': 1}, - 'Whyte': {'prop': 0.0010827463806132587, 'i': 478, 'j': 332, 'di': 1, 'dj': 1, 'depth': 1}, - 'Disbrow': {'prop': 0.0005525355241273846, 'i': 481, 'j': 337, 'di': 1, 'dj': 1, 'depth': 1}, - 'Sclufield': {'prop': 0.00044835374180033564, 'i': 482, 'j': 339, 'di': 1, 'dj': 1, 'depth': 1}, - 'Turpin': {'prop': 0.0014176163952359158, 'i': 485, 'j': 343, 'di': 1, 'dj': 1, 'depth': 1}, - 'Harvey': {'prop': 0.0038900733365332023, 'i': 492, 'j': 346, 'di': 1, 'dj': 1, 'depth': 1}, - 'Deeks': {'prop': 0.0032407975859592725, 'i': 501, 'j': 350, 'di': 1, 'dj': 1, 'depth': 1}, - 'Kallahn': {'prop': 0.0024854796640881673, 'i': 510, 'j': 358, 'di': 1, 'dj': 1, 'depth': 1}, - 'Furry': {'prop': 0.012349261983695549, 'i': 515, 'j': 364, 'di': 1, 'dj': 1, 'depth': 1}, - 'Britannia': {'prop': 0.006394156890322628, 'i': 523, 'j': 373, 'di': 1, 'dj': 1, 'depth': 1}, - 'Gonzalos': {'prop': 0.002790583455188811, 'i': 527, 'j': 381, 'di': 1, 'dj': 1, 'depth': 1}, - 'Shannon': {'prop': 0.0033375378124058178, 'i': 529, 'j': 386, 'di': 1, 'dj': 1, 'depth': 1}, - 'Stawanus': {'prop': 0.010745606691447047, 'i': 530, 'j': 387, 'di': 1, 'dj': 1, 'depth': 1}, - 'Woodfibre': {'prop': 0.013573397926038377, 'i': 533, 'j': 370, 'di': 1, 'dj': 2, 'depth': 1}, - 'Foulger': {'prop': 0.0032147521403775104, 'i': 530, 'j': 367, 'di': 1, 'dj': 1, 'depth': 1}, - 'Ellesmere': {'prop': 0.0028538366801730904, 'i': 521, 'j': 361, 'di': 1, 'dj': 1, 'depth': 1}, - 'Potlatch': {'prop': 0.006500199061619803, 'i': 522, 'j': 350, 'di': 1, 'dj': 1, 'depth': 1}, - 'McNab': {'prop': 0.013525027812815104, 'i': 522, 'j': 338, 'di': 1, 'dj': 1, 'depth': 1}, - 'Bain': {'prop': 0.0024817588861479156, 'i': 522, 'j': 326, 'di': 1, 'dj': 1, 'depth': 1}, - 'Rainy': {'prop': 0.013266433745967606, 'i': 522, 'j': 319, 'di': 1, 'dj': 1, 'depth': 1}, - 'McNair': {'prop': 0.010051681605590097, 'i': 520, 'j': 316, 'di': 1, 'dj': 1, 'depth': 1}, - 'Twin': {'prop': 0.003638920825566209, 'i': 513, 'j': 313, 'di': 1, 'dj': 1, 'depth': 1}, - 'Langdale': {'prop': 
0.005008167107578853, 'i': 504, 'j': 309, 'di': 1, 'dj': 1, 'depth': 1}, - 'Chester': {'prop': 0.004572836088569398, 'i': 501, 'j': 291, 'di': 1, 'dj': 1, 'depth': 1}, - 'Roberts': {'prop': 0.009084279341124641, 'i': 512, 'j': 282, 'di': 1, 'dj': 1, 'depth': 1}, - 'Rume': {'prop': 0.0017059766856054263, 'i': 517, 'j': 279, 'di': 1, 'dj': 1, 'depth': 1}} +prop_dict["howe"] = { + "Squamish": { + "prop": 0.706947808647832, + "i": 534, + "j": 384, + "di": 1, + "dj": 2, + "depth": 2, + }, + "Jericho": { + "prop": 0.002474317330267412, + "i": 453, + "j": 329, + "di": 1, + "dj": 1, + "depth": 1, + }, + "FalseCreek": { + "prop": 0.005054676831831999, + "i": 450, + "j": 337, + "di": 1, + "dj": 1, + "depth": 1, + }, + "FirstNarrows": { + "prop": 0.1294681892089998, + "i": 457, + "j": 343, + "di": 1, + "dj": 1, + "depth": 8, + }, + "Capilano": { + "prop": 0.00918474034551144, + "i": 458, + "j": 343, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lawson": { + "prop": 0.0013338988915802515, + "i": 461, + "j": 341, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Marr": { + "prop": 0.0011794866070598039, + "i": 464, + "j": 338, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Rodgers": { + "prop": 0.0009074233240685963, + "i": 465, + "j": 337, + "di": 2, + "dj": 1, + "depth": 1, + }, + "Westmount": { + "prop": 0.00044693984618303996, + "i": 467, + "j": 336, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Cypress": { + "prop": 0.0029598788514702654, + "i": 469, + "j": 331, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Eagle": { + "prop": 0.0018306227466038597, + "i": 474, + "j": 329, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Whyte": { + "prop": 0.0010827463806132587, + "i": 478, + "j": 332, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Disbrow": { + "prop": 0.0005525355241273846, + "i": 481, + "j": 337, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sclufield": { + "prop": 0.00044835374180033564, + "i": 482, + "j": 339, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Turpin": { + "prop": 0.0014176163952359158, + "i": 485, + "j": 343, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Harvey": { + "prop": 0.0038900733365332023, + "i": 492, + "j": 346, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Deeks": { + "prop": 0.0032407975859592725, + "i": 501, + "j": 350, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Kallahn": { + "prop": 0.0024854796640881673, + "i": 510, + "j": 358, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Furry": { + "prop": 0.012349261983695549, + "i": 515, + "j": 364, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Britannia": { + "prop": 0.006394156890322628, + "i": 523, + "j": 373, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Gonzalos": { + "prop": 0.002790583455188811, + "i": 527, + "j": 381, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Shannon": { + "prop": 0.0033375378124058178, + "i": 529, + "j": 386, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stawanus": { + "prop": 0.010745606691447047, + "i": 530, + "j": 387, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Woodfibre": { + "prop": 0.013573397926038377, + "i": 533, + "j": 370, + "di": 1, + "dj": 2, + "depth": 1, + }, + "Foulger": { + "prop": 0.0032147521403775104, + "i": 530, + "j": 367, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Ellesmere": { + "prop": 0.0028538366801730904, + "i": 521, + "j": 361, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Potlatch": { + "prop": 0.006500199061619803, + "i": 522, + "j": 350, + "di": 1, + "dj": 1, + "depth": 1, + }, + "McNab": { + "prop": 0.013525027812815104, + "i": 522, + "j": 338, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Bain": { + 
"prop": 0.0024817588861479156, + "i": 522, + "j": 326, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Rainy": { + "prop": 0.013266433745967606, + "i": 522, + "j": 319, + "di": 1, + "dj": 1, + "depth": 1, + }, + "McNair": { + "prop": 0.010051681605590097, + "i": 520, + "j": 316, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Twin": { + "prop": 0.003638920825566209, + "i": 513, + "j": 313, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Langdale": { + "prop": 0.005008167107578853, + "i": 504, + "j": 309, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chester": { + "prop": 0.004572836088569398, + "i": 501, + "j": 291, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Roberts": { + "prop": 0.009084279341124641, + "i": 512, + "j": 282, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Rume": { + "prop": 0.0017059766856054263, + "i": 517, + "j": 279, + "di": 1, + "dj": 1, + "depth": 1, + }, +} # Assume that 50% of the area of the JdF watershed defined by Morrison # et al (2011) is on north side of JdF (Canada side) @@ -69,88 +320,224 @@ # al (2011) is on south side of JdF (US side) USFlux = 0.50 # dictionary of rivers in Juan de Fuca watershed -prop_dict['jdf'] = { - 'SanJuan': { - 'prop': 0.33 * CAFlux, 'i': 401, 'j': 62, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Gordon': { - 'prop': 0.14 * CAFlux, 'i': 404, 'j': 62, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Loss': { - 'prop': 0.05 * CAFlux, 'i': 376, 'j': 72, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Jordan': { - 'prop': 0.05 * CAFlux, 'i': 349, 'j': 96, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Muir': { - 'prop': 0.05 * CAFlux, 'i': 326, 'j': 120, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tugwell': { - 'prop': 0.05 * CAFlux, 'i': 325, 'j': 121, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sooke': { - 'prop': 0.33 * CAFlux, 'i': 315, 'j': 144, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Elwha': { - 'prop': 0.60 * 0.50 * USFlux, 'i': 261, 'j': 134, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tumwater': { - 'prop': 0.60 * 0.01 * USFlux, 'i': 248, 'j': 151, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Valley': { - 'prop': 0.60 * 0.01 * USFlux, 'i': 247, 'j': 152, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Ennis': { - 'prop': 0.60 * 0.02 * USFlux, 'i': 244, 'j': 156, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Morse': { - 'prop': 0.60 * 0.07 * USFlux, 'i': 240, 'j': 164, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Bagley': { - 'prop': 0.60 * 0.02 * USFlux, 'i': 239, 'j': 165, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Siebert': { - 'prop': 0.60 * 0.02 * USFlux, 'i': 235, 'j': 174, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'McDonald': { - 'prop': 0.60 * 0.03 * USFlux, 'i': 233, 'j': 183, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'DungenessMatriotti': { - 'prop': 0.60 * 0.30 * USFlux + 0.60 * 0.02 * USFlux, 'i': 233, 'j': 202, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Coville': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 263, 'j': 128, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Salt': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 275, 'j': 116, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Field': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 281, 'j': 101, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lyre': { - 'prop': 0.40 * 0.20 * USFlux, 'i': 283, 'j': 99, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'EastWestTwin': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 293, 'j': 82, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Deep': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 299, 'j': 73, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Pysht': { - 'prop': 0.40 * 0.10 * USFlux, 'i': 311, 'j': 65, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Clallom': { - 'prop': 0.40 * 0.10 * USFlux, 'i': 333, 'j': 45, 'di': 1, 'dj': 1, 'depth': 1, - }, - 
'Hoko': { - 'prop': 0.40 * 0.20 * USFlux, 'i': 346, 'j': 35, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sekiu': { - 'prop': 0.40 * 0.10 * USFlux, 'i': 350, 'j': 31, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sail': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 373, 'j': 17, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["jdf"] = { + "SanJuan": { + "prop": 0.33 * CAFlux, + "i": 401, + "j": 62, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Gordon": { + "prop": 0.14 * CAFlux, + "i": 404, + "j": 62, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Loss": { + "prop": 0.05 * CAFlux, + "i": 376, + "j": 72, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Jordan": { + "prop": 0.05 * CAFlux, + "i": 349, + "j": 96, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Muir": { + "prop": 0.05 * CAFlux, + "i": 326, + "j": 120, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tugwell": { + "prop": 0.05 * CAFlux, + "i": 325, + "j": 121, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sooke": { + "prop": 0.33 * CAFlux, + "i": 315, + "j": 144, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Elwha": { + "prop": 0.60 * 0.50 * USFlux, + "i": 261, + "j": 134, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tumwater": { + "prop": 0.60 * 0.01 * USFlux, + "i": 248, + "j": 151, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Valley": { + "prop": 0.60 * 0.01 * USFlux, + "i": 247, + "j": 152, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Ennis": { + "prop": 0.60 * 0.02 * USFlux, + "i": 244, + "j": 156, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Morse": { + "prop": 0.60 * 0.07 * USFlux, + "i": 240, + "j": 164, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Bagley": { + "prop": 0.60 * 0.02 * USFlux, + "i": 239, + "j": 165, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Siebert": { + "prop": 0.60 * 0.02 * USFlux, + "i": 235, + "j": 174, + "di": 1, + "dj": 1, + "depth": 1, + }, + "McDonald": { + "prop": 0.60 * 0.03 * USFlux, + "i": 233, + "j": 183, + "di": 1, + "dj": 1, + "depth": 1, + }, + "DungenessMatriotti": { + "prop": 0.60 * 0.30 * USFlux + 0.60 * 0.02 * USFlux, + "i": 233, + "j": 202, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Coville": { + "prop": 0.40 * 0.05 * USFlux, + "i": 263, + "j": 128, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Salt": { + "prop": 0.40 * 0.05 * USFlux, + "i": 275, + "j": 116, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Field": { + "prop": 0.40 * 0.05 * USFlux, + "i": 281, + "j": 101, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lyre": { + "prop": 0.40 * 0.20 * USFlux, + "i": 283, + "j": 99, + "di": 1, + "dj": 1, + "depth": 1, + }, + "EastWestTwin": { + "prop": 0.40 * 0.05 * USFlux, + "i": 293, + "j": 82, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Deep": { + "prop": 0.40 * 0.05 * USFlux, + "i": 299, + "j": 73, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Pysht": { + "prop": 0.40 * 0.10 * USFlux, + "i": 311, + "j": 65, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Clallom": { + "prop": 0.40 * 0.10 * USFlux, + "i": 333, + "j": 45, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Hoko": { + "prop": 0.40 * 0.20 * USFlux, + "i": 346, + "j": 35, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sekiu": { + "prop": 0.40 * 0.10 * USFlux, + "i": 350, + "j": 31, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sail": { + "prop": 0.40 * 0.05 * USFlux, + "i": 373, + "j": 17, + "di": 1, + "dj": 1, + "depth": 1, + }, +} # WRIA17 10% of Puget Sound Watershed WRIA17 = 0.10 @@ -172,475 +559,1183 @@ WRIA9 = 0.10 # WRIA8 10% of Puget Sound Watershed WRIA8 = 0.10 -prop_dict['puget'] = { - 'Johnson': { - 'prop': 0.05 * WRIA17, 'i': 208, 'j': 202, 'di': 1, 'dj': 1, 'depth': 1, - }, - 
'Jimmycomelately': { - 'prop': 0.05 * WRIA17, 'i': 199, 'j': 203, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'SalmonSnow': { - 'prop': 0.25 * WRIA17, 'i': 182, 'j': 220, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chimacum': { - 'prop': 0.20 * WRIA17, 'i': 184, 'j': 240, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Thorndike': { - 'prop': 0.05 * WRIA17, 'i': 136, 'j': 214, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Torboo': { - 'prop': 0.05 * WRIA17, 'i': 148, 'j': 208, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'LittleBigQuilcene': { - 'prop': 0.35 * WRIA17, 'i': 145, 'j': 196, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Dosewalips': { - 'prop': 0.20 * WRIA16, 'i': 123, 'j': 177, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Duckabush': { - 'prop': 0.14 * WRIA16, 'i': 118, 'j': 167, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Fulton': { - 'prop': 0.02 * WRIA16, 'i': 117, 'j': 156, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Waketick': { - 'prop': 0.02 * WRIA16, 'i': 108, 'j': 141, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'HammaHamma': { - 'prop': 0.14 * WRIA16, 'i': 107, 'j': 139, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Jorsted': { - 'prop': 0.02 * WRIA16, 'i': 104, 'j': 135, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Eagle': { - 'prop': 0.02 * WRIA16, 'i': 98, 'j': 126, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lilliwaup': { - 'prop': 0.02 * WRIA16, 'i': 95, 'j': 119, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Finch': { - 'prop': 0.02 * WRIA16, 'i': 87, 'j': 108, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Skokomish': { - 'prop': 0.40 * WRIA16, 'i': 76, 'j': 102, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Rendsland': { - 'prop': 0.025 * WRIA15, 'i': 82, 'j': 107, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tahuya': { - 'prop': 0.20 * WRIA15, 'i': 72, 'j': 116, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Mission': { - 'prop': 0.05 * WRIA15, 'i': 73, 'j': 146, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Union': { - 'prop': 0.10 * WRIA15, 'i': 72, 'j': 147, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Coulter': { - 'prop': 0.05 * WRIA15, 'i': 58, 'j': 150, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Minter': { - 'prop': 0.05 * WRIA15, 'i': 46, 'j': 168, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Burley': { - 'prop': 0.05 * WRIA15, 'i': 46, 'j': 179, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Olalla': { - 'prop': 0.05 * WRIA15, 'i': 48, 'j': 198, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Blackjack': { - 'prop': 0.05 * WRIA15, 'i': 79, 'j': 198, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'ClearBarker': { - 'prop': 0.075 * WRIA15, 'i': 82, 'j': 203, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'BigValley': { - 'prop': 0.1 * WRIA15, 'i': 109, 'j': 220, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'BigBear': { - 'prop': 0.05 * WRIA15, 'i': 112, 'j': 189, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Swaback': { - 'prop': 0.025 * WRIA15, 'i': 112, 'j': 185, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Stavis': { - 'prop': 0.025 * WRIA15, 'i': 113, 'j': 175, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Anderson': { - 'prop': 0.05 * WRIA15, 'i': 107, 'j': 150, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Dewatta': { - 'prop': 0.05 * WRIA15, 'i': 94, 'j': 123, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sherwood': { - 'prop': 0.15 * WRIA14, 'i': 57, 'j': 149, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'DeerJohnsGoldboroughMill': { - 'prop': 0.50 * WRIA14, 'i': 34, 'j': 113, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Skookum': { - 'prop': 0.10 * WRIA14, 'i':29, 'j': 95, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'KennedySchneider': { - 'prop': 0.15 * WRIA14, 'i': 23, 'j': 89, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'PerryMcClane': { - 'prop': 0.10 * WRIA14 + 0.10 * WRIA13, 'i': 13, 'j': 91, 'di': 1, 'dj': 
1, 'depth': 1, - }, - 'Deschutes': { - 'prop': 0.70 * WRIA13, 'i': 4, 'j': 100, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Woodward': { - 'prop': 0.10 * WRIA13, 'i': 13, 'j': 120, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Woodland': { - 'prop': 0.10 * WRIA13, 'i': 14, 'j': 119, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chambers': { - 'prop': 1.0 * WRIA12, 'i': 6, 'j': 162, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'NisquallyMcAllister': { - 'prop': 1.0 * WRIA11, 'i': 1, 'j': 135, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Puyallup': { - 'prop': 0.995 * WRIA10, 'i': 11, 'j': 194, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Hylebas': { - 'prop': 0.005 * WRIA10, 'i': 13, 'j': 199, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Duwamish1': { - 'prop': 0.50 * WRIA9, 'i': 67, 'j': 243, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Duwamish2': { - 'prop': 0.50 * WRIA9, 'i': 68, 'j': 246, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'CedarSammamish': { - 'prop': 1.0 * WRIA8, 'i': 89, 'j': 246, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["puget"] = { + "Johnson": { + "prop": 0.05 * WRIA17, + "i": 208, + "j": 202, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Jimmycomelately": { + "prop": 0.05 * WRIA17, + "i": 199, + "j": 203, + "di": 1, + "dj": 1, + "depth": 1, + }, + "SalmonSnow": { + "prop": 0.25 * WRIA17, + "i": 182, + "j": 220, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chimacum": { + "prop": 0.20 * WRIA17, + "i": 184, + "j": 240, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Thorndike": { + "prop": 0.05 * WRIA17, + "i": 136, + "j": 214, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Torboo": { + "prop": 0.05 * WRIA17, + "i": 148, + "j": 208, + "di": 1, + "dj": 1, + "depth": 1, + }, + "LittleBigQuilcene": { + "prop": 0.35 * WRIA17, + "i": 145, + "j": 196, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Dosewalips": { + "prop": 0.20 * WRIA16, + "i": 123, + "j": 177, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Duckabush": { + "prop": 0.14 * WRIA16, + "i": 118, + "j": 167, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Fulton": { + "prop": 0.02 * WRIA16, + "i": 117, + "j": 156, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Waketick": { + "prop": 0.02 * WRIA16, + "i": 108, + "j": 141, + "di": 1, + "dj": 1, + "depth": 1, + }, + "HammaHamma": { + "prop": 0.14 * WRIA16, + "i": 107, + "j": 139, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Jorsted": { + "prop": 0.02 * WRIA16, + "i": 104, + "j": 135, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Eagle": { + "prop": 0.02 * WRIA16, + "i": 98, + "j": 126, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lilliwaup": { + "prop": 0.02 * WRIA16, + "i": 95, + "j": 119, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Finch": { + "prop": 0.02 * WRIA16, + "i": 87, + "j": 108, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Skokomish": { + "prop": 0.40 * WRIA16, + "i": 76, + "j": 102, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Rendsland": { + "prop": 0.025 * WRIA15, + "i": 82, + "j": 107, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tahuya": { + "prop": 0.20 * WRIA15, + "i": 72, + "j": 116, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Mission": { + "prop": 0.05 * WRIA15, + "i": 73, + "j": 146, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Union": { + "prop": 0.10 * WRIA15, + "i": 72, + "j": 147, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Coulter": { + "prop": 0.05 * WRIA15, + "i": 58, + "j": 150, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Minter": { + "prop": 0.05 * WRIA15, + "i": 46, + "j": 168, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Burley": { + "prop": 0.05 * WRIA15, + "i": 46, + "j": 179, + "di": 1, + "dj": 1, + "depth": 1, + }, + 
"Olalla": { + "prop": 0.05 * WRIA15, + "i": 48, + "j": 198, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Blackjack": { + "prop": 0.05 * WRIA15, + "i": 79, + "j": 198, + "di": 1, + "dj": 1, + "depth": 1, + }, + "ClearBarker": { + "prop": 0.075 * WRIA15, + "i": 82, + "j": 203, + "di": 1, + "dj": 1, + "depth": 1, + }, + "BigValley": { + "prop": 0.1 * WRIA15, + "i": 109, + "j": 220, + "di": 1, + "dj": 1, + "depth": 1, + }, + "BigBear": { + "prop": 0.05 * WRIA15, + "i": 112, + "j": 189, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Swaback": { + "prop": 0.025 * WRIA15, + "i": 112, + "j": 185, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stavis": { + "prop": 0.025 * WRIA15, + "i": 113, + "j": 175, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Anderson": { + "prop": 0.05 * WRIA15, + "i": 107, + "j": 150, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Dewatta": { + "prop": 0.05 * WRIA15, + "i": 94, + "j": 123, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sherwood": { + "prop": 0.15 * WRIA14, + "i": 57, + "j": 149, + "di": 1, + "dj": 1, + "depth": 1, + }, + "DeerJohnsGoldboroughMill": { + "prop": 0.50 * WRIA14, + "i": 34, + "j": 113, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Skookum": { + "prop": 0.10 * WRIA14, + "i": 29, + "j": 95, + "di": 1, + "dj": 1, + "depth": 1, + }, + "KennedySchneider": { + "prop": 0.15 * WRIA14, + "i": 23, + "j": 89, + "di": 1, + "dj": 1, + "depth": 1, + }, + "PerryMcClane": { + "prop": 0.10 * WRIA14 + 0.10 * WRIA13, + "i": 13, + "j": 91, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Deschutes": { + "prop": 0.70 * WRIA13, + "i": 4, + "j": 100, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Woodward": { + "prop": 0.10 * WRIA13, + "i": 13, + "j": 120, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Woodland": { + "prop": 0.10 * WRIA13, + "i": 14, + "j": 119, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chambers": { + "prop": 1.0 * WRIA12, + "i": 6, + "j": 162, + "di": 1, + "dj": 1, + "depth": 1, + }, + "NisquallyMcAllister": { + "prop": 1.0 * WRIA11, + "i": 1, + "j": 135, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Puyallup": { + "prop": 0.995 * WRIA10, + "i": 11, + "j": 194, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Hylebas": { + "prop": 0.005 * WRIA10, + "i": 13, + "j": 199, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Duwamish1": { + "prop": 0.50 * WRIA9, + "i": 67, + "j": 243, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Duwamish2": { + "prop": 0.50 * WRIA9, + "i": 68, + "j": 246, + "di": 1, + "dj": 1, + "depth": 1, + }, + "CedarSammamish": { + "prop": 1.0 * WRIA8, + "i": 89, + "j": 246, + "di": 1, + "dj": 1, + "depth": 1, + }, +} WRIA4 = 0.33 WRIA3 = 0.17 WRIA5 = 0.17 WRIA7 = 0.33 -prop_dict['skagit'] = { - 'Skagit1': { - 'prop': 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), 'i': 213, 'j': 313, 'di': 1, 'dj': 1, 'depth': 2, - }, - 'Skagit2': { - 'prop': 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), 'i': 229, 'j': 310, 'di': 1, 'dj': 1, 'depth': 2, - }, - 'Samish': { - 'prop': WRIA3 * 0.20, 'i': 271, 'j': 342, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'JoeLeary': { - 'prop': WRIA3 * 0.05, 'i': 257, 'j': 331, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Stillaguamish1': { - 'prop': 0.7 * WRIA5 * 1.0, 'i': 183, 'j': 308, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Stillaguamish2': { - 'prop': 0.1 * WRIA5 * 1.0, 'i': 188, 'j': 306, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Stillaguamish3': { - 'prop': 0.2 * WRIA5 * 1.0, 'i': 205, 'j': 307, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'SnohomishAllenQuilceda': { - 'prop': WRIA7 * 0.98, 'i': 138, 'j': 311, 'di': 1, 'dj': 1, 'depth': 2, - }, - 'Tulalip': { - 'prop': WRIA7 * 0.01, 'i': 155, 
'j': 310, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Mission': { - 'prop': WRIA7 * 0.01, 'i': 154, 'j': 311, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["skagit"] = { + "Skagit1": { + "prop": 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), + "i": 213, + "j": 313, + "di": 1, + "dj": 1, + "depth": 2, + }, + "Skagit2": { + "prop": 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), + "i": 229, + "j": 310, + "di": 1, + "dj": 1, + "depth": 2, + }, + "Samish": { + "prop": WRIA3 * 0.20, + "i": 271, + "j": 342, + "di": 1, + "dj": 1, + "depth": 3, + }, + "JoeLeary": { + "prop": WRIA3 * 0.05, + "i": 257, + "j": 331, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stillaguamish1": { + "prop": 0.7 * WRIA5 * 1.0, + "i": 183, + "j": 308, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stillaguamish2": { + "prop": 0.1 * WRIA5 * 1.0, + "i": 188, + "j": 306, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stillaguamish3": { + "prop": 0.2 * WRIA5 * 1.0, + "i": 205, + "j": 307, + "di": 1, + "dj": 1, + "depth": 1, + }, + "SnohomishAllenQuilceda": { + "prop": WRIA7 * 0.98, + "i": 138, + "j": 311, + "di": 1, + "dj": 1, + "depth": 2, + }, + "Tulalip": { + "prop": WRIA7 * 0.01, + "i": 155, + "j": 310, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Mission": { + "prop": WRIA7 * 0.01, + "i": 154, + "j": 311, + "di": 1, + "dj": 1, + "depth": 1, + }, +} WRIA1 = 0.016 Nooksack_cor = WRIA1 * 0.75 / 2040 Fraser = 1 - WRIA1 - Nooksack_cor * (116 + 149 + 60 + 31) -prop_dict['fraser'] = { - 'Serpentine': { - 'prop': 116 * Nooksack_cor, 'i': 390, 'j': 350, - 'di': 1, 'dj': 1, 'depth': 1 - }, - 'Nicomekl': { - 'prop': 149 * Nooksack_cor, 'i': 388, 'j': 350, - 'di': 1, 'dj': 1, 'depth': 1 - }, - 'LittleCampbell': { - 'prop': 60 * Nooksack_cor, 'i': 372, 'j': 357, - 'di': 1, 'dj': 1, 'depth': 1 - }, - 'Colebrook': { - 'prop': 31 * Nooksack_cor, 'i': 390, 'j': 346, - 'di': 1, 'dj': 1, 'depth': 1 - }, - 'Dakota': { - 'prop': WRIA1 * 0.06, 'i': 365, 'j': 357, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Terrel': { - 'prop': WRIA1 * 0.04, 'i': 353, 'j': 349, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nooksack': { - 'prop': WRIA1 * 0.75/2., 'i': 308, 'j': 356, - 'di': 1, 'dj': 2, 'depth': 1, - }, - 'NooksackW': { - 'prop': WRIA1 * 0.75/4., 'i': 309, 'j': 349, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'NooksackE': { - 'prop': WRIA1 * 0.75/4., 'i': 308, 'j': 361, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Squallum': { - 'prop': WRIA1 * 0.05, 'i': 305, 'j': 365, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lakethingo': { - 'prop': WRIA1 * 0.06, 'i': 303, 'j': 367, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chuckanut': { - 'prop': WRIA1 * 0.04, 'i': 298, 'j': 361, - 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Fraser': { - 'prop': Fraser, 'i': 500, 'j': 394, - 'di': 1, 'dj': 1, 'depth': 3, - }} +prop_dict["fraser"] = { + "Serpentine": { + "prop": 116 * Nooksack_cor, + "i": 390, + "j": 350, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nicomekl": { + "prop": 149 * Nooksack_cor, + "i": 388, + "j": 350, + "di": 1, + "dj": 1, + "depth": 1, + }, + "LittleCampbell": { + "prop": 60 * Nooksack_cor, + "i": 372, + "j": 357, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Colebrook": { + "prop": 31 * Nooksack_cor, + "i": 390, + "j": 346, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Dakota": { + "prop": WRIA1 * 0.06, + "i": 365, + "j": 357, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Terrel": { + "prop": WRIA1 * 0.04, + "i": 353, + "j": 349, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nooksack": { + "prop": WRIA1 * 0.75 / 2.0, + "i": 308, + "j": 356, + "di": 1, + "dj": 2, + "depth": 1, + }, + "NooksackW": { + "prop": WRIA1 * 
0.75 / 4.0, + "i": 309, + "j": 349, + "di": 1, + "dj": 1, + "depth": 1, + }, + "NooksackE": { + "prop": WRIA1 * 0.75 / 4.0, + "i": 308, + "j": 361, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Squallum": { + "prop": WRIA1 * 0.05, + "i": 305, + "j": 365, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lakethingo": { + "prop": WRIA1 * 0.06, + "i": 303, + "j": 367, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chuckanut": { + "prop": WRIA1 * 0.04, + "i": 298, + "j": 361, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Fraser": { + "prop": Fraser, + "i": 500, + "j": 394, + "di": 1, + "dj": 1, + "depth": 3, + }, +} totalarea = 9709.0 -prop_dict['evi_n'] = { - 'Oyster': { - 'prop': 363 / totalarea, 'i': 705, 'j': 123, 'di': 1, 'dj': 1, 'depth': 1}, - 'Qunisam': { - 'prop': 1470 / totalarea, 'i': 750, 'j': 123, 'di': 2, 'dj': 1, 'depth': 1}, - 'Snowden': { - 'prop': 139 / totalarea, 'i': 771, 'j': 116, 'di': 1, 'dj': 1, 'depth': 1}, - 'Menzies': { - 'prop': 31 / totalarea, 'i': 774, 'j': 115, 'di': 1, 'dj': 1, 'depth': 1}, - 'Creek1': { - 'prop': 23 / totalarea, 'i': 788, 'j': 123, 'di': 1, 'dj': 1, 'depth': 1}, - 'Creek2': { - 'prop': 16 / totalarea, 'i': 796, 'j': 126, 'di': 1, 'dj': 1, 'depth': 1}, - 'Creek3': { - 'prop': 23 / totalarea, 'i': 799, 'j': 127, 'di': 1, 'dj': 1, 'depth': 1}, - 'Elk': { - 'prop': 23 / totalarea, 'i': 808, 'j': 126, 'di': 1, 'dj': 1, 'depth': 1}, - 'Slab': { - 'prop': 12 / totalarea, 'i': 813, 'j': 128, 'di': 1, 'dj': 1, 'depth': 1}, - 'Pye': { - 'prop': 109 / totalarea, 'i': 826, 'j': 121, 'di': 1, 'dj': 1, 'depth': 1}, - 'BearPoint': { - 'prop': 12 / totalarea, 'i': 839, 'j': 108, 'di': 1, 'dj': 1, 'depth': 1}, - 'AmordeCosmos': { - 'prop': 229 / totalarea, 'i': 843, 'j': 96, 'di': 1, 'dj': 1, 'depth': 1}, - 'Humpback': { - 'prop': 10 / totalarea, 'i': 844, 'j': 94, 'di': 1, 'dj': 1, 'depth': 1}, - 'Palmer': { - 'prop': 14 / totalarea, 'i': 845, 'j': 92, 'di': 1, 'dj': 1, 'depth': 1}, - 'Hkusam': { - 'prop': 14 / totalarea, 'i': 849, 'j': 87, 'di': 1, 'dj': 1, 'depth': 1}, - 'CampPoint': { - 'prop': 28 / totalarea, 'i': 857, 'j': 78, 'di': 1, 'dj': 1, 'depth': 1}, - 'SalmonSayward': { - 'prop': (1210 + 14) / totalarea, 'i': 865, 'j': 64, 'di': 1, 'dj': 1, 'depth': 1}, - 'Kelsey': { - 'prop': 7 / totalarea, 'i': 878, 'j': 58, 'di': 1, 'dj': 1, 'depth': 1}, - 'unmarked': { - 'prop': 7 / totalarea, 'i': 884, 'j': 53, 'di': 1, 'dj': 1, 'depth': 1}, - 'Newcastle': { - 'prop': 34 / totalarea, - 'i': 890, 'j': 47, - 'di': 1, 'dj': 1, 'depth': 1}, - 'Windy': { - 'prop': 10 / totalarea, 'i': 891, 'j': 46, - 'di': 1, 'dj': 1, 'depth': 1}} +prop_dict["evi_n"] = { + "Oyster": { + "prop": 363 / totalarea, + "i": 705, + "j": 123, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Qunisam": { + "prop": 1470 / totalarea, + "i": 750, + "j": 123, + "di": 2, + "dj": 1, + "depth": 1, + }, + "Snowden": { + "prop": 139 / totalarea, + "i": 771, + "j": 116, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Menzies": { + "prop": 31 / totalarea, + "i": 774, + "j": 115, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Creek1": { + "prop": 23 / totalarea, + "i": 788, + "j": 123, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Creek2": { + "prop": 16 / totalarea, + "i": 796, + "j": 126, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Creek3": { + "prop": 23 / totalarea, + "i": 799, + "j": 127, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Elk": {"prop": 23 / totalarea, "i": 808, "j": 126, "di": 1, "dj": 1, "depth": 1}, + "Slab": {"prop": 12 / totalarea, "i": 813, "j": 128, "di": 1, "dj": 1, "depth": 1}, + "Pye": {"prop": 109 / 
totalarea, "i": 826, "j": 121, "di": 1, "dj": 1, "depth": 1}, + "BearPoint": { + "prop": 12 / totalarea, + "i": 839, + "j": 108, + "di": 1, + "dj": 1, + "depth": 1, + }, + "AmordeCosmos": { + "prop": 229 / totalarea, + "i": 843, + "j": 96, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Humpback": { + "prop": 10 / totalarea, + "i": 844, + "j": 94, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Palmer": {"prop": 14 / totalarea, "i": 845, "j": 92, "di": 1, "dj": 1, "depth": 1}, + "Hkusam": {"prop": 14 / totalarea, "i": 849, "j": 87, "di": 1, "dj": 1, "depth": 1}, + "CampPoint": { + "prop": 28 / totalarea, + "i": 857, + "j": 78, + "di": 1, + "dj": 1, + "depth": 1, + }, + "SalmonSayward": { + "prop": (1210 + 14) / totalarea, + "i": 865, + "j": 64, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Kelsey": {"prop": 7 / totalarea, "i": 878, "j": 58, "di": 1, "dj": 1, "depth": 1}, + "unmarked": { + "prop": 7 / totalarea, + "i": 884, + "j": 53, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Newcastle": { + "prop": 34 / totalarea, + "i": 890, + "j": 47, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Windy": {"prop": 10 / totalarea, "i": 891, "j": 46, "di": 1, "dj": 1, "depth": 1}, +} # Jervis Inlet only area = 1400km2 (Trites 1955) ==> 25% of Jervis # watershed Jervis = 0.25 -prop_dict['jervis'] = { - 'SkwawkaHunaechin': { - 'prop': Jervis * 0.20, 'i': 692, 'j': 332, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Loquilts': { - 'prop': Jervis * 0.04, 'i': 674, 'j': 346, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Potato': { - 'prop': Jervis * 0.04, 'i': 666, 'j': 349, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Deserted': { - 'prop': Jervis * 0.10, 'i': 653, 'j': 353, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Stakawus': { - 'prop': Jervis * 0.04, 'i': 651, 'j': 346, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Crabapple': { - 'prop': Jervis * 0.04, 'i': 665, 'j': 342, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Osgood': { - 'prop': Jervis * 0.04, 'i': 652, 'j': 323, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lausmann': { - 'prop': Jervis * 0.03, 'i': 690, 'j': 332, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Slane': { - 'prop': Jervis * 0.03, 'i': 687, 'j': 331, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Smanit': { - 'prop': Jervis * 0.04, 'i': 681, 'j': 334, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Glacial': { - 'prop': Jervis * 0.05, 'i': 649, 'j': 310, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Seshal': { - 'prop': Jervis * 0.05, 'i': 652, 'j': 318, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Brittain': { - 'prop': Jervis * 0.10, 'i': 652, 'j': 301, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'VancouverHigh': { - 'prop': Jervis * 0.10, 'i': 628, 'j': 312, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Perketts': { - 'prop': Jervis * 0.05, 'i': 619, 'j': 307, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Treat': { - 'prop': Jervis * 0.05, 'i': 612, 'j': 302, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Sechelt': { - 'prop': 0.17, 'i': 593, 'j': 285, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Powell': { - 'prop': 0.32, 'i': 667, 'j': 203, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lois': { - 'prop': 0.10, 'i': 629, 'j': 227, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Haslam': { - 'prop': 0.02, 'i': 633, 'j': 219, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chapman': { - 'prop': 0.016, 'i': 522, 'j': 273, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Lapan': { - 'prop': 0.02, 'i': 620, 'j': 283, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nelson': { - 'prop': 0.02, 'i': 604, 'j': 262, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Wakefield': { - 'prop': 0.02, 'i': 534, 'j': 264, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Halfmoon': { - 'prop': 0.02, 'i': 549, 
'j': 254, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'MyersKleindaleAnderson': { - 'prop': 0.04, 'i': 571, 'j': 248, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Wilson': { - 'prop': 0.004, 'i': 521, 'j': 274, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["jervis"] = { + "SkwawkaHunaechin": { + "prop": Jervis * 0.20, + "i": 692, + "j": 332, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Loquilts": { + "prop": Jervis * 0.04, + "i": 674, + "j": 346, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Potato": { + "prop": Jervis * 0.04, + "i": 666, + "j": 349, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Deserted": { + "prop": Jervis * 0.10, + "i": 653, + "j": 353, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Stakawus": { + "prop": Jervis * 0.04, + "i": 651, + "j": 346, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Crabapple": { + "prop": Jervis * 0.04, + "i": 665, + "j": 342, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Osgood": { + "prop": Jervis * 0.04, + "i": 652, + "j": 323, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lausmann": { + "prop": Jervis * 0.03, + "i": 690, + "j": 332, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Slane": { + "prop": Jervis * 0.03, + "i": 687, + "j": 331, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Smanit": { + "prop": Jervis * 0.04, + "i": 681, + "j": 334, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Glacial": { + "prop": Jervis * 0.05, + "i": 649, + "j": 310, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Seshal": { + "prop": Jervis * 0.05, + "i": 652, + "j": 318, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Brittain": { + "prop": Jervis * 0.10, + "i": 652, + "j": 301, + "di": 1, + "dj": 1, + "depth": 1, + }, + "VancouverHigh": { + "prop": Jervis * 0.10, + "i": 628, + "j": 312, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Perketts": { + "prop": Jervis * 0.05, + "i": 619, + "j": 307, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Treat": { + "prop": Jervis * 0.05, + "i": 612, + "j": 302, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sechelt": { + "prop": 0.17, + "i": 593, + "j": 285, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Powell": { + "prop": 0.32, + "i": 667, + "j": 203, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lois": { + "prop": 0.10, + "i": 629, + "j": 227, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Haslam": { + "prop": 0.02, + "i": 633, + "j": 219, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chapman": { + "prop": 0.016, + "i": 522, + "j": 273, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lapan": { + "prop": 0.02, + "i": 620, + "j": 283, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nelson": { + "prop": 0.02, + "i": 604, + "j": 262, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Wakefield": { + "prop": 0.02, + "i": 534, + "j": 264, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Halfmoon": { + "prop": 0.02, + "i": 549, + "j": 254, + "di": 1, + "dj": 1, + "depth": 1, + }, + "MyersKleindaleAnderson": { + "prop": 0.04, + "i": 571, + "j": 248, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Wilson": { + "prop": 0.004, + "i": 521, + "j": 274, + "di": 1, + "dj": 1, + "depth": 1, + }, +} # Wilson area = 24.58 km2, Jervis ws is 5785 km2 so 0.004 (take out of Chapman) -prop_dict['toba'] = { - 'Toba': { - 'prop': 0.50, 'i': 775, 'j': 311, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Theodosia': { - 'prop': 0.12, 'i': 713, 'j': 197, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Quatam': { - 'prop': 0.09, 'i': 794, 'j': 211, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Brem': { - 'prop': 0.09, 'i': 785, 'j': 260, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tahumming': { - 'prop': 0.08, 'i': 777, 'j': 309, 'di': 1, 'dj': 1, 'depth': 1, - }, - 
'Racine': { - 'prop': 0.04, 'i': 770, 'j': 272, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Homfray': { - 'prop': 0.03, 'i': 754, 'j': 245, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Forbes': { - 'prop': 0.03, 'i': 742, 'j': 247, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chusan': { - 'prop': 0.02, 'i': 773, 'j': 307, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["toba"] = { + "Toba": { + "prop": 0.50, + "i": 775, + "j": 311, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Theodosia": { + "prop": 0.12, + "i": 713, + "j": 197, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Quatam": { + "prop": 0.09, + "i": 794, + "j": 211, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Brem": { + "prop": 0.09, + "i": 785, + "j": 260, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tahumming": { + "prop": 0.08, + "i": 777, + "j": 309, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Racine": { + "prop": 0.04, + "i": 770, + "j": 272, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Homfray": { + "prop": 0.03, + "i": 754, + "j": 245, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Forbes": { + "prop": 0.03, + "i": 742, + "j": 247, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chusan": { + "prop": 0.02, + "i": 773, + "j": 307, + "di": 1, + "dj": 1, + "depth": 1, + }, +} -prop_dict['bute'] = { - 'Homathko': { - 'prop': 0.58, 'i': 896, 'j': 293, 'di': 1, 'dj': 3, 'depth': 2, - }, - 'Southgate': { - 'prop': 0.35, 'i': 885, 'j': 297, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Orford': { - 'prop': 0.07, 'i': 830, 'j': 250, 'di': 1, 'dj': 1, 'depth': 1, - }} - -prop_dict['evi_s'] = { - 'Cowichan1': { - 'prop': 0.5*0.22, 'i': 383, 'j': 201, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Cowichan2': { - 'prop': 0.5*0.22, 'i': 382, 'j': 200, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chemanius1': { - 'prop': 0.5 * 0.13, 'i': 415, 'j': 213, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Chemanius2': { - 'prop': 0.5 * 0.13, 'i': 418, 'j': 212, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nanaimo1': { - 'prop': 0.67 * 0.14, 'i': 479, 'j': 210, 'di': 1, 'dj': 2, 'depth': 1, - }, - 'Nanaimo2': { - 'prop': 0.33 * 0.14, 'i': 478, 'j': 211, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'NorNanaimo': { - 'prop': 0.02, 'i': 486, 'j': 208, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Goldstream': { - 'prop': 0.08, 'i': 329, 'j': 182, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Nanoose': { - 'prop': 0.02, 'i': 520, 'j': 183, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Englishman': { - 'prop': 0.05, 'i': 542, 'j': 175, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'FrenchCreek': { - 'prop': 0.01, 'i': 551, 'j': 168, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'LittleQualicum': { - 'prop': 0.05, 'i': 564, 'j': 149, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Qualicum': { - 'prop': 0.02, 'i': 578, 'j': 138, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'SouthDenman': { - 'prop': 0.05, 'i': 602, 'j': 122, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Tsable': { - 'prop': 0.03, 'i': 616, 'j': 120, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Trent': { - 'prop': 0.01, 'i': 649, 'j': 121, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'Puntledge': { - 'prop': 0.14, 'i': 654, 'j': 120, 'di': 1, 'dj': 1, 'depth': 1, - }, - 'BlackCreek': { - 'prop': 0.03, 'i': 701, 'j': 123, 'di': 1, 'dj': 1, 'depth': 1, - }} +prop_dict["bute"] = { + "Homathko": { + "prop": 0.58, + "i": 896, + "j": 293, + "di": 1, + "dj": 3, + "depth": 2, + }, + "Southgate": { + "prop": 0.35, + "i": 885, + "j": 297, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Orford": { + "prop": 0.07, + "i": 830, + "j": 250, + "di": 1, + "dj": 1, + "depth": 1, + }, +} +prop_dict["evi_s"] = { + "Cowichan1": { + "prop": 0.5 * 0.22, + "i": 383, + "j": 
201, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Cowichan2": { + "prop": 0.5 * 0.22, + "i": 382, + "j": 200, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chemanius1": { + "prop": 0.5 * 0.13, + "i": 415, + "j": 213, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Chemanius2": { + "prop": 0.5 * 0.13, + "i": 418, + "j": 212, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nanaimo1": { + "prop": 0.67 * 0.14, + "i": 479, + "j": 210, + "di": 1, + "dj": 2, + "depth": 1, + }, + "Nanaimo2": { + "prop": 0.33 * 0.14, + "i": 478, + "j": 211, + "di": 1, + "dj": 1, + "depth": 1, + }, + "NorNanaimo": { + "prop": 0.02, + "i": 486, + "j": 208, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Goldstream": { + "prop": 0.08, + "i": 329, + "j": 182, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Nanoose": { + "prop": 0.02, + "i": 520, + "j": 183, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Englishman": { + "prop": 0.05, + "i": 542, + "j": 175, + "di": 1, + "dj": 1, + "depth": 1, + }, + "FrenchCreek": { + "prop": 0.01, + "i": 551, + "j": 168, + "di": 1, + "dj": 1, + "depth": 1, + }, + "LittleQualicum": { + "prop": 0.05, + "i": 564, + "j": 149, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Qualicum": { + "prop": 0.02, + "i": 578, + "j": 138, + "di": 1, + "dj": 1, + "depth": 1, + }, + "SouthDenman": { + "prop": 0.05, + "i": 602, + "j": 122, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Tsable": { + "prop": 0.03, + "i": 616, + "j": 120, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Trent": { + "prop": 0.01, + "i": 649, + "j": 121, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Puntledge": { + "prop": 0.14, + "i": 654, + "j": 120, + "di": 1, + "dj": 1, + "depth": 1, + }, + "BlackCreek": { + "prop": 0.03, + "i": 701, + "j": 123, + "di": 1, + "dj": 1, + "depth": 1, + }, +} diff --git a/SalishSeaTools/salishsea_tools/river_downbyone2.py b/SalishSeaTools/salishsea_tools/river_downbyone2.py index 95029661..5d8cb03a 100644 --- a/SalishSeaTools/salishsea_tools/river_downbyone2.py +++ b/SalishSeaTools/salishsea_tools/river_downbyone2.py @@ -21,13 +21,24 @@ prop_dict = {} # dictionary of rivers in Howe watershed -prop_dict['howe'] = { - 'Squamish': { - 'prop': 0.9, 'i': 532, 'j': 385, 'di': 1, 'dj': 2, 'depth': 3, - }, - 'Burrard': { - 'prop': 0.1, 'i': 457, 'j': 343, 'di': 3, 'dj': 1, 'depth': 3, - }} +prop_dict["howe"] = { + "Squamish": { + "prop": 0.9, + "i": 532, + "j": 385, + "di": 1, + "dj": 2, + "depth": 3, + }, + "Burrard": { + "prop": 0.1, + "i": 457, + "j": 343, + "di": 3, + "dj": 1, + "depth": 3, + }, +} # Assume that 50% of the area of the JdF watershed defined by Morrison # et al (2011) is on north side of JdF (Canada side) @@ -36,88 +47,224 @@ # al (2011) is on south side of JdF (US side) USFlux = 0.50 # dictionary of rivers in Juan de Fuca watershed -prop_dict['jdf'] = { - 'SanJuan': { - 'prop': 0.33 * CAFlux, 'i': 402, 'j': 56, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Gordon': { - 'prop': 0.14 * CAFlux, 'i': 403, 'j': 56, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Loss': { - 'prop': 0.05 * CAFlux, 'i': 375, 'j': 71, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Jordan': { - 'prop': 0.05 * CAFlux, 'i': 348, 'j': 96, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Muir': { - 'prop': 0.05 * CAFlux, 'i': 326, 'j': 119, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Tugwell': { - 'prop': 0.05 * CAFlux, 'i': 325, 'j': 120, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Sooke': { - 'prop': 0.33 * CAFlux, 'i': 308, 'j': 137, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Elwha': { - 'prop': 0.60 * 0.50 * USFlux, 'i': 261, 'j': 134, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Tumwater': { - 'prop': 
0.60 * 0.01 * USFlux, 'i': 248, 'j': 151, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Valley': { - 'prop': 0.60 * 0.01 * USFlux, 'i': 247, 'j': 152, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Ennis': { - 'prop': 0.60 * 0.02 * USFlux, 'i': 244, 'j': 156, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Morse': { - 'prop': 0.60 * 0.07 * USFlux, 'i': 240, 'j': 164, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Bagley': { - 'prop': 0.60 * 0.02 * USFlux, 'i': 239, 'j': 165, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Siebert': { - 'prop': 0.60 * 0.02 * USFlux, 'i': 235, 'j': 174, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'McDonald': { - 'prop': 0.60 * 0.03 * USFlux, 'i': 233, 'j': 183, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'DungenessMatriotti': { - 'prop': 0.60 * 0.30 * USFlux + 0.60 * 0.02 * USFlux, 'i': 231, 'j': 201, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Coville': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 263, 'j': 128, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Salt': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 275, 'j': 116, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Field': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 281, 'j': 100, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Lyre': { - 'prop': 0.40 * 0.20 * USFlux, 'i': 283, 'j': 98, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'EastWestTwin': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 293, 'j': 81, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Deep': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 299, 'j': 72, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Pysht': { - 'prop': 0.40 * 0.10 * USFlux, 'i': 310, 'j': 65, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Clallom': { - 'prop': 0.40 * 0.10 * USFlux, 'i': 333, 'j': 45, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Hoko': { - 'prop': 0.40 * 0.20 * USFlux, 'i': 345, 'j': 35, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Sekiu': { - 'prop': 0.40 * 0.10 * USFlux, 'i': 348, 'j': 31, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Sail': { - 'prop': 0.40 * 0.05 * USFlux, 'i': 373, 'j': 17, 'di': 1, 'dj': 1, 'depth': 3, - }} +prop_dict["jdf"] = { + "SanJuan": { + "prop": 0.33 * CAFlux, + "i": 402, + "j": 56, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Gordon": { + "prop": 0.14 * CAFlux, + "i": 403, + "j": 56, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Loss": { + "prop": 0.05 * CAFlux, + "i": 375, + "j": 71, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Jordan": { + "prop": 0.05 * CAFlux, + "i": 348, + "j": 96, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Muir": { + "prop": 0.05 * CAFlux, + "i": 326, + "j": 119, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Tugwell": { + "prop": 0.05 * CAFlux, + "i": 325, + "j": 120, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Sooke": { + "prop": 0.33 * CAFlux, + "i": 308, + "j": 137, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Elwha": { + "prop": 0.60 * 0.50 * USFlux, + "i": 261, + "j": 134, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Tumwater": { + "prop": 0.60 * 0.01 * USFlux, + "i": 248, + "j": 151, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Valley": { + "prop": 0.60 * 0.01 * USFlux, + "i": 247, + "j": 152, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Ennis": { + "prop": 0.60 * 0.02 * USFlux, + "i": 244, + "j": 156, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Morse": { + "prop": 0.60 * 0.07 * USFlux, + "i": 240, + "j": 164, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Bagley": { + "prop": 0.60 * 0.02 * USFlux, + "i": 239, + "j": 165, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Siebert": { + "prop": 0.60 * 0.02 * USFlux, + "i": 235, + "j": 174, + "di": 1, + "dj": 1, + "depth": 3, + }, + "McDonald": { + "prop": 0.60 * 0.03 * USFlux, + "i": 233, + "j": 183, + "di": 1, + "dj": 1, + "depth": 3, + }, + 
"DungenessMatriotti": { + "prop": 0.60 * 0.30 * USFlux + 0.60 * 0.02 * USFlux, + "i": 231, + "j": 201, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Coville": { + "prop": 0.40 * 0.05 * USFlux, + "i": 263, + "j": 128, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Salt": { + "prop": 0.40 * 0.05 * USFlux, + "i": 275, + "j": 116, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Field": { + "prop": 0.40 * 0.05 * USFlux, + "i": 281, + "j": 100, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Lyre": { + "prop": 0.40 * 0.20 * USFlux, + "i": 283, + "j": 98, + "di": 1, + "dj": 1, + "depth": 3, + }, + "EastWestTwin": { + "prop": 0.40 * 0.05 * USFlux, + "i": 293, + "j": 81, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Deep": { + "prop": 0.40 * 0.05 * USFlux, + "i": 299, + "j": 72, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Pysht": { + "prop": 0.40 * 0.10 * USFlux, + "i": 310, + "j": 65, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Clallom": { + "prop": 0.40 * 0.10 * USFlux, + "i": 333, + "j": 45, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Hoko": { + "prop": 0.40 * 0.20 * USFlux, + "i": 345, + "j": 35, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Sekiu": { + "prop": 0.40 * 0.10 * USFlux, + "i": 348, + "j": 31, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Sail": { + "prop": 0.40 * 0.05 * USFlux, + "i": 373, + "j": 17, + "di": 1, + "dj": 1, + "depth": 3, + }, +} # WRIA17 10% of Puget Sound Watershed WRIA17 = 0.10 @@ -139,388 +286,965 @@ WRIA9 = 0.10 # WRIA8 10% of Puget Sound Watershed WRIA8 = 0.10 -prop_dict['puget'] = { - 'Johnson': { - 'prop': 0.05 * WRIA17, 'i': 207, 'j': 202, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Jimmycomelately': { - 'prop': 0.05 * WRIA17, 'i': 199, 'j': 202, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'SalmonSnow': { - 'prop': 0.25 * WRIA17, 'i': 182, 'j': 219, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Chimacum': { - 'prop': 0.20 * WRIA17, 'i': 185, 'j': 240, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Thorndike': { - 'prop': 0.05 * WRIA17, 'i': 137, 'j': 215, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Torboo': { - 'prop': 0.05 * WRIA17, 'i': 149, 'j': 208, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'LittleBigQuilcene': { - 'prop': 0.35 * WRIA17, 'i': 146, 'j': 199, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Dosewalips': { - 'prop': 0.20 * WRIA16, 'i': 124, 'j': 177, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Duckabush': { - 'prop': 0.14 * WRIA16, 'i': 119, 'j': 167, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Fulton': { - 'prop': 0.02 * WRIA16, 'i': 116, 'j': 156, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Waketick': { - 'prop': 0.02 * WRIA16, 'i': 108, 'j': 141, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'HammaHamma': { - 'prop': 0.14 * WRIA16, 'i': 107, 'j': 139, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Jorsted': { - 'prop': 0.02 * WRIA16, 'i': 104, 'j': 135, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Eagle': { - 'prop': 0.02 * WRIA16, 'i': 98, 'j': 127, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Lilliwaup': { - 'prop': 0.02 * WRIA16, 'i': 95, 'j': 118, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Finch': { - 'prop': 0.02 * WRIA16, 'i': 87, 'j': 108, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Skokomish': { - 'prop': 0.40 * WRIA16, 'i': 75, 'j': 103, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Rendsland': { - 'prop': 0.025 * WRIA15, 'i': 81, 'j': 107, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Tahuya': { - 'prop': 0.20 * WRIA15, 'i': 72, 'j': 114, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Mission': { - 'prop': 0.05 * WRIA15, 'i': 73, 'j': 149, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Union': { - 'prop': 0.10 * WRIA15, 'i': 74, 'j': 153, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Coulter': { - 
'prop': 0.05 * WRIA15, 'i': 64, 'j': 153, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Minter': { - 'prop': 0.05 * WRIA15, 'i': 46, 'j': 168, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Butley': { - 'prop': 0.05 * WRIA15, 'i': 47, 'j': 178, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Olalla': { - 'prop': 0.05 * WRIA15, 'i': 48, 'j': 197, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'BlackjackClearBarkerBigValley1': { - 'prop': 0.1125 * WRIA15, 'i': 68, 'j': 210, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'BlackjackClearBarkerBigValley2': { - 'prop': 0.1125 * WRIA15, 'i': 108, 'j': 232, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'BigBear': { - 'prop': 0.05 * WRIA15, 'i': 112, 'j': 189, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Swaback': { - 'prop': 0.025 * WRIA15, 'i': 112, 'j': 185, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Stavis': { - 'prop': 0.025 * WRIA15, 'i': 113, 'j': 174, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Anderson': { - 'prop': 0.05 * WRIA15, 'i': 107, 'j': 150, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Dewatta': { - 'prop': 0.05 * WRIA15, 'i': 94, 'j': 122, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Sherwood': { - 'prop': 0.15 * WRIA14, 'i': 60, 'j': 149, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'DeerJohnsGoldboroughMillSkookumKennedySchneider': { - 'prop': 0.375 * WRIA14, 'i': 47, 'j': 130, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'DeerJohnsGoldboroughMillSkookumKennedySchneiderPerry': { - 'prop': 0.475 * WRIA14, 'i': 20, 'j': 120, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'McClaneDeschutesWoodwardWoodland': { - 'prop': 1.0 * WRIA13, 'i': 22, 'j': 121, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Chambers': { - 'prop': 1.0 * WRIA12, 'i': 6, 'j': 162, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'NisquallyMcAllister': { - 'prop': 1.0 * WRIA11, 'i': 0, 'j': 137, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Puyallup': { - 'prop': 0.995 * WRIA10, 'i': 10, 'j': 195, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Hylebas': { - 'prop': 0.005 * WRIA10, 'i': 13, 'j': 199, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Duwamish1': { - 'prop': 0.50 * WRIA9, 'i': 68, 'j': 243, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Duwamish2': { - 'prop': 0.50 * WRIA9, 'i': 68, 'j': 246, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'CedarSammamish': { - 'prop': 1.0 * WRIA8, 'i': 88, 'j': 246, 'di': 1, 'dj': 1, 'depth': 3, - }} +prop_dict["puget"] = { + "Johnson": { + "prop": 0.05 * WRIA17, + "i": 207, + "j": 202, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Jimmycomelately": { + "prop": 0.05 * WRIA17, + "i": 199, + "j": 202, + "di": 1, + "dj": 1, + "depth": 3, + }, + "SalmonSnow": { + "prop": 0.25 * WRIA17, + "i": 182, + "j": 219, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Chimacum": { + "prop": 0.20 * WRIA17, + "i": 185, + "j": 240, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Thorndike": { + "prop": 0.05 * WRIA17, + "i": 137, + "j": 215, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Torboo": { + "prop": 0.05 * WRIA17, + "i": 149, + "j": 208, + "di": 1, + "dj": 1, + "depth": 3, + }, + "LittleBigQuilcene": { + "prop": 0.35 * WRIA17, + "i": 146, + "j": 199, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Dosewalips": { + "prop": 0.20 * WRIA16, + "i": 124, + "j": 177, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Duckabush": { + "prop": 0.14 * WRIA16, + "i": 119, + "j": 167, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Fulton": { + "prop": 0.02 * WRIA16, + "i": 116, + "j": 156, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Waketick": { + "prop": 0.02 * WRIA16, + "i": 108, + "j": 141, + "di": 1, + "dj": 1, + "depth": 3, + }, + "HammaHamma": { + "prop": 0.14 * WRIA16, + "i": 107, + "j": 139, + "di": 1, + "dj": 1, + "depth": 3, + }, + 
"Jorsted": { + "prop": 0.02 * WRIA16, + "i": 104, + "j": 135, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Eagle": { + "prop": 0.02 * WRIA16, + "i": 98, + "j": 127, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Lilliwaup": { + "prop": 0.02 * WRIA16, + "i": 95, + "j": 118, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Finch": { + "prop": 0.02 * WRIA16, + "i": 87, + "j": 108, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Skokomish": { + "prop": 0.40 * WRIA16, + "i": 75, + "j": 103, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Rendsland": { + "prop": 0.025 * WRIA15, + "i": 81, + "j": 107, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Tahuya": { + "prop": 0.20 * WRIA15, + "i": 72, + "j": 114, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Mission": { + "prop": 0.05 * WRIA15, + "i": 73, + "j": 149, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Union": { + "prop": 0.10 * WRIA15, + "i": 74, + "j": 153, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Coulter": { + "prop": 0.05 * WRIA15, + "i": 64, + "j": 153, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Minter": { + "prop": 0.05 * WRIA15, + "i": 46, + "j": 168, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Butley": { + "prop": 0.05 * WRIA15, + "i": 47, + "j": 178, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Olalla": { + "prop": 0.05 * WRIA15, + "i": 48, + "j": 197, + "di": 1, + "dj": 1, + "depth": 3, + }, + "BlackjackClearBarkerBigValley1": { + "prop": 0.1125 * WRIA15, + "i": 68, + "j": 210, + "di": 1, + "dj": 1, + "depth": 3, + }, + "BlackjackClearBarkerBigValley2": { + "prop": 0.1125 * WRIA15, + "i": 108, + "j": 232, + "di": 1, + "dj": 1, + "depth": 3, + }, + "BigBear": { + "prop": 0.05 * WRIA15, + "i": 112, + "j": 189, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Swaback": { + "prop": 0.025 * WRIA15, + "i": 112, + "j": 185, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Stavis": { + "prop": 0.025 * WRIA15, + "i": 113, + "j": 174, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Anderson": { + "prop": 0.05 * WRIA15, + "i": 107, + "j": 150, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Dewatta": { + "prop": 0.05 * WRIA15, + "i": 94, + "j": 122, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Sherwood": { + "prop": 0.15 * WRIA14, + "i": 60, + "j": 149, + "di": 1, + "dj": 1, + "depth": 3, + }, + "DeerJohnsGoldboroughMillSkookumKennedySchneider": { + "prop": 0.375 * WRIA14, + "i": 47, + "j": 130, + "di": 1, + "dj": 1, + "depth": 3, + }, + "DeerJohnsGoldboroughMillSkookumKennedySchneiderPerry": { + "prop": 0.475 * WRIA14, + "i": 20, + "j": 120, + "di": 1, + "dj": 1, + "depth": 3, + }, + "McClaneDeschutesWoodwardWoodland": { + "prop": 1.0 * WRIA13, + "i": 22, + "j": 121, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Chambers": { + "prop": 1.0 * WRIA12, + "i": 6, + "j": 162, + "di": 1, + "dj": 1, + "depth": 3, + }, + "NisquallyMcAllister": { + "prop": 1.0 * WRIA11, + "i": 0, + "j": 137, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Puyallup": { + "prop": 0.995 * WRIA10, + "i": 10, + "j": 195, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Hylebas": { + "prop": 0.005 * WRIA10, + "i": 13, + "j": 199, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Duwamish1": { + "prop": 0.50 * WRIA9, + "i": 68, + "j": 243, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Duwamish2": { + "prop": 0.50 * WRIA9, + "i": 68, + "j": 246, + "di": 1, + "dj": 1, + "depth": 3, + }, + "CedarSammamish": { + "prop": 1.0 * WRIA8, + "i": 88, + "j": 246, + "di": 1, + "dj": 1, + "depth": 3, + }, +} WRIA4 = 0.33 WRIA3 = 0.17 WRIA5 = 0.17 WRIA7 = 0.33 -prop_dict['skagit'] = { - 'Skagit1': { - 'prop': 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), 'i': 
207, 'j': 326, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Skagit2': { - 'prop': 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), 'i': 229, 'j': 319, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Samish': { - 'prop': WRIA3 * 0.20, 'i': 265, 'j': 348, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'JoeLeary': { - 'prop': WRIA3 * 0.05, 'i': 257, 'j': 339, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Stillaguamish1': { - 'prop': 0.7 * WRIA5 * 1.0, 'i': 186, 'j': 316, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Stillaguamish2': { - 'prop': 0.1 * WRIA5 * 1.0, 'i': 192, 'j': 315, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Stillaguamish3': { - 'prop': 0.2 * WRIA5 * 1.0, 'i': 200, 'j': 318, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'SnohomishAllenQuilceda': { - 'prop': WRIA7 * 0.98, 'i': 143, 'j': 318, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Tulalip': { - 'prop': WRIA7 * 0.01, 'i': 154, 'j': 311, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Mission': { - 'prop': WRIA7 * 0.01, 'i': 152, 'j': 312, 'di': 1, 'dj': 1, 'depth': 3, - }} +prop_dict["skagit"] = { + "Skagit1": { + "prop": 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), + "i": 207, + "j": 326, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Skagit2": { + "prop": 0.5 * (WRIA4 * 1.0 + WRIA3 * 0.75), + "i": 229, + "j": 319, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Samish": { + "prop": WRIA3 * 0.20, + "i": 265, + "j": 348, + "di": 1, + "dj": 1, + "depth": 3, + }, + "JoeLeary": { + "prop": WRIA3 * 0.05, + "i": 257, + "j": 339, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Stillaguamish1": { + "prop": 0.7 * WRIA5 * 1.0, + "i": 186, + "j": 316, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Stillaguamish2": { + "prop": 0.1 * WRIA5 * 1.0, + "i": 192, + "j": 315, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Stillaguamish3": { + "prop": 0.2 * WRIA5 * 1.0, + "i": 200, + "j": 318, + "di": 1, + "dj": 1, + "depth": 3, + }, + "SnohomishAllenQuilceda": { + "prop": WRIA7 * 0.98, + "i": 143, + "j": 318, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Tulalip": { + "prop": WRIA7 * 0.01, + "i": 154, + "j": 311, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Mission": { + "prop": WRIA7 * 0.01, + "i": 152, + "j": 312, + "di": 1, + "dj": 1, + "depth": 3, + }, +} WRIA1 = 0.016 Fraser = 1 - WRIA1 -prop_dict['fraser'] = { - 'Dakota': { - 'prop': WRIA1 * 0.06, 'i': 362, 'j': 357, - 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Terrel': { - 'prop': WRIA1 * 0.04, 'i': 351, 'j': 345, - 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Nooksack': { - 'prop': WRIA1 * 0.75, 'i': 321, 'j': 347, - 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Squallum': { - 'prop': WRIA1 * 0.05, 'i': 305, 'j': 365, - 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Lakethingo': { - 'prop': WRIA1 * 0.06, 'i': 302, 'j': 367, - 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Chuckanut': { - 'prop': WRIA1 * 0.04, 'i': 298, 'j': 361, - 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Fraser1': { - 'prop': Fraser * 0.75, 'i': 500, 'j': 395, - 'di': 1, 'dj': 1, 'depth': 3, - }, # Fraser1 is main arm - 'Fraser2': { - 'prop': Fraser * 0.05, 'i': 409, 'j': 315, - 'di': 2, 'dj': 1, 'depth': 3, - }, # Fraser2 was Deas, moved to Canoe Pass - 'Fraser3': { - 'prop': Fraser * 0.05, 'i': 434, 'j': 318, - 'di': 2, 'dj': 1, 'depth': 3, - }, # Fraser3 is Middle arm - 'Fraser4': { - 'prop': Fraser * 0.15, 'i': 440, 'j': 323, - 'di': 1, 'dj': 2, 'depth': 3, - }} # Fraser4 is North arm +prop_dict["fraser"] = { + "Dakota": { + "prop": WRIA1 * 0.06, + "i": 362, + "j": 357, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Terrel": { + "prop": WRIA1 * 0.04, + "i": 351, + "j": 345, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Nooksack": { + "prop": WRIA1 * 0.75, + "i": 321, + 
"j": 347, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Squallum": { + "prop": WRIA1 * 0.05, + "i": 305, + "j": 365, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Lakethingo": { + "prop": WRIA1 * 0.06, + "i": 302, + "j": 367, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Chuckanut": { + "prop": WRIA1 * 0.04, + "i": 298, + "j": 361, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Fraser1": { + "prop": Fraser * 0.75, + "i": 500, + "j": 395, + "di": 1, + "dj": 1, + "depth": 3, + }, # Fraser1 is main arm + "Fraser2": { + "prop": Fraser * 0.05, + "i": 409, + "j": 315, + "di": 2, + "dj": 1, + "depth": 3, + }, # Fraser2 was Deas, moved to Canoe Pass + "Fraser3": { + "prop": Fraser * 0.05, + "i": 434, + "j": 318, + "di": 2, + "dj": 1, + "depth": 3, + }, # Fraser3 is Middle arm + "Fraser4": { + "prop": Fraser * 0.15, + "i": 440, + "j": 323, + "di": 1, + "dj": 2, + "depth": 3, + }, +} # Fraser4 is North arm totalarea = 9709.0 -prop_dict['evi_n'] = { - 'Oyster': { - 'prop': 363 / totalarea, 'i': 705, 'j': 122, 'di': 1, 'dj': 1, 'depth': 3}, - 'Qunisam': { - 'prop': 1470 / totalarea, 'i': 749, 'j': 123, 'di': 2, 'dj': 1, 'depth': 3}, - 'Snowden': { - 'prop': 139 / totalarea, 'i': 770, 'j': 117, 'di': 1, 'dj': 1, 'depth': 3}, - 'Menzies': { - 'prop': 31 / totalarea, 'i': 773, 'j': 117, 'di': 1, 'dj': 1, 'depth': 3}, - 'Creek1': { - 'prop': 23 / totalarea, 'i': 786, 'j': 123, 'di': 1, 'dj': 1, 'depth': 3}, - 'Creek2': { - 'prop': 16 / totalarea, 'i': 795, 'j': 126, 'di': 1, 'dj': 1, 'depth': 3}, - 'Creek3': { - 'prop': 23 / totalarea, 'i': 798, 'j': 127, 'di': 1, 'dj': 1, 'depth': 3}, - 'Elk': { - 'prop': 23 / totalarea, 'i': 807, 'j': 127, 'di': 1, 'dj': 1, 'depth': 3}, - 'Slab': { - 'prop': 12 / totalarea, 'i': 813, 'j': 129, 'di': 1, 'dj': 1, 'depth': 3}, - 'Pye': { - 'prop': 109 / totalarea, 'i': 826, 'j': 121, 'di': 1, 'dj': 1, 'depth': 3}, - 'BearPoint': { - 'prop': 12 / totalarea, 'i': 839, 'j': 107, 'di': 1, 'dj': 1, 'depth': 3}, - 'AmordeCosmos': { - 'prop': 229 / totalarea, 'i': 843, 'j': 96, 'di': 1, 'dj': 1, 'depth': 3}, - 'Humpback': { - 'prop': 10 / totalarea, 'i': 844, 'j': 93, 'di': 1, 'dj': 1, 'depth': 3}, - 'Palmer': { - 'prop': 14 / totalarea, 'i': 845, 'j': 92, 'di': 1, 'dj': 1, 'depth': 3}, - 'Hkusam': { - 'prop': 14 / totalarea, 'i': 848, 'j': 87, 'di': 1, 'dj': 1, 'depth': 3}, - 'CampPoint': { - 'prop': 28 / totalarea, 'i': 858, 'j': 77, 'di': 1, 'dj': 1, 'depth': 3}, - 'SalmonSayward': { - 'prop': (1210 + 14) / totalarea, 'i': 866, 'j': 64, 'di': 1, 'dj': 1, 'depth': 3}, - 'Kelsey': { - 'prop': 7 / totalarea, 'i': 878, 'j': 59, 'di': 1, 'dj': 1, 'depth': 3}, - 'unmarked': { - 'prop': 7 / totalarea, 'i': 884, 'j': 54, 'di': 1, 'dj': 1, 'depth': 3}, - 'Newcastle': { - 'prop': 34 / totalarea, - 'i': 890, 'j': 47, - 'di': 1, 'dj': 1, 'depth': 3}, - 'Windy': { - 'prop': 10 / totalarea, 'i': 891, 'j': 45, - 'di': 1, 'dj': 1, 'depth': 3}} +prop_dict["evi_n"] = { + "Oyster": { + "prop": 363 / totalarea, + "i": 705, + "j": 122, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Qunisam": { + "prop": 1470 / totalarea, + "i": 749, + "j": 123, + "di": 2, + "dj": 1, + "depth": 3, + }, + "Snowden": { + "prop": 139 / totalarea, + "i": 770, + "j": 117, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Menzies": { + "prop": 31 / totalarea, + "i": 773, + "j": 117, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Creek1": { + "prop": 23 / totalarea, + "i": 786, + "j": 123, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Creek2": { + "prop": 16 / totalarea, + "i": 795, + "j": 126, + "di": 1, + "dj": 1, + "depth": 3, + }, + 
"Creek3": { + "prop": 23 / totalarea, + "i": 798, + "j": 127, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Elk": {"prop": 23 / totalarea, "i": 807, "j": 127, "di": 1, "dj": 1, "depth": 3}, + "Slab": {"prop": 12 / totalarea, "i": 813, "j": 129, "di": 1, "dj": 1, "depth": 3}, + "Pye": {"prop": 109 / totalarea, "i": 826, "j": 121, "di": 1, "dj": 1, "depth": 3}, + "BearPoint": { + "prop": 12 / totalarea, + "i": 839, + "j": 107, + "di": 1, + "dj": 1, + "depth": 3, + }, + "AmordeCosmos": { + "prop": 229 / totalarea, + "i": 843, + "j": 96, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Humpback": { + "prop": 10 / totalarea, + "i": 844, + "j": 93, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Palmer": {"prop": 14 / totalarea, "i": 845, "j": 92, "di": 1, "dj": 1, "depth": 3}, + "Hkusam": {"prop": 14 / totalarea, "i": 848, "j": 87, "di": 1, "dj": 1, "depth": 3}, + "CampPoint": { + "prop": 28 / totalarea, + "i": 858, + "j": 77, + "di": 1, + "dj": 1, + "depth": 3, + }, + "SalmonSayward": { + "prop": (1210 + 14) / totalarea, + "i": 866, + "j": 64, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Kelsey": {"prop": 7 / totalarea, "i": 878, "j": 59, "di": 1, "dj": 1, "depth": 3}, + "unmarked": { + "prop": 7 / totalarea, + "i": 884, + "j": 54, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Newcastle": { + "prop": 34 / totalarea, + "i": 890, + "j": 47, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Windy": {"prop": 10 / totalarea, "i": 891, "j": 45, "di": 1, "dj": 1, "depth": 3}, +} # Jervis Inlet only area = 1400km2 (Trites 1955) ==> 25% of Jervis # watershed Jervis = 0.25 -prop_dict['jervis'] = { - 'SkwawkaLoquiltsPotatoDesertedStakawusCrabappleOsgood': { - 'prop': Jervis * 0.60, 'i': 650, 'j': 309, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Glacial': { - 'prop': Jervis * 0.05, 'i': 649, 'j': 310, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Seshal': { - 'prop': Jervis * 0.05, 'i': 651, 'j': 307, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Brittain': { - 'prop': Jervis * 0.10, 'i': 650, 'j': 301, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'VancouverHigh': { - 'prop': Jervis * 0.10, 'i': 626, 'j': 311, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Perketts': { - 'prop': Jervis * 0.05, 'i': 619, 'j': 307, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Treat': { - 'prop': Jervis * 0.05, 'i': 612, 'j': 301, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Sechelt': { - 'prop': 0.17, 'i': 604, 'j': 280, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Powell': { - 'prop': 0.32, 'i': 666, 'j': 202, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Lois': { - 'prop': 0.10, 'i': 629, 'j': 224, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Haslam': { - 'prop': 0.02, 'i': 632, 'j': 219, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Chapman': { - 'prop': 0.02, 'i': 522, 'j': 273, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Lapan': { - 'prop': 0.02, 'i': 619, 'j': 282, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Nelson': { - 'prop': 0.02, 'i': 599, 'j': 257, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Wakefield': { - 'prop': 0.02, 'i': 533, 'j': 263, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Halfmoon': { - 'prop': 0.02, 'i': 549, 'j': 253, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'MyersKleindaleAnderson': { - 'prop': 0.04, 'i': 571, 'j': 248, 'di': 1, 'dj': 1, 'depth': 3, - }} +prop_dict["jervis"] = { + "SkwawkaLoquiltsPotatoDesertedStakawusCrabappleOsgood": { + "prop": Jervis * 0.60, + "i": 650, + "j": 309, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Glacial": { + "prop": Jervis * 0.05, + "i": 649, + "j": 310, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Seshal": { + "prop": Jervis * 0.05, + "i": 651, + "j": 307, + "di": 1, + "dj": 1, + "depth": 3, 
+ }, + "Brittain": { + "prop": Jervis * 0.10, + "i": 650, + "j": 301, + "di": 1, + "dj": 1, + "depth": 3, + }, + "VancouverHigh": { + "prop": Jervis * 0.10, + "i": 626, + "j": 311, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Perketts": { + "prop": Jervis * 0.05, + "i": 619, + "j": 307, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Treat": { + "prop": Jervis * 0.05, + "i": 612, + "j": 301, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Sechelt": { + "prop": 0.17, + "i": 604, + "j": 280, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Powell": { + "prop": 0.32, + "i": 666, + "j": 202, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Lois": { + "prop": 0.10, + "i": 629, + "j": 224, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Haslam": { + "prop": 0.02, + "i": 632, + "j": 219, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Chapman": { + "prop": 0.02, + "i": 522, + "j": 273, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Lapan": { + "prop": 0.02, + "i": 619, + "j": 282, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Nelson": { + "prop": 0.02, + "i": 599, + "j": 257, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Wakefield": { + "prop": 0.02, + "i": 533, + "j": 263, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Halfmoon": { + "prop": 0.02, + "i": 549, + "j": 253, + "di": 1, + "dj": 1, + "depth": 3, + }, + "MyersKleindaleAnderson": { + "prop": 0.04, + "i": 571, + "j": 248, + "di": 1, + "dj": 1, + "depth": 3, + }, +} -prop_dict['toba'] = { - 'Toba': { - 'prop': 1.0, 'i': 746, 'j': 240, 'di': 1, 'dj': 3, 'depth': 3, - }} +prop_dict["toba"] = { + "Toba": { + "prop": 1.0, + "i": 746, + "j": 240, + "di": 1, + "dj": 3, + "depth": 3, + } +} -prop_dict['bute'] = { - 'Homathko': { - 'prop': 0.58, 'i': 897, 'j': 294, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Southgate': { - 'prop': 0.35, 'i': 885, 'j': 296, 'di': 1, 'dj': 2, 'depth': 3, - }, - 'Orford': { - 'prop': 0.07, 'i': 831, 'j': 249, 'di': 1, 'dj': 1, 'depth': 3, - }} +prop_dict["bute"] = { + "Homathko": { + "prop": 0.58, + "i": 897, + "j": 294, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Southgate": { + "prop": 0.35, + "i": 885, + "j": 296, + "di": 1, + "dj": 2, + "depth": 3, + }, + "Orford": { + "prop": 0.07, + "i": 831, + "j": 249, + "di": 1, + "dj": 1, + "depth": 3, + }, +} -prop_dict['evi_s'] = { - 'Cowichan': { - 'prop': 0.22, 'i': 383, 'j': 201, 'di': 1, 'dj': 2, 'depth': 3, - }, - 'Chemanius1': { - 'prop': 0.5 * 0.13, 'i': 414, 'j': 211, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Chemanius2': { - 'prop': 0.5 * 0.13, 'i': 417, 'j': 212, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Nanaimo1': { - 'prop': 0.67 * 0.14, 'i': 478, 'j': 208, 'di': 1, 'dj': 2, 'depth': 3, - }, - 'Nanaimo2': { - 'prop': 0.33 * 0.14, 'i': 477, 'j': 210, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'NorNanaimo': { - 'prop': 0.02, 'i': 491, 'j': 213, 'di': 3, 'dj': 1, 'depth': 3, - }, - 'Goldstream': { - 'prop': 0.08, 'i': 334, 'j': 185, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Nanoose': { - 'prop': 0.02, 'i': 518, 'j': 185, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Englishman': { - 'prop': 0.05, 'i': 541, 'j': 175, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'FrenchCreek': { - 'prop': 0.01, 'i': 551, 'j': 168, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'LittleQualicum': { - 'prop': 0.05, 'i': 563, 'j': 150, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Qualicum': { - 'prop': 0.02, 'i': 578, 'j': 137, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'SouthDenman': { - 'prop': 0.05, 'i': 602, 'j': 120, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Tsable': { - 'prop': 0.03, 'i': 616, 'j': 120, 'di': 2, 'dj': 1, 'depth': 3, - }, - 'Trent': { - 'prop': 0.01, 'i': 648, 
'j': 121, 'di': 1, 'dj': 1, 'depth': 3, - }, - 'Puntledge': { - 'prop': 0.14, 'i': 656, 'j': 119, 'di': 1, 'dj': 2, 'depth': 3, - }, - 'BlackCreek': { - 'prop': 0.03, 'i': 701, 'j': 123, 'di': 1, 'dj': 1, 'depth': 3, - }} +prop_dict["evi_s"] = { + "Cowichan": { + "prop": 0.22, + "i": 383, + "j": 201, + "di": 1, + "dj": 2, + "depth": 3, + }, + "Chemanius1": { + "prop": 0.5 * 0.13, + "i": 414, + "j": 211, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Chemanius2": { + "prop": 0.5 * 0.13, + "i": 417, + "j": 212, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Nanaimo1": { + "prop": 0.67 * 0.14, + "i": 478, + "j": 208, + "di": 1, + "dj": 2, + "depth": 3, + }, + "Nanaimo2": { + "prop": 0.33 * 0.14, + "i": 477, + "j": 210, + "di": 1, + "dj": 1, + "depth": 3, + }, + "NorNanaimo": { + "prop": 0.02, + "i": 491, + "j": 213, + "di": 3, + "dj": 1, + "depth": 3, + }, + "Goldstream": { + "prop": 0.08, + "i": 334, + "j": 185, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Nanoose": { + "prop": 0.02, + "i": 518, + "j": 185, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Englishman": { + "prop": 0.05, + "i": 541, + "j": 175, + "di": 1, + "dj": 1, + "depth": 3, + }, + "FrenchCreek": { + "prop": 0.01, + "i": 551, + "j": 168, + "di": 1, + "dj": 1, + "depth": 3, + }, + "LittleQualicum": { + "prop": 0.05, + "i": 563, + "j": 150, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Qualicum": { + "prop": 0.02, + "i": 578, + "j": 137, + "di": 1, + "dj": 1, + "depth": 3, + }, + "SouthDenman": { + "prop": 0.05, + "i": 602, + "j": 120, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Tsable": { + "prop": 0.03, + "i": 616, + "j": 120, + "di": 2, + "dj": 1, + "depth": 3, + }, + "Trent": { + "prop": 0.01, + "i": 648, + "j": 121, + "di": 1, + "dj": 1, + "depth": 3, + }, + "Puntledge": { + "prop": 0.14, + "i": 656, + "j": 119, + "di": 1, + "dj": 2, + "depth": 3, + }, + "BlackCreek": { + "prop": 0.03, + "i": 701, + "j": 123, + "di": 1, + "dj": 1, + "depth": 3, + }, +} diff --git a/SalishSeaTools/salishsea_tools/river_sss150.py b/SalishSeaTools/salishsea_tools/river_sss150.py index ef648e25..0d7061e9 100644 --- a/SalishSeaTools/salishsea_tools/river_sss150.py +++ b/SalishSeaTools/salishsea_tools/river_sss150.py @@ -24,355 +24,471 @@ # Note that the proportions are a bit smaller than for SSC, as when doing SSC, Coquitlam River did not drain # into Indian Arm. 
-prop_dict['howe'] = { -'Bain': {'prop': 0.0024147054593656597, - 'i': 605, - 'j': 39, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Britannia': {'prop': 0.0062213962997299645, - 'i': 717, - 'j': 166, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Capilano': {'prop': 0.00893658234849195, - 'i': 381, - 'j': 225, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Cypress': {'prop': 0.002879907335720214, - 'i': 394, - 'j': 167, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Deeks': {'prop': 0.003153236064628921, - 'i': 576, - 'j': 150, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Disbrow': {'prop': 0.0005376068376548734, - 'i': 465, - 'j': 160, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Eagle': {'prop': 0.001781162047987863, - 'i': 409, - 'j': 150, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Ellesmere': {'prop': 0.002776730265867258, - 'i': 684, - 'j': 138, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'FalseCreek': {'prop': 0.004918106996324212, - 'i': 327, - 'j': 249, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Foulger': {'prop': 0.0031278943281738084, - 'i': 737, - 'j': 135, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Furry': {'prop': 0.012015603327788043, - 'i': 668, - 'j': 159, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Gonzalos': {'prop': 0.0027151860487619867, - 'i': 760, - 'j': 181, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Harvey': {'prop': 0.0037849693519742093, - 'i': 530, - 'j': 159, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Jericho': {'prop': 0.002407464963235628, - 'i': 326, - 'j': 192, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Kallahn': {'prop': 0.002418325707430676, - 'i': 624, - 'j': 150, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Langdale': {'prop': 0.004872853895511512, - 'i': 484, - 'j': 31, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Lawson': {'prop': 0.0012978589313082296, - 'i': 384, - 'j': 212, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Marr': {'prop': 0.001147618636610066, - 'i': 390, - 'j': 200, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'McNab': {'prop': 0.013159601716333095, - 'i': 635, - 'j': 74, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'McNair': {'prop': 0.009780100147640676, - 'i': 570, - 'j': 13, - 'di': 2, - 'dj': 1, - 'depth': 1}, - 'Potlatch': {'prop': 0.00632457336958292, - 'i': 659, - 'j': 108, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Rainy': {'prop': 0.012907994475814484, - 'i': 585, - 'j': 20, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Rodgers': {'prop': 0.0008829060980960977, - 'i': 392, - 'j': 195, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Sclufield': {'prop': 0.00043623989183442586, - 'i': 466, - 'j': 160, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Shannon': {'prop': 0.003247362514319336, - 'i': 775, - 'j': 188, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Squamish': {'prop': 0.6878471323530365, - 'i': 819, - 'j': 180, - 'di': 1, - 'dj': 1, - 'depth': 2}, - 'Stawanus': {'prop': 0.010455276411766155, - 'i': 789, - 'j': 192, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Turpin': {'prop': 0.0013793145127710892, - 'i': 490, - 'j': 165, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Twin': {'prop': 0.0035406026075856306, - 'i': 536, - 'j': 22, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Westmount': {'prop': 0.0004348641975697198, - 'i': 397, - 'j': 186, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Whyte': {'prop': 0.0010534921869196508, - 'i': 413, - 'j': 150, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Woodfibre': {'prop': 0.013206664941178304, - 'i': 752, - 'j': 135, - 'di': 2, - 'dj': 1, - 'depth': 1}, - 'Allan Creek': {'prop': 0.0007534321005542072, - 'i': 416, - 'j': 346, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Bunsen Lake': {'prop': 0.03607237294888977, - 'i': 449, - 
'j': 370, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Burnaby Mountain': {'prop': 0.0012659832651139202, - 'i': 353, - 'j': 327, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Downtown': {'prop': 0.0006121635817002933, - 'i': 343, - 'j': 243, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Elsay Creek': {'prop': 0.0058988662295794535, - 'i': 492, - 'j': 361, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Francis Creek': {'prop': 0.0015847429999637772, - 'i': 403, - 'j': 336, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Grand Creek': {'prop': 0.0044427138044698805, - 'i': 540, - 'j': 371, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Indian': {'prop': 0.0057438848325217715, - 'i': 559, - 'j': 355, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Indian 2': {'prop': 0.0057438848325217715, - 'i': 560, - 'j': 356, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Indian 3': {'prop': 0.011487769665043543, - 'i': 561, - 'j': 356, - 'di': 1, - 'dj': 2, - 'depth': 1}, - 'Indian 4': {'prop': 0.0057438848325217715, - 'i': 563, - 'j': 357, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Indian 5': {'prop': 0.011487769665043543, - 'i': 564, - 'j': 357, - 'di': 1, - 'dj': 2, - 'depth': 1}, - 'Lighthall Creek': {'prop': 0.0014253631325388486, - 'i': 493, - 'j': 375, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Lost Lagoon': {'prop': 0.0004654616582750751, - 'i': 352, - 'j': 229, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Lynn Creek': {'prop': 0.011239902923171658, - 'i': 361, - 'j': 283, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Mackay Creek': {'prop': 0.0018310573405295755, - 'i': 373, - 'j': 247, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'McCartney Creek': {'prop': 0.002032093309667838, - 'i': 373, - 'j': 304, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'McGill Point': {'prop': 0.001961459050240881, - 'i': 351, - 'j': 285, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Mosquito Creek': {'prop': 0.0030517622342159595, - 'i': 374, - 'j': 253, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Mossum Creek': {'prop': 0.0025464556090846525, - 'i': 370, - 'j': 378, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Noons Creek': {'prop': 0.0011319592856884123, - 'i': 356, - 'j': 397, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Percy Creek': {'prop': 0.0011482594994023253, - 'i': 429, - 'j': 352, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Port Moody': {'prop': 0.002379831202231318, - 'i': 352, - 'j': 382, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Sasamat Lake': {'prop': 0.0013529177382547903, - 'i': 396, - 'j': 355, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Seymour': {'prop': 0.031875973484985685, - 'i': 367, - 'j': 290, - 'di': 1, - 'dj': 1, - 'depth': 1}, - 'Shone Creek': {'prop': 0.001343862063969283, - 'i': 451, - 'j': 361, - 'di': 1, - 'dj': 1, - 'depth': 1} +prop_dict["howe"] = { + "Bain": { + "prop": 0.0024147054593656597, + "i": 605, + "j": 39, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Britannia": { + "prop": 0.0062213962997299645, + "i": 717, + "j": 166, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Capilano": { + "prop": 0.00893658234849195, + "i": 381, + "j": 225, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Cypress": { + "prop": 0.002879907335720214, + "i": 394, + "j": 167, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Deeks": { + "prop": 0.003153236064628921, + "i": 576, + "j": 150, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Disbrow": { + "prop": 0.0005376068376548734, + "i": 465, + "j": 160, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Eagle": { + "prop": 0.001781162047987863, + "i": 409, + "j": 150, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Ellesmere": { + "prop": 0.002776730265867258, + "i": 684, + "j": 138, + 
"di": 1, + "dj": 1, + "depth": 1, + }, + "FalseCreek": { + "prop": 0.004918106996324212, + "i": 327, + "j": 249, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Foulger": { + "prop": 0.0031278943281738084, + "i": 737, + "j": 135, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Furry": { + "prop": 0.012015603327788043, + "i": 668, + "j": 159, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Gonzalos": { + "prop": 0.0027151860487619867, + "i": 760, + "j": 181, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Harvey": { + "prop": 0.0037849693519742093, + "i": 530, + "j": 159, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Jericho": { + "prop": 0.002407464963235628, + "i": 326, + "j": 192, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Kallahn": { + "prop": 0.002418325707430676, + "i": 624, + "j": 150, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Langdale": { + "prop": 0.004872853895511512, + "i": 484, + "j": 31, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lawson": { + "prop": 0.0012978589313082296, + "i": 384, + "j": 212, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Marr": { + "prop": 0.001147618636610066, + "i": 390, + "j": 200, + "di": 1, + "dj": 1, + "depth": 1, + }, + "McNab": { + "prop": 0.013159601716333095, + "i": 635, + "j": 74, + "di": 1, + "dj": 1, + "depth": 1, + }, + "McNair": { + "prop": 0.009780100147640676, + "i": 570, + "j": 13, + "di": 2, + "dj": 1, + "depth": 1, + }, + "Potlatch": { + "prop": 0.00632457336958292, + "i": 659, + "j": 108, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Rainy": { + "prop": 0.012907994475814484, + "i": 585, + "j": 20, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Rodgers": { + "prop": 0.0008829060980960977, + "i": 392, + "j": 195, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sclufield": { + "prop": 0.00043623989183442586, + "i": 466, + "j": 160, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Shannon": { + "prop": 0.003247362514319336, + "i": 775, + "j": 188, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Squamish": { + "prop": 0.6878471323530365, + "i": 819, + "j": 180, + "di": 1, + "dj": 1, + "depth": 2, + }, + "Stawanus": { + "prop": 0.010455276411766155, + "i": 789, + "j": 192, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Turpin": { + "prop": 0.0013793145127710892, + "i": 490, + "j": 165, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Twin": { + "prop": 0.0035406026075856306, + "i": 536, + "j": 22, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Westmount": { + "prop": 0.0004348641975697198, + "i": 397, + "j": 186, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Whyte": { + "prop": 0.0010534921869196508, + "i": 413, + "j": 150, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Woodfibre": { + "prop": 0.013206664941178304, + "i": 752, + "j": 135, + "di": 2, + "dj": 1, + "depth": 1, + }, + "Allan Creek": { + "prop": 0.0007534321005542072, + "i": 416, + "j": 346, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Bunsen Lake": { + "prop": 0.03607237294888977, + "i": 449, + "j": 370, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Burnaby Mountain": { + "prop": 0.0012659832651139202, + "i": 353, + "j": 327, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Downtown": { + "prop": 0.0006121635817002933, + "i": 343, + "j": 243, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Elsay Creek": { + "prop": 0.0058988662295794535, + "i": 492, + "j": 361, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Francis Creek": { + "prop": 0.0015847429999637772, + "i": 403, + "j": 336, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Grand Creek": { + "prop": 0.0044427138044698805, + "i": 540, + "j": 371, + "di": 1, + "dj": 1, + "depth": 1, + }, + 
"Indian": { + "prop": 0.0057438848325217715, + "i": 559, + "j": 355, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Indian 2": { + "prop": 0.0057438848325217715, + "i": 560, + "j": 356, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Indian 3": { + "prop": 0.011487769665043543, + "i": 561, + "j": 356, + "di": 1, + "dj": 2, + "depth": 1, + }, + "Indian 4": { + "prop": 0.0057438848325217715, + "i": 563, + "j": 357, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Indian 5": { + "prop": 0.011487769665043543, + "i": 564, + "j": 357, + "di": 1, + "dj": 2, + "depth": 1, + }, + "Lighthall Creek": { + "prop": 0.0014253631325388486, + "i": 493, + "j": 375, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lost Lagoon": { + "prop": 0.0004654616582750751, + "i": 352, + "j": 229, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Lynn Creek": { + "prop": 0.011239902923171658, + "i": 361, + "j": 283, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Mackay Creek": { + "prop": 0.0018310573405295755, + "i": 373, + "j": 247, + "di": 1, + "dj": 1, + "depth": 1, + }, + "McCartney Creek": { + "prop": 0.002032093309667838, + "i": 373, + "j": 304, + "di": 1, + "dj": 1, + "depth": 1, + }, + "McGill Point": { + "prop": 0.001961459050240881, + "i": 351, + "j": 285, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Mosquito Creek": { + "prop": 0.0030517622342159595, + "i": 374, + "j": 253, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Mossum Creek": { + "prop": 0.0025464556090846525, + "i": 370, + "j": 378, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Noons Creek": { + "prop": 0.0011319592856884123, + "i": 356, + "j": 397, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Percy Creek": { + "prop": 0.0011482594994023253, + "i": 429, + "j": 352, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Port Moody": { + "prop": 0.002379831202231318, + "i": 352, + "j": 382, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Sasamat Lake": { + "prop": 0.0013529177382547903, + "i": 396, + "j": 355, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Seymour": { + "prop": 0.031875973484985685, + "i": 367, + "j": 290, + "di": 1, + "dj": 1, + "depth": 1, + }, + "Shone Creek": { + "prop": 0.001343862063969283, + "i": 451, + "j": 361, + "di": 1, + "dj": 1, + "depth": 1, + }, } ### Fraser Watershed @@ -381,15 +497,23 @@ Nooksack_cor = WRIA1 * 0.75 / 2040 Fraser = 1 - WRIA1 - Nooksack_cor * (116 + 149 + 60 + 31) -Pitt = 1640/(232000) # from Canadian Hydrology Discharge Areas +Pitt = 1640 / (232000) # from Canadian Hydrology Discharge Areas -prop_dict['fraser'] = { - 'Fraser': { - 'prop': Fraser * (1-Pitt), 'i': 203, 'j': 707, - 'di': 8, 'dj': 1, 'depth': 3, - }, - 'Pitt River': { - 'prop': Fraser * Pitt, 'i': 659, 'j': 506, - 'di': 1, 'dj': 1, 'depth': 1, - } +prop_dict["fraser"] = { + "Fraser": { + "prop": Fraser * (1 - Pitt), + "i": 203, + "j": 707, + "di": 8, + "dj": 1, + "depth": 3, + }, + "Pitt River": { + "prop": Fraser * Pitt, + "i": 659, + "j": 506, + "di": 1, + "dj": 1, + "depth": 1, + }, } diff --git a/SalishSeaTools/salishsea_tools/rivertools.py b/SalishSeaTools/salishsea_tools/rivertools.py index cb1a1d82..447ba18d 100644 --- a/SalishSeaTools/salishsea_tools/rivertools.py +++ b/SalishSeaTools/salishsea_tools/rivertools.py @@ -20,8 +20,7 @@ import numpy as np -def put_watershed_into_runoff( - rivertype, area, flux, runoff, run_depth, run_temp, pd): +def put_watershed_into_runoff(rivertype, area, flux, runoff, run_depth, run_temp, pd): """Fill the river file with the rivers of one watershed. 
:arg str rivertype: 'constant' or 'monthly' flows @@ -45,49 +44,71 @@ def put_watershed_into_runoff( for key in pd: river = pd[key] - if rivertype == 'constant': + if rivertype == "constant": fill_runoff_array( - flux * river['prop'], river['i'], - river['di'], river['j'], river['dj'], - river['depth'], runoff, run_depth, area) - elif rivertype == 'monthly': + flux * river["prop"], + river["i"], + river["di"], + river["j"], + river["dj"], + river["depth"], + runoff, + run_depth, + area, + ) + elif rivertype == "monthly": fill_runoff_array_monthly( - flux * river['prop'], river['i'], - river['di'], river['j'], river['dj'], - river['depth'], runoff, run_depth, - run_temp, area, numtimes=12) - elif rivertype == 'daily': + flux * river["prop"], + river["i"], + river["di"], + river["j"], + river["dj"], + river["depth"], + runoff, + run_depth, + run_temp, + area, + numtimes=12, + ) + elif rivertype == "daily": fill_runoff_array_monthly( - flux * river['prop'], river['i'], - river['di'], river['j'], river['dj'], - river['depth'], runoff, run_depth, - run_temp, area, numtimes=365) + flux * river["prop"], + river["i"], + river["di"], + river["j"], + river["dj"], + river["depth"], + runoff, + run_depth, + run_temp, + area, + numtimes=365, + ) return runoff, run_depth, run_temp -def get_watershed_prop_dict(watershedname, Fraser_River='short'): - """get the proportion that each river occupies in the watershed. - """ +def get_watershed_prop_dict(watershedname, Fraser_River="short"): + """get the proportion that each river occupies in the watershed.""" raise DeprecationWarning( - 'get_watershed_prop_dict() depreciated, use the river_** dictionary files') + "get_watershed_prop_dict() depreciated, use the river_** dictionary files" + ) return def get_bathy_cell_size( - grid='../../../nemo-forcing/grid/' - 'coordinates_seagrid_SalishSea.nc', + grid="../../../nemo-forcing/grid/" "coordinates_seagrid_SalishSea.nc", ): - """Get the bathymetry and size of each cell. - """ + """Get the bathymetry and size of each cell.""" fc = NC.Dataset(grid) - e1t = fc.variables['e1t'] - e2t = fc.variables['e2t'] + e1t = fc.variables["e1t"] + e2t = fc.variables["e2t"] return e1t, e2t def init_runoff_array( - bathy='../../../nemo-forcing/grid/' - 'bathy_meter_SalishSea.nc', init_depth=-1, init_temp=-99 + bathy="../../../nemo-forcing/grid/" "bathy_meter_SalishSea.nc", + init_depth=-1, + init_temp=-99, ): """Initialise the runoff array. @@ -98,82 +119,88 @@ def init_runoff_array( bathy='/ocean/jieliu/research/meopar/river-treatment/bathy_meter_SalishSea6.nc') """ raise DeprecationWarning( - 'init_runoff_array() deprecated, use runoff = np.zeros_like(area); run_depth = np.ones_like(runoff)') + "init_runoff_array() deprecated, use runoff = np.zeros_like(area); run_depth = np.ones_like(runoff)" + ) return def init_runoff_array_monthly( - bathy='../../../nemo-forcing/grid/' - 'bathy_meter_SalishSea.nc', init_depth=-1, init_temp=-99 + bathy="../../../nemo-forcing/grid/" "bathy_meter_SalishSea.nc", + init_depth=-1, + init_temp=-99, ): - """Initialise the runoff array for each month. 
- """ + """Initialise the runoff array for each month.""" raise DeprecationWarning( - 'init_runoff_array() deprecated, use runoff = np.zeros((12, area.shape[0], area.shape[1])); run_depth = np.ones_like(runoff)') + "init_runoff_array() deprecated, use runoff = np.zeros((12, area.shape[0], area.shape[1])); run_depth = np.ones_like(runoff)" + ) return def fill_runoff_array( - flux, istart, di, jstart, dj, depth_of_flux, runoff, run_depth, - area): - """Fill the runoff array. - """ + flux, istart, di, jstart, dj, depth_of_flux, runoff, run_depth, area +): + """Fill the runoff array.""" number_cells = di * dj total_area = number_cells * area[istart, jstart] - w = flux / total_area * 1000. # w is in kg/s not m/s - runoff[istart:istart + di, jstart:jstart + dj] = w - run_depth[istart:istart + di, jstart:jstart + dj] = depth_of_flux + w = flux / total_area * 1000.0 # w is in kg/s not m/s + runoff[istart : istart + di, jstart : jstart + dj] = w + run_depth[istart : istart + di, jstart : jstart + dj] = depth_of_flux return runoff, run_depth def fill_runoff_array_monthly( - flux, istart, di, jstart, dj, - depth_of_flux, runoff, run_depth, run_temp, area, numtimes=12): - """Fill the monthly runoff array. - """ + flux, + istart, + di, + jstart, + dj, + depth_of_flux, + runoff, + run_depth, + run_temp, + area, + numtimes=12, +): + """Fill the monthly runoff array.""" number_cells = di * dj total_area = number_cells * area[istart, jstart] - for ntime in range(1, numtimes+1): - w = flux[ntime - 1] / total_area * 1000. # w is in kg/s not m/s - runoff[(ntime - 1), istart:istart + di, jstart:jstart + dj] = w - run_depth[(ntime - 1), - istart:istart + di, - jstart:jstart + dj] = depth_of_flux + for ntime in range(1, numtimes + 1): + w = flux[ntime - 1] / total_area * 1000.0 # w is in kg/s not m/s + runoff[(ntime - 1), istart : istart + di, jstart : jstart + dj] = w + run_depth[(ntime - 1), istart : istart + di, jstart : jstart + dj] = ( + depth_of_flux + ) if numtimes == 12: - run_temp[(ntime - 1), - istart:istart + di, - jstart:jstart + dj] = rivertemp(month=ntime) + run_temp[(ntime - 1), istart : istart + di, jstart : jstart + dj] = ( + rivertemp(month=ntime) + ) else: - run_temp[(ntime - 1), - istart:istart + di, - jstart:jstart + dj] = rivertemp_yday(yearday=ntime) + run_temp[(ntime - 1), istart : istart + di, jstart : jstart + dj] = ( + rivertemp_yday(yearday=ntime) + ) return runoff, run_depth, run_temp def check_sum(runoff_orig, runoff_new, flux, area): - """Check that the runoff adds up to what it should. -""" - new_flux = (np.sum(runoff_new * area)/1000. - -np.sum(runoff_orig * area)/1000.) - print (new_flux, flux, new_flux/flux) + """Check that the runoff adds up to what it should.""" + new_flux = np.sum(runoff_new * area) / 1000.0 - np.sum(runoff_orig * area) / 1000.0 + print(new_flux, flux, new_flux / flux) def check_sum_monthly(runoff_orig, runoff_new, flux, area, numtimes=12): - """Check that the runoff adds up per month to what it should. - """ - new_flux = (np.sum(runoff_new * area)/1000. - -np.sum(runoff_orig * area)/1000.) - print (new_flux/numtimes, np.sum(flux)/numtimes, new_flux/np.sum(flux)) + """Check that the runoff adds up per month to what it should.""" + new_flux = np.sum(runoff_new * area) / 1000.0 - np.sum(runoff_orig * area) / 1000.0 + print(new_flux / numtimes, np.sum(flux) / numtimes, new_flux / np.sum(flux)) def rivertemp_yday(yearday): """River temperature, based on Fraser River, see Allen and Wolfe (2013). - Temperature in NEMO is in Celsius. 
+ Temperature in NEMO is in Celsius. """ - if (yearday < 52.8 or yearday > 334.4): + if yearday < 52.8 or yearday > 334.4: river_temp = 2.5 - elif (yearday < 232.9): + elif yearday < 232.9: river_temp = 2.5 + (yearday - 52.8) * (19.3 - 2.5) / (232.9 - 52.8) else: river_temp = 19.3 + (yearday - 232.9) * (2.5 - 19.3) / (334.4 - 232.9) @@ -197,73 +224,79 @@ def rivertemp(month): 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 15, 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 15.5, 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 15, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 15.5] + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 15.5, + ] yearday = centerday[month - 1] river_temp = rivertemp_yday(yearday) return river_temp def get_watershed_prop_dict_long_fraser(watershedname): - """get the proportion that each river occupies in the watershed. - """ + """get the proportion that each river occupies in the watershed.""" raise DeprecationWarning( - 'get_watershed_prop_dict_long_fraser() depreciated, use the river_** dictionary files') + "get_watershed_prop_dict_long_fraser() depreciated, use the river_** dictionary files" + ) return + def get_watershed_prop_dict_allArms_fraser(watershedname): - """get the proportion that each river occupies in the watershed. - """ + """get the proportion that each river occupies in the watershed.""" raise DeprecationWarning( - 'get_watershed_prop_dict_allArms_fraser() depreciated, all Arms bathy superceded by 201702') + "get_watershed_prop_dict_allArms_fraser() depreciated, all Arms bathy superceded by 201702" + ) return + def init_runoff3_array( - bathy='/ocean/jieliu/research/meopar/river-treatment/' - 'bathy_meter_SalishSea3.nc' + bathy="/ocean/jieliu/research/meopar/river-treatment/" "bathy_meter_SalishSea3.nc", ): - """Initialise the runoff array. - """ + """Initialise the runoff array.""" raise DeprecationWarning( - 'init_runoff3_array() depreciated, just use init_runoff_array') + "init_runoff3_array() depreciated, just use init_runoff_array" + ) return + def put_watershed_into_runoff3( - rivertype, watershedname, flux, runoff, run_depth, run_temp, + rivertype, + watershedname, + flux, + runoff, + run_depth, + run_temp, ): - """Fill the river file with the rivers of one watershed. - """ + """Fill the river file with the rivers of one watershed.""" raise DeprecationWarning( - 'put_watershed_into_runoff3() depreciated, just use put_watershed_into_runoff') + "put_watershed_into_runoff3() depreciated, just use put_watershed_into_runoff" + ) return def init_runoff3_array_monthly( - bathy='/ocean/jieliu/research/meopar/river-treatment/' - 'bathy_meter_SalishSea3.nc' + bathy="/ocean/jieliu/research/meopar/river-treatment/" "bathy_meter_SalishSea3.nc", ): - """Initialise the runoff array for each month. - """ + """Initialise the runoff array for each month.""" raise DeprecationWarning( - 'init_runoff3_array_monthly() depreciated, just use init_runoff_array_monthly') + "init_runoff3_array_monthly() depreciated, just use init_runoff_array_monthly" + ) return + def init_runoff5_array_monthly( - bathy='/ocean/jieliu/research/meopar/river-treatment/' - 'bathy_meter_SalishSea5.nc' + bathy="/ocean/jieliu/research/meopar/river-treatment/" "bathy_meter_SalishSea5.nc", ): - """Initialise the runoff array for each month. 
- """ + """Initialise the runoff array for each month.""" raise DeprecationWarning( - 'init_runoff5_array_monthly() depreciated, just use init_runoff_array_monthly') + "init_runoff5_array_monthly() depreciated, just use init_runoff_array_monthly" + ) return def init_runoff5_array( - bathy='/ocean/jieliu/research/meopar/river-treatment/' - 'bathy_meter_SalishSea5.nc' + bathy="/ocean/jieliu/research/meopar/river-treatment/" "bathy_meter_SalishSea5.nc", ): - """Initialise the runoff array. - """ + """Initialise the runoff array.""" raise DeprecationWarning( - 'init_runoff5_array() depreciated, just use init_runoff_array') + "init_runoff5_array() depreciated, just use init_runoff_array" + ) return diff --git a/SalishSeaTools/salishsea_tools/stormtools.py b/SalishSeaTools/salishsea_tools/stormtools.py index 5230b290..76898dce 100644 --- a/SalishSeaTools/salishsea_tools/stormtools.py +++ b/SalishSeaTools/salishsea_tools/stormtools.py @@ -63,17 +63,19 @@ def storm_surge_risk_level(site_name, max_ssh, ttide): threshold """ try: - max_tide_ssh = max(ttide.pred_all) + PLACES[site_name]['mean sea lvl'] - max_historic_ssh = PLACES[site_name]['hist max sea lvl'] + max_tide_ssh = max(ttide.pred_all) + PLACES[site_name]["mean sea lvl"] + max_historic_ssh = PLACES[site_name]["hist max sea lvl"] except KeyError as e: raise KeyError( - 'place name or info key not found in ' - 'salishsea_tools.places.PLACES: {}'.format(e)) + "place name or info key not found in " + "salishsea_tools.places.PLACES: {}".format(e) + ) extreme_threshold = max_tide_ssh + (max_historic_ssh - max_tide_ssh) / 2 risk_level = ( - None if max_ssh < max_tide_ssh - else 'extreme risk' if max_ssh > extreme_threshold - else 'moderate risk') + None + if max_ssh < max_tide_ssh + else "extreme risk" if max_ssh > extreme_threshold else "moderate risk" + ) return risk_level @@ -98,7 +100,7 @@ def convert_date_seconds(times, start): """ arr_times = [] for ii in range(0, len(times)): - arr_start = arrow.Arrow.strptime(start, '%d-%b-%Y') + arr_start = arrow.Arrow.strptime(start, "%d-%b-%Y") arr_new = arr_start.replace(seconds=times[ii]) arr_times.append(arr_new.datetime) @@ -122,7 +124,7 @@ def convert_date_hours(times, start): arr_times = [] for ii in range(0, len(times)): - arr_start = arrow.Arrow.strptime(start, '%d-%b-%Y') + arr_start = arrow.Arrow.strptime(start, "%d-%b-%Y") arr_new = arr_start.replace(hours=times[ii]) arr_times.append(arr_new.datetime) @@ -158,42 +160,42 @@ def get_CGRF_weather(start, end, grid): v10 = [] pres = [] time = [] - st_ar = arrow.Arrow.strptime(start, '%d-%b-%Y') - end_ar = arrow.Arrow.strptime(end, '%d-%b-%Y') + st_ar = arrow.Arrow.strptime(start, "%d-%b-%Y") + end_ar = arrow.Arrow.strptime(end, "%d-%b-%Y") - CGRF_path = '/ocean/dlatorne/MEOPAR/CGRF/NEMO-atmos/' + CGRF_path = "/ocean/dlatorne/MEOPAR/CGRF/NEMO-atmos/" - for r in arrow.Arrow.range('day', st_ar, end_ar): + for r in arrow.Arrow.range("day", st_ar, end_ar): mstr = "{0:02d}".format(r.month) dstr = "{0:02d}".format(r.day) # u - strU = 'u10_y' + str(r.year) + 'm' + mstr + 'd' + dstr + '.nc' - fU = NC.Dataset(CGRF_path+strU) - var = fU.variables['u_wind'][:, grid[0], grid[1]] + strU = "u10_y" + str(r.year) + "m" + mstr + "d" + dstr + ".nc" + fU = NC.Dataset(CGRF_path + strU) + var = fU.variables["u_wind"][:, grid[0], grid[1]] u10.extend(var[:]) # time - tim = fU.variables['time_counter'] - time.extend(tim[:] + (r.day-st_ar.day)*24) + tim = fU.variables["time_counter"] + time.extend(tim[:] + (r.day - st_ar.day) * 24) times = convert_date_hours(time, start) 
# v - strV = 'v10_y' + str(r.year) + 'm' + mstr + 'd' + dstr + '.nc' - fV = NC.Dataset(CGRF_path+strV) - var = fV.variables['v_wind'][:, grid[0], grid[1]] + strV = "v10_y" + str(r.year) + "m" + mstr + "d" + dstr + ".nc" + fV = NC.Dataset(CGRF_path + strV) + var = fV.variables["v_wind"][:, grid[0], grid[1]] v10.extend(var[:]) # pressure - strP = 'slp_corr_y' + str(r.year) + 'm' + mstr + 'd' + dstr + '.nc' - fP = NC.Dataset(CGRF_path+strP) - var = fP.variables['atmpres'][:, grid[0], grid[1]] + strP = "slp_corr_y" + str(r.year) + "m" + mstr + "d" + dstr + ".nc" + fP = NC.Dataset(CGRF_path + strP) + var = fP.variables["atmpres"][:, grid[0], grid[1]] pres.extend(var[:]) u10s = np.array(u10) v10s = np.array(v10) press = np.array(pres) - windspeed = np.sqrt(u10s**2+v10s**2) + windspeed = np.sqrt(u10s**2 + v10s**2) winddir = np.arctan2(v10, u10) * 180 / np.pi winddir = winddir + 360 * (winddir < 0) @@ -232,13 +234,13 @@ def combine_data(data_list): sshs = {} for k in data_list: net = data_list.get(k) - us[k] = net.variables['vozocrtx'] - vs[k] = net.variables['vomecrty'] - lats[k] = net.variables['nav_lat'] - lons[k] = net.variables['nav_lon'] - tmps[k] = net.variables['votemper'] - sals[k] = net.variables['vosaline'] - sshs[k] = net.variables['sossheig'] + us[k] = net.variables["vozocrtx"] + vs[k] = net.variables["vomecrty"] + lats[k] = net.variables["nav_lat"] + lons[k] = net.variables["nav_lon"] + tmps[k] = net.variables["votemper"] + sals[k] = net.variables["vosaline"] + sshs[k] = net.variables["sossheig"] return us, vs, lats, lons, tmps, sals, sshs @@ -268,14 +270,14 @@ def get_variables(fU, fV, fT, timestamp, depth): """ # get u and ugrid - u_vel = fU.variables['vozocrtx'] # u currents and grid + u_vel = fU.variables["vozocrtx"] # u currents and grid U = u_vel[timestamp, depth, :, :] # get data at specified level and time. # mask u so that white is plotted on land points mu = U == 0 U = np.ma.array(U, mask=mu) # get v and v grid - v_vel = fV.variables['vomecrty'] # v currents and grid + v_vel = fV.variables["vomecrty"] # v currents and grid V = v_vel[timestamp, depth, :, :] # get data at specified level and time. # mask v so that white is plotted on land points @@ -283,17 +285,17 @@ def get_variables(fU, fV, fT, timestamp, depth): V = np.ma.array(V, mask=mu) # grid for T points - eta = fT.variables['sossheig'] + eta = fT.variables["sossheig"] E = eta[timestamp, :, :] mu = E == 0 E = np.ma.array(E, mask=mu) - sal = fT.variables['vosaline'] + sal = fT.variables["vosaline"] S = sal[timestamp, depth, :, :] mu = S == 0 S = np.ma.array(S, mask=mu) - temp = fT.variables['votemper'] + temp = fT.variables["votemper"] T = temp[timestamp, depth, :, :] mu = T == 0 T = np.ma.array(T, mask=mu) @@ -329,97 +331,105 @@ def get_EC_observations(station, start_day, end_day): # These ids have been identified as interesting locations in the SoG. # It is not necessarily a complete list. station_ids = { - 'Pam Rocks': 6817, - 'Sisters Islet': 6813, - 'Entrance Island': 29411, - 'Sand Heads': 6831, + "Pam Rocks": 6817, + "Sisters Islet": 6813, + "Entrance Island": 29411, + "Sand Heads": 6831, # NOTE: YVR station name changed in 2013. Older data use 889. - 'YVR': 51442, - 'YVR_old': 889, - 'Point Atkinson': 844, - 'Victoria': 10944, - 'Campbell River': 145, + "YVR": 51442, + "YVR_old": 889, + "Point Atkinson": 844, + "Victoria": 10944, + "Campbell River": 145, # NOTE: not exactly Patricia Bay. 
The EC name is Victoria Hartland CS - 'Patricia Bay': 11007, - 'Esquimalt': 52, - 'Discovery Island': 27226, - 'Race Rocks': 10943, - 'Saturna Island': 96, - 'Tsawwassen': 50228, - 'Ballenas Islands': 138, - 'Comox Airport': 155, - 'Squamish Airport': 336, + "Patricia Bay": 11007, + "Esquimalt": 52, + "Discovery Island": 27226, + "Race Rocks": 10943, + "Saturna Island": 96, + "Tsawwassen": 50228, + "Ballenas Islands": 138, + "Comox Airport": 155, + "Squamish Airport": 336, } # Create aliases to recognize places.py definitions names = [ - 'Campbell River', 'Entrance Island', 'Pam Rocks', 'Patricia Bay', - 'Point Atkinson', 'Sand Heads', 'Sisters Islet', + "Campbell River", + "Entrance Island", + "Pam Rocks", + "Patricia Bay", + "Point Atkinson", + "Sand Heads", + "Sisters Islet", ] aliases = [ - 'CampbellRiver', 'EntranceIsland', 'PamRocks', 'PatriciaBay', - 'PointAtkinson', 'Sandheads', 'SistersIsland', + "CampbellRiver", + "EntranceIsland", + "PamRocks", + "PatriciaBay", + "PointAtkinson", + "Sandheads", + "SistersIsland", ] for alias, name in zip(aliases, names): station_ids[alias] = station_ids[name] - st_ar = arrow.Arrow.strptime(start_day, '%d-%b-%Y') - end_ar = arrow.Arrow.strptime(end_day, '%d-%b-%Y') + st_ar = arrow.Arrow.strptime(start_day, "%d-%b-%Y") + end_ar = arrow.Arrow.strptime(end_day, "%d-%b-%Y") PST = tz.tzoffset("PST", -28800) wind_spd, wind_dir, temp = [], [], [] - url = 'https://climate.weather.gc.ca/climate_data/bulk_data_e.html' + url = "https://climate.weather.gc.ca/climate_data/bulk_data_e.html" query = { - 'timeframe': 1, - 'stationID': station_ids[station], - 'format': 'xml', - 'Year': st_ar.year, - 'Month': st_ar.month, - 'Day': 1, + "timeframe": 1, + "stationID": station_ids[station], + "format": "xml", + "Year": st_ar.year, + "Month": st_ar.month, + "Day": 1, } response = requests.get(url, params=query) tree = ElementTree.parse(BytesIO(response.content)) root = tree.getroot() # read lat and lon - for raw_info in root.findall('stationinformation'): - lat = float(raw_info.find('latitude').text) - lon = float(raw_info.find('longitude').text) + for raw_info in root.findall("stationinformation"): + lat = float(raw_info.find("latitude").text) + lon = float(raw_info.find("longitude").text) # read data - raw_data = root.findall('stationdata') + raw_data = root.findall("stationdata") times = [] for record in raw_data: - day = int(record.get('day')) - hour = int(record.get('hour')) - year = int(record.get('year')) - month = int(record.get('month')) + day = int(record.get("day")) + hour = int(record.get("hour")) + year = int(record.get("year")) + month = int(record.get("month")) t = arrow.Arrow(year, month, day, hour, tzinfo=PST) selectors = ( (day == st_ar.day - 1 and hour >= 16) - or - (day >= st_ar.day and day < end_ar.day) - or - (day == end_ar.day and hour < 16) + or (day >= st_ar.day and day < end_ar.day) + or (day == end_ar.day and hour < 16) ) if selectors: try: - wind_spd.append(float(record.find('windspd').text)) - t.to('utc') + wind_spd.append(float(record.find("windspd").text)) + t.to("utc") times.append(t.datetime) except TypeError: - wind_spd.append(float('NaN')) - t.to('utc') + wind_spd.append(float("NaN")) + t.to("utc") times.append(t.datetime) try: - wind_dir.append(float(record.find('winddir').text) * 10) + wind_dir.append(float(record.find("winddir").text) * 10) except: - wind_dir.append(float('NaN')) + wind_dir.append(float("NaN")) try: - temp.append(float(record.find('temp').text)+273) + temp.append(float(record.find("temp").text) + 273) except: - 
temp.append(float('NaN')) + temp.append(float("NaN")) wind_spd = np.array(wind_spd) * 1000 / 3600 # km/hr to m/s - wind_dir = -np.array(wind_dir)+270 # met. direction to cartesian angle - with np.errstate(invalid='ignore'): + wind_dir = -np.array(wind_dir) + 270 # met. direction to cartesian angle + with np.errstate(invalid="ignore"): wind_dir = wind_dir + 360 * (wind_dir < 0) temp = np.array(temp) for i in np.arange(len(times)): @@ -440,22 +450,30 @@ def get_SSH_forcing(boundary, date): :returns: ssh_forc, time_ssh: arrays of the ssh forcing values and corresponding times """ - date_arr = arrow.Arrow.strptime(date, '%d-%b-%Y') + date_arr = arrow.Arrow.strptime(date, "%d-%b-%Y") year = date_arr.year month = date_arr.month month = "%02d" % (month,) - if boundary == 'north': - filen = 'sshNorth' + if boundary == "north": + filen = "sshNorth" else: - filen = 'ssh' - ssh_path = '/data/nsoontie/MEOPAR/NEMO-forcing/open_boundaries/' + \ - boundary + '/ssh/' + filen + '_y' + str(year) + 'm' + str(month)\ - + '.nc' + filen = "ssh" + ssh_path = ( + "/data/nsoontie/MEOPAR/NEMO-forcing/open_boundaries/" + + boundary + + "/ssh/" + + filen + + "_y" + + str(year) + + "m" + + str(month) + + ".nc" + ) fS = NC.Dataset(ssh_path) - ssh_forc = fS.variables['sossheig'] - tss = fS.variables['time_counter'][:] + ssh_forc = fS.variables["sossheig"] + tss = fS.variables["time_counter"][:] l = tss.shape[0] - t = np.linspace(0, l-1, l) # time array + t = np.linspace(0, l - 1, l) # time array time_ssh = convert_date_hours(t, date) return ssh_forc, time_ssh @@ -504,13 +522,15 @@ def load_tidal_predictions(filename): mycsv = list(csv.reader(f)) msl = float(mycsv[1][1]) ttide = pd.read_csv( - filename, skiprows=3, parse_dates=[0], date_parser=dateParserMeasured2) + filename, skiprows=3, parse_dates=[0], date_parser=dateParserMeasured2 + ) ttide = ttide.rename( columns={ - 'time ': 'time', - ' pred_8 ': 'pred_8', - ' pred_all ': 'pred_all', - }) + "time ": "time", + " pred_8 ": "pred_8", + " pred_all ": "pred_all", + } + ) return ttide, msl @@ -531,15 +551,19 @@ def load_observations(start, end, location): :returns: wlev_meas: a dict object with the water level measurements reference to Chart Datum """ - stations = {'PointAtkinson': 7795, 'Victoria': 7120, 'PatriciaBay': 7277, - 'CampbellRiver': 8074} + stations = { + "PointAtkinson": 7795, + "Victoria": 7120, + "PatriciaBay": 7277, + "CampbellRiver": 8074, + } statID_PA = stations[location] - filename = 'wlev_' + str(statID_PA) + '_' + start + '_' + end + '.csv' + filename = "wlev_" + str(statID_PA) + "_" + start + "_" + end + ".csv" tidetools.get_dfo_wlev(statID_PA, start, end) - wlev_meas = pd.read_csv(filename, skiprows=7, parse_dates=[0], - date_parser=dateParserMeasured) - wlev_meas = wlev_meas.rename(columns={'Obs_date': 'time', - 'SLEV(metres)': 'slev'}) + wlev_meas = pd.read_csv( + filename, skiprows=7, parse_dates=[0], date_parser=dateParserMeasured + ) + wlev_meas = wlev_meas.rename(columns={"Obs_date": "time", "SLEV(metres)": "slev"}) return wlev_meas @@ -563,11 +587,11 @@ def observed_anomaly(ttide, wlev_meas, msl): for i in np.arange(0, len(wlev_meas.time)): # check that there is a corresponding time # if any(wlev_pred.time == wlev_meas.time[i]): - ssanomaly[i] = (wlev_meas.slev[i] - - (ttide.pred_all[ttide.time == wlev_meas.time[i]] + - msl)) - if not(ssanomaly[i]): - ssanomaly[i] = float('Nan') + ssanomaly[i] = wlev_meas.slev[i] - ( + ttide.pred_all[ttide.time == wlev_meas.time[i]] + msl + ) + if not (ssanomaly[i]): + ssanomaly[i] = float("Nan") return 
ssanomaly @@ -587,8 +611,7 @@ def modelled_anomaly(sshs, location): :returns: anom: the difference between all_forcing and tidesonly """ - anom = (sshs['all_forcing'][location][:, 0, 0] - - sshs['tidesonly'][location][:, 0, 0]) + anom = sshs["all_forcing"][location][:, 0, 0] - sshs["tidesonly"][location][:, 0, 0] return anom @@ -615,13 +638,13 @@ def correct_model(ssh, ttide, sdt, edt): inds = ttide.time[ttide.time == sdt].index[0] inde = ttide.time[ttide.time == edt].index[0] - difference = ttide.pred_all-ttide.pred_8 + difference = ttide.pred_all - ttide.pred_8 difference = np.array(difference) # average correction over two times to shift to the model 1/2 outputs # question: should I reconsider this calculation by interpolating? - corr = 0.5*(difference[inds:inde] + difference[inds+1:inde+1]) + corr = 0.5 * (difference[inds:inde] + difference[inds + 1 : inde + 1]) - corr_model = ssh+corr + corr_model = ssh + corr return corr_model @@ -650,9 +673,9 @@ def surge_tide(ssh, ttide, sdt, edt): # average correction over two times to shift to the model 1/2 outputs tide = np.array(ttide.pred_all) - tide_corr = 0.5*(tide[inds:inde] + tide[inds+1:inde+1]) + tide_corr = 0.5 * (tide[inds:inde] + tide[inds + 1 : inde + 1]) - surgetide = ssh+tide_corr + surgetide = ssh + tide_corr return surgetide @@ -686,16 +709,17 @@ def get_statistics(obs, model, t_obs, t_model, sdt, edt): """ # truncate model trun_model, trun_tm = truncate( - model, t_model, sdt.replace(minute=30), edt.replace(minute=30)) + model, t_model, sdt.replace(minute=30), edt.replace(minute=30) + ) trun_model = trun_model[:-1] trun_tm = trun_tm[:-1] # truncate and interpolate observations obs_interp = interp_to_model_time(trun_tm, obs, t_obs) # rebase observations # rbase_obs, rbase_to = rebase_obs(trun_obs, trun_to) - error = trun_model-obs_interp + error = trun_model - obs_interp # calculate statistics - gamma2 = np.var(error)/np.var(obs_interp) + gamma2 = np.var(error) / np.var(obs_interp) mean_error = np.mean(error) mean_abs_error = np.mean(np.abs(error)) rms_error = _rmse(error) @@ -710,8 +734,20 @@ def get_statistics(obs, model, t_obs, t_model, sdt, edt): ws = willmott_skill(obs_interp, trun_model) return ( - max_obs, max_model, tmax_obs, tmax_model, mean_error, mean_abs_error, - rms_error, gamma2, corr, ws, mean_obs, mean_model, std_obs, std_model, + max_obs, + max_model, + tmax_obs, + tmax_model, + mean_error, + mean_abs_error, + rms_error, + gamma2, + corr, + ws, + mean_obs, + mean_model, + std_obs, + std_model, ) @@ -735,8 +771,8 @@ def truncate(data, time, sdt, edt): inds = np.where(time == sdt)[0] inde = np.where(time == edt)[0] - data_t = np.array(data[inds:inde + 1]) - time_t = np.array(time[inds:inde + 1]) + data_t = np.array(data[inds : inde + 1]) + time_t = np.array(time[inds : inde + 1]) return data_t, time_t @@ -756,7 +792,7 @@ def rebase_obs(data, time): :returns: rebase_data, rebase_time, the data and times shifted by half an hour """ - rebase_data = 0.5*(data[1:]+data[:-1]) + rebase_data = 0.5 * (data[1:] + data[:-1]) rebase_time = [] for k in range(time.shape[0]): rebase_time.append(time[k].replace(minute=30)) @@ -790,10 +826,10 @@ def willmott_skill(obs, model): mprime = model - obar oprime = obs - obar - diff_sq = np.sum((model-obs)**2) - add_sq = np.sum((np.abs(mprime) + np.abs(oprime))**2) + diff_sq = np.sum((model - obs) ** 2) + add_sq = np.sum((np.abs(mprime) + np.abs(oprime)) ** 2) - ws = 1-diff_sq/add_sq + ws = 1 - diff_sq / add_sq return ws @@ -814,28 +850,31 @@ def get_NOAA_wlev(station_no, start_date, 
end_date): Time zone is UTC """ # Name the output file - outfile = ('wlev_' + str(station_no) + '_' + str(start_date) + - '_' + str(end_date) + '.csv') + outfile = ( + "wlev_" + str(station_no) + "_" + str(start_date) + "_" + str(end_date) + ".csv" + ) # Form urls and html information - st_ar = arrow.Arrow.strptime(start_date, '%d-%b-%Y') - end_ar = arrow.Arrow.strptime(end_date, '%d-%b-%Y') + st_ar = arrow.Arrow.strptime(start_date, "%d-%b-%Y") + end_ar = arrow.Arrow.strptime(end_date, "%d-%b-%Y") - base_url = 'https://tidesandcurrents.noaa.gov' - form_handler = ( - '/stationhome.html?id=' - + str(station_no)) + base_url = "https://tidesandcurrents.noaa.gov" + form_handler = "/stationhome.html?id=" + str(station_no) data_provider = ( - '/api/datagetter?product=hourly_height&application=NOS.COOPS.TAC.WL' - + '&begin_date=' + st_ar.format('YYYYMMDD') + '&end_date=' - + end_ar.format('YYYYMMDD') - + '&datum=MLLW&station='+str(station_no) - + '&time_zone=GMT&units=metric&interval=h&format=csv') + "/api/datagetter?product=hourly_height&application=NOS.COOPS.TAC.WL" + + "&begin_date=" + + st_ar.format("YYYYMMDD") + + "&end_date=" + + end_ar.format("YYYYMMDD") + + "&datum=MLLW&station=" + + str(station_no) + + "&time_zone=GMT&units=metric&interval=h&format=csv" + ) # Go get the data from the DFO site with requests.Session() as s: s.post(base_url) r = s.get(base_url + data_provider) # Write the data to a text file - with open(outfile, 'w') as f: + with open(outfile, "w") as f: f.write(r.text) @@ -853,29 +892,38 @@ def get_NOAA_predictions(station_no, start_date, end_date): Time zone is UTC """ # Name the output file - outfile = ('predictions_' + str(station_no) + '_' + str(start_date) + '_' - + str(end_date) + '.csv') + outfile = ( + "predictions_" + + str(station_no) + + "_" + + str(start_date) + + "_" + + str(end_date) + + ".csv" + ) # Form urls and html information - st_ar = arrow.Arrow.strptime(start_date, '%d-%b-%Y') - end_ar = arrow.Arrow.strptime(end_date, '%d-%b-%Y') + st_ar = arrow.Arrow.strptime(start_date, "%d-%b-%Y") + end_ar = arrow.Arrow.strptime(end_date, "%d-%b-%Y") - base_url = 'https://tidesandcurrents.noaa.gov' - form_handler = ( - '/stationhome.html?id=' - + str(station_no)) + base_url = "https://tidesandcurrents.noaa.gov" + form_handler = "/stationhome.html?id=" + str(station_no) data_provider = ( - '/api/datagetter?product=predictions&application=NOS.COOPS.TAC.WL' - + '&begin_date=' + st_ar.format('YYYYMMDD') + '&end_date=' - + end_ar.format('YYYYMMDD') - + '&datum=MLLW&station='+str(station_no) - + '&time_zone=GMT&units=metric&interval=h&format=csv') + "/api/datagetter?product=predictions&application=NOS.COOPS.TAC.WL" + + "&begin_date=" + + st_ar.format("YYYYMMDD") + + "&end_date=" + + end_ar.format("YYYYMMDD") + + "&datum=MLLW&station=" + + str(station_no) + + "&time_zone=GMT&units=metric&interval=h&format=csv" + ) # Go get the data from the DFO site with requests.Session() as s: s.post(base_url) r = s.get(base_url + data_provider) # Write the data to a text file - with open(outfile, 'w') as f: + with open(outfile, "w") as f: f.write(r.text) @@ -908,39 +956,38 @@ def get_operational_weather(start, end, grid): v10 = [] pres = [] time = [] - st_ar = arrow.Arrow.strptime(start, '%d-%b-%Y') - end_ar = arrow.Arrow.strptime(end, '%d-%b-%Y') + st_ar = arrow.Arrow.strptime(start, "%d-%b-%Y") + end_ar = arrow.Arrow.strptime(end, "%d-%b-%Y") - ops_path = '/ocean/sallen/allen/research/Meopar/Operational/' - opsp_path = '/ocean/nsoontie/MEOPAR/GEM2.5/ops/' + ops_path = 
"/ocean/sallen/allen/research/Meopar/Operational/" + opsp_path = "/ocean/nsoontie/MEOPAR/GEM2.5/ops/" - for r in arrow.Arrow.range('day', st_ar, end_ar): + for r in arrow.Arrow.range("day", st_ar, end_ar): mstr = "{0:02d}".format(r.month) dstr = "{0:02d}".format(r.day) - fstr = 'ops_y' + str(r.year) + 'm' + mstr + 'd' + dstr + '.nc' - f = NC.Dataset(ops_path+fstr) + fstr = "ops_y" + str(r.year) + "m" + mstr + "d" + dstr + ".nc" + f = NC.Dataset(ops_path + fstr) # u - var = f.variables['u_wind'][:, grid[0], grid[1]] + var = f.variables["u_wind"][:, grid[0], grid[1]] u10.extend(var[:]) # v - var = f.variables['v_wind'][:, grid[0], grid[1]] + var = f.variables["v_wind"][:, grid[0], grid[1]] v10.extend(var[:]) # pressure - fpstr = ('slp_corr_ops_y' + str(r.year) + 'm' + mstr + 'd' + dstr - + '.nc') - fP = NC.Dataset(opsp_path+fpstr) - var = fP.variables['atmpres'][:, grid[0], grid[1]] + fpstr = "slp_corr_ops_y" + str(r.year) + "m" + mstr + "d" + dstr + ".nc" + fP = NC.Dataset(opsp_path + fpstr) + var = fP.variables["atmpres"][:, grid[0], grid[1]] pres.extend(var[:]) # time - tim = f.variables['time_counter'] + tim = f.variables["time_counter"] time.extend(tim[:]) - times = convert_date_seconds(time, '01-Jan-1970') + times = convert_date_seconds(time, "01-Jan-1970") u10s = np.array(u10) v10s = np.array(v10) press = np.array(pres) - windspeed = np.sqrt(u10s**2+v10s**2) + windspeed = np.sqrt(u10s**2 + v10s**2) winddir = np.arctan2(v10, u10) * 180 / np.pi winddir = winddir + 360 * (winddir < 0) return windspeed, winddir, press, times @@ -969,12 +1016,12 @@ def interp_to_model_time(time_model, varp, tp): # Determine tp times wrt epc tp_wrt_epoc = [] for t in tp: - tp_wrt_epoc.append((t-epoc).total_seconds()) + tp_wrt_epoc.append((t - epoc).total_seconds()) # Interpolate observations to model times varp_interp = [] for t in time_model: - mod_wrt_epoc = (t-epoc).total_seconds() + mod_wrt_epoc = (t - epoc).total_seconds() varp_interp.append(np.interp(mod_wrt_epoc, tp_wrt_epoc, varp)) return varp_interp diff --git a/SalishSeaTools/salishsea_tools/tidetools.py b/SalishSeaTools/salishsea_tools/tidetools.py index 296d2123..4d4d8340 100644 --- a/SalishSeaTools/salishsea_tools/tidetools.py +++ b/SalishSeaTools/salishsea_tools/tidetools.py @@ -1,4 +1,3 @@ - # Copyright 2013-2021 The Salish Sea MEOPAR contributors # and The University of British Columbia @@ -47,40 +46,16 @@ # /data/dlatorne/MEOPAR/SalishSea/nowcast/08jul15/ocean.output # The freq parameter it the frequency of the tidal consituent in degrees/hour. 
CorrTides = { - 'reftime': datetime.datetime(2014, 9, 10, tzinfo=tz.tzutc()), - 'K1': { - 'freq': 15.041069000, - 'ft': 0.891751, - 'uvt': 262.636797}, - 'O1': { - 'freq': 13.943036, - 'ft': 0.822543, - 'uvt': 81.472430}, - 'Q1': { - 'freq': 13.398661, - 'ft': 0.822543, - 'uvt': 46.278236}, - 'P1': { - 'freq': 14.958932, - 'ft': 1.0000000, - 'uvt': 101.042160}, - 'M2': { - 'freq': 28.984106, - 'ft': 1.035390, - 'uvt': 346.114490}, - 'N2': { - 'freq': 28.439730, - 'ft': 1.035390, - 'uvt': 310.920296}, - 'S2': { - 'freq': 30.000002, - 'ft': 1.0000000, - 'uvt': 0.000000}, - 'K2': { - 'freq': 30.082138, - 'ft': 0.763545, - 'uvt': 344.740346} - } + "reftime": datetime.datetime(2014, 9, 10, tzinfo=tz.tzutc()), + "K1": {"freq": 15.041069000, "ft": 0.891751, "uvt": 262.636797}, + "O1": {"freq": 13.943036, "ft": 0.822543, "uvt": 81.472430}, + "Q1": {"freq": 13.398661, "ft": 0.822543, "uvt": 46.278236}, + "P1": {"freq": 14.958932, "ft": 1.0000000, "uvt": 101.042160}, + "M2": {"freq": 28.984106, "ft": 1.035390, "uvt": 346.114490}, + "N2": {"freq": 28.439730, "ft": 1.035390, "uvt": 310.920296}, + "S2": {"freq": 30.000002, "ft": 1.0000000, "uvt": 0.000000}, + "K2": {"freq": 30.082138, "ft": 0.763545, "uvt": 344.740346}, +} def get_all_perm_dfo_wlev(start_date, end_date): @@ -96,16 +71,16 @@ def get_all_perm_dfo_wlev(start_date, end_date): :returns: Saves text files with water level data at each site """ stations = { - 'Point Atkinson': 7795, - 'Vancouver': 7735, - 'Patricia Bay': 7277, - 'Victoria Harbour': 7120, - 'Bamfield': 8545, - 'Tofino': 8615, - 'Winter Harbour': 8735, - 'Port Hardy': 8408, - 'Campbell River': 8074, - 'New Westminster': 7654, + "Point Atkinson": 7795, + "Vancouver": 7735, + "Patricia Bay": 7277, + "Victoria Harbour": 7120, + "Bamfield": 8545, + "Tofino": 8615, + "Winter Harbour": 8735, + "Port Hardy": 8408, + "Campbell River": 8074, + "New Westminster": 7654, } for ttt in stations: get_dfo_wlev(stations[ttt], start_date, end_date) @@ -127,29 +102,35 @@ def get_dfo_wlev(station_no, start_date, end_date): :returns: Saves text file with water level data at one station """ # Name the output file - outfile = 'wlev_'+str(station_no)+'_'+start_date+'_'+end_date+'.csv' + outfile = "wlev_" + str(station_no) + "_" + start_date + "_" + end_date + ".csv" # Form urls and html information - base_url = 'https://www.meds-sdmm.dfo-mpo.gc.ca/isdm-gdsi/twl-mne/inventory-inventaire/' - form_handler = ( - 'data-donnees-eng.asp?user=isdm-gdsi®ion=PAC&tst=1&no=' - + str(station_no)) + base_url = ( + "https://www.meds-sdmm.dfo-mpo.gc.ca/isdm-gdsi/twl-mne/inventory-inventaire/" + ) + form_handler = "data-donnees-eng.asp?user=isdm-gdsi®ion=PAC&tst=1&no=" + str( + station_no + ) sitedata = { - 'start_period': start_date, - 'end_period': end_date, - 'resolution': 'h', - 'time_zone': 'l', + "start_period": start_date, + "end_period": end_date, + "resolution": "h", + "time_zone": "l", } data_provider = ( - 'download-telecharger.asp' - '?File=E:%5Ciusr_tmpfiles%5CTWL%5C' - + str(station_no) + '-'+start_date + '_slev.csv' - '&Name=' + str(station_no) + '-'+start_date+'_slev.csv') + "download-telecharger.asp" + "?File=E:%5Ciusr_tmpfiles%5CTWL%5C" + + str(station_no) + + "-" + + start_date + + "_slev.csv" + "&Name=" + str(station_no) + "-" + start_date + "_slev.csv" + ) # Go get the data from the DFO site with requests.Session() as s: s.post(base_url + form_handler, data=sitedata) r = s.get(base_url + data_provider) # Write the data to a text file - with open(outfile, 'w') as f: + with open(outfile, "w") as f: 
f.write(r.text) @@ -165,9 +146,9 @@ def dateParserMeasured(s): # Convert the string to a datetime object unaware = datetime.datetime.strptime(s, "%Y/%m/%d %H:%M") # Add in the local time zone (Canada/Pacific) - aware = unaware.replace(tzinfo=pytz.timezone('Canada/Pacific')) + aware = unaware.replace(tzinfo=pytz.timezone("Canada/Pacific")) # Convert to UTC - return aware.astimezone(pytz.timezone('UTC')) + return aware.astimezone(pytz.timezone("UTC")) def read_dfo_wlev_file(filename): @@ -181,9 +162,9 @@ def read_dfo_wlev_file(filename): """ info = pd.read_csv(filename, nrows=4, index_col=0, header=None) wlev_meas = pd.read_csv( - filename, skiprows=7, parse_dates=[0], date_parser=dateParserMeasured) - wlev_meas = wlev_meas.rename( - columns={'Obs_date': 'time', 'SLEV(metres)': 'slev'}) + filename, skiprows=7, parse_dates=[0], date_parser=dateParserMeasured + ) + wlev_meas = wlev_meas.rename(columns={"Obs_date": "time", "SLEV(metres)": "slev"}) # Allocate the variables to nice names stat_name = info[1][0] stat_num = info[1][1] @@ -193,13 +174,12 @@ def read_dfo_wlev_file(filename): # then convert dates to UTC for x in np.arange(0, len(wlev_meas.time)): wlev_meas.time[x] = wlev_meas.time[x].replace( - tzinfo=pytz.timezone('Canada/Pacific')) + tzinfo=pytz.timezone("Canada/Pacific") + ) print(wlev_meas.time[x]) - wlev_meas.time[x] = wlev_meas.time[x].astimezone(pytz.timezone('UTC')) + wlev_meas.time[x] = wlev_meas.time[x].astimezone(pytz.timezone("UTC")) print(wlev_meas.time[x]) - return ( - wlev_meas.time, wlev_meas.slev, - stat_name, stat_num, stat_lat, stat_lon) + return (wlev_meas.time, wlev_meas.slev, stat_name, stat_num, stat_lat, stat_lon) def get_amp_phase_data(runname, loc): @@ -219,27 +199,28 @@ def get_amp_phase_data(runname, loc): :returns: mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha """ - if runname == 'concepts110': - mod_M2_amp, mod_M2_pha = get_netcdf_amp_phase_data_concepts110( - loc + runname) + if runname == "concepts110": + mod_M2_amp, mod_M2_pha = get_netcdf_amp_phase_data_concepts110(loc + runname) mod_K1_amp = 0.0 mod_K1_pha = 0.0 - elif runname == 'jpp72': + elif runname == "jpp72": mod_M2_amp, mod_M2_pha = get_netcdf_amp_phase_data_jpp72(loc + runname) mod_K1_amp = 0.0 mod_K1_pha = 0.0 - elif runname == 'composite': + elif runname == "composite": # 'composite' was the first set of runs where the harmonics were # combined manually mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha = get_composite_harms2() elif type(runname) is not str and len(runname) > 1: # Combine the harmonics from a set of runs mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha = get_composite_harms( - runname, loc) + runname, loc + ) else: # Get the harmonics for a specific run - mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha = \ - get_netcdf_amp_phase_data(loc + runname) + mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha = get_netcdf_amp_phase_data( + loc + runname + ) return mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha @@ -263,14 +244,13 @@ def plot_amp_phase_maps(runname, loc, grid): :returns: plots the amplitude and phase """ - mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha = get_amp_phase_data( - runname, loc) + mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha = get_amp_phase_data(runname, loc) bathy, X, Y = get_bathy_data(grid) - plot_amp_map(X, Y, grid, mod_M2_amp, 'M2') - plot_pha_map(X, Y, grid, mod_M2_pha, 'M2') - if runname != 'concepts110' and runname != 'jpp72': - plot_amp_map(X, Y, grid, mod_K1_amp, 'K1') - plot_pha_map(X, Y, grid, mod_K1_pha, 'K1') + plot_amp_map(X, Y, grid, mod_M2_amp, 
"M2") + plot_pha_map(X, Y, grid, mod_M2_pha, "M2") + if runname != "concepts110" and runname != "jpp72": + plot_amp_map(X, Y, grid, mod_K1_amp, "K1") + plot_pha_map(X, Y, grid, mod_K1_pha, "K1") def get_netcdf_amp_phase_data(loc): @@ -283,16 +263,16 @@ def get_netcdf_amp_phase_data(loc): :returns: model M2 amplitude, model K1 amplitude, model M2 phase, model K1 phase """ - harmT = NC.Dataset(loc+'/Tidal_Harmonics_eta.nc', 'r') + harmT = NC.Dataset(loc + "/Tidal_Harmonics_eta.nc", "r") # Get imaginary and real components - mod_M2_eta_real = harmT.variables['M2_eta_real'][0, :, :] - mod_M2_eta_imag = harmT.variables['M2_eta_imag'][0, :, :] - mod_K1_eta_real = harmT.variables['K1_eta_real'][0, :, :] - mod_K1_eta_imag = harmT.variables['K1_eta_imag'][0, :, :] + mod_M2_eta_real = harmT.variables["M2_eta_real"][0, :, :] + mod_M2_eta_imag = harmT.variables["M2_eta_imag"][0, :, :] + mod_K1_eta_real = harmT.variables["K1_eta_real"][0, :, :] + mod_K1_eta_imag = harmT.variables["K1_eta_imag"][0, :, :] # Convert to amplitude and phase - mod_M2_amp = np.sqrt(mod_M2_eta_real**2+mod_M2_eta_imag**2) + mod_M2_amp = np.sqrt(mod_M2_eta_real**2 + mod_M2_eta_imag**2) mod_M2_pha = -np.degrees(np.arctan2(mod_M2_eta_imag, mod_M2_eta_real)) - mod_K1_amp = np.sqrt(mod_K1_eta_real**2+mod_K1_eta_imag**2) + mod_K1_amp = np.sqrt(mod_K1_eta_real**2 + mod_K1_eta_imag**2) mod_K1_pha = -np.degrees(np.arctan2(mod_K1_eta_imag, mod_K1_eta_real)) return mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha @@ -303,12 +283,12 @@ def get_netcdf_amp_phase_data_jpp72(loc): :returns: model M2 amplitude, model M2 phase """ - harmT = NC.Dataset(loc+'/JPP_1d_20020102_20020104_grid_T.nc', 'r') + harmT = NC.Dataset(loc + "/JPP_1d_20020102_20020104_grid_T.nc", "r") # Get amplitude and phase - mod_M2_x_elev = harmT.variables['M2_x_elev'][0, :, :] # Cj - mod_M2_y_elev = harmT.variables['M2_y_elev'][0, :, :] # Sj + mod_M2_x_elev = harmT.variables["M2_x_elev"][0, :, :] # Cj + mod_M2_y_elev = harmT.variables["M2_y_elev"][0, :, :] # Sj # See section 11.6 of NEMO manual (p223/367) - mod_M2_amp = np.sqrt(mod_M2_x_elev**2+mod_M2_y_elev**2) + mod_M2_amp = np.sqrt(mod_M2_x_elev**2 + mod_M2_y_elev**2) mod_M2_pha = -np.degrees(np.arctan2(mod_M2_y_elev, mod_M2_x_elev)) return mod_M2_amp, mod_M2_pha @@ -319,9 +299,9 @@ def get_netcdf_amp_phase_data_concepts110(loc): :returns: model M2 amplitude, model M2 phase """ - harmT = NC.Dataset(loc+'/WC3_Harmonics_gridT_TIDE2D.nc', 'r') - mod_M2_amp = harmT.variables['M2_amp'][0, :, :] - mod_M2_pha = harmT.variables['M2_pha'][0, :, :] + harmT = NC.Dataset(loc + "/WC3_Harmonics_gridT_TIDE2D.nc", "r") + mod_M2_amp = harmT.variables["M2_amp"][0, :, :] + mod_M2_pha = harmT.variables["M2_pha"][0, :, :] return mod_M2_amp, mod_M2_pha @@ -334,9 +314,9 @@ def get_bathy_data(grid): :returns: bathy, X, Y """ - bathy = grid.variables['Bathymetry'][:, :] - X = grid.variables['nav_lon'][:, :] - Y = grid.variables['nav_lat'][:, :] + bathy = grid.variables["Bathymetry"][:, :] + X = grid.variables["nav_lon"][:, :] + Y = grid.variables["nav_lat"][:, :] return bathy, X, Y @@ -353,11 +333,11 @@ def get_SS_bathy_data(): :returns: bathy, X, Y """ grid = NC.Dataset( - '/ocean/klesouef/meopar/nemo-forcing/grid/bathy_meter_SalishSea.nc', - 'r') - bathy = grid.variables['Bathymetry'][:, :] - X = grid.variables['nav_lon'][:, :] - Y = grid.variables['nav_lat'][:, :] + "/ocean/klesouef/meopar/nemo-forcing/grid/bathy_meter_SalishSea.nc", "r" + ) + bathy = grid.variables["Bathymetry"][:, :] + X = grid.variables["nav_lon"][:, :] + Y = 
grid.variables["nav_lat"][:, :] return bathy, X, Y @@ -374,10 +354,11 @@ def get_SS2_bathy_data(): :returns: bathy, X, Y """ grid = NC.Dataset( - '/ocean/jieliu/research/meopar/nemo-forcing/grid/bathy_meter_SalishSea2.nc', 'r') - bathy = grid.variables['Bathymetry'][:, :] - X = grid.variables['nav_lon'][:, :] - Y = grid.variables['nav_lat'][:, :] + "/ocean/jieliu/research/meopar/nemo-forcing/grid/bathy_meter_SalishSea2.nc", "r" + ) + bathy = grid.variables["Bathymetry"][:, :] + X = grid.variables["nav_lon"][:, :] + Y = grid.variables["nav_lat"][:, :] return bathy, X, Y @@ -394,16 +375,17 @@ def get_subdomain_bathy_data(): :returns: bathy, X, Y """ grid = NC.Dataset( - '/ocean/klesouef/meopar/nemo-forcing/grid/SubDom_bathy_meter_NOBCchancomp.nc', - 'r') - bathy = grid.variables['Bathymetry'][:, :] - X = grid.variables['nav_lon'][:, :] - Y = grid.variables['nav_lat'][:, :] + "/ocean/klesouef/meopar/nemo-forcing/grid/SubDom_bathy_meter_NOBCchancomp.nc", + "r", + ) + bathy = grid.variables["Bathymetry"][:, :] + X = grid.variables["nav_lon"][:, :] + Y = grid.variables["nav_lat"][:, :] return bathy, X, Y def find_model_level(depth, model_depths, fractional=False): - """ Returns the index of the model level closest to a specified depth. + """Returns the index of the model level closest to a specified depth. The model level can be fractional (ie between two grid points). If depth is between 0 and first model level the result is negative. If depth is greater than the max depth the lowest level is returned. @@ -423,28 +405,32 @@ def find_model_level(depth, model_depths, fractional=False): """ # index for closest value - idx = (np.abs(depth-model_depths)).argmin() + idx = (np.abs(depth - model_depths)).argmin() # If a fractional index is requried... if fractional: - sign = np.sign(depth-model_depths[idx]) - idxpm = idx + sign*1 + sign = np.sign(depth - model_depths[idx]) + idxpm = idx + sign * 1 # Check not to go out of bounds if idxpm < model_depths.shape[0] and idxpm >= 0: - m = (idx-idxpm)/(model_depths[idx] - - model_depths[idxpm])*(depth-model_depths[idx]) + m = ( + (idx - idxpm) + / (model_depths[idx] - model_depths[idxpm]) + * (depth - model_depths[idx]) + ) idx = m + idx # If idxpm < 0 then we are between z=0 and depth of first gridcell if idxpm < 0: # assume z=0 correspons to idx = -model_depths[0] idxpm = -model_depths[0] - m = (idx-idxpm)/(model_depths[idx]-0)*(depth-model_depths[idx]) - idx = m+idx + m = (idx - idxpm) / (model_depths[idx] - 0) * (depth - model_depths[idx]) + idx = m + idx return idx -def find_closest_model_point(lon, lat, X, Y, bathy, lon_tol=0.0052, - lat_tol=0.00189, allow_land=False): +def find_closest_model_point( + lon, lat, X, Y, bathy, lon_tol=0.0052, lat_tol=0.00189, allow_land=False +): """Returns the grid co-ordinates of the closest non-land model point to a specified lon/lat. @@ -454,9 +440,9 @@ def find_closest_model_point(lon, lat, X, Y, bathy, lon_tol=0.0052, Use :py:func:`geo_tools.find_closest_model_point` instead. 
""" raise DeprecationWarning( - 'tidetools.find_closest_model_point() has been replaced by ' - 'geo_tools.find_closest_model_point()') - + "tidetools.find_closest_model_point() has been replaced by " + "geo_tools.find_closest_model_point()" + ) def plot_amp_map(X, Y, grid, amp, constituent_name, figsize=(9, 9)): @@ -488,22 +474,22 @@ def plot_amp_map(X, Y, grid, amp, constituent_name, figsize=(9, 9)): amp = np.ma.masked_equal(amp, 0) # Range of amplitudes to plot fig, ax = plt.subplots(1, 1, figsize=figsize) - viz_tools.set_aspect(ax, coords='map', lats=Y) + viz_tools.set_aspect(ax, coords="map", lats=Y) # Plot the coastline and amplitude contours - viz_tools.plot_coastline(ax, grid, coords='map') + viz_tools.plot_coastline(ax, grid, coords="map") v2 = np.arange(0, 1.80, 0.10) CS = ax.contourf(X, Y, amp, v2) - CS2 = ax.contour(X, Y, amp, v2, colors='black') + CS2 = ax.contour(X, Y, amp, v2, colors="black") # Add a colour bar cbar = fig.colorbar(CS) cbar.add_lines(CS2) - cbar.set_label('amplitude [m]') + cbar.set_label("amplitude [m]") # Set axes labels and title - ax.set_label('longitude (deg)') - ax.set_label('latitude (deg)') + ax.set_label("longitude (deg)") + ax.set_label("latitude (deg)") ax.set_title( - '{constituent} amplitude (m) for model' - .format(constituent=constituent_name)) + "{constituent} amplitude (m) for model".format(constituent=constituent_name) + ) return fig @@ -532,27 +518,36 @@ def plot_pha_map(X, Y, grid, pha, constituent_name, figsize=(9, 9)): # Make 0 values NaNs so they plot blank pha = np.ma.masked_equal(pha, 0) fig, ax = plt.subplots(1, 1, figsize=figsize) - viz_tools.set_aspect(ax, coords='map', lats=Y) + viz_tools.set_aspect(ax, coords="map", lats=Y) # Plot the coastline and the phase contours - viz_tools.plot_coastline(ax, grid, coords='map') + viz_tools.plot_coastline(ax, grid, coords="map") v2 = np.arange(-180, 202.5, 22.5) - CS = ax.contourf(X, Y, pha, v2, cmap='gist_rainbow') - CS2 = ax.contour(X, Y, pha, v2, colors='black', linestyles='solid') + CS = ax.contourf(X, Y, pha, v2, cmap="gist_rainbow") + CS2 = ax.contour(X, Y, pha, v2, colors="black", linestyles="solid") # Add a colour bar cbar = fig.colorbar(CS) cbar.add_lines(CS2) - cbar.set_label('phase [deg]') + cbar.set_label("phase [deg]") # Set axes labels and title - ax.set_label('longitude (deg)') - ax.set_label('latitude (deg)') + ax.set_label("longitude (deg)") + ax.set_label("latitude (deg)") ax.set_title( - '{constituent} phase (deg) for model' - .format(constituent=constituent_name)) + "{constituent} phase (deg) for model".format(constituent=constituent_name) + ) return fig -def plot_scatter_pha_amp(Am, Ao, gm, go, constituent_name, figsize=(12, 6), - split1=0, split2=0, labels=['', '', '']): +def plot_scatter_pha_amp( + Am, + Ao, + gm, + go, + constituent_name, + figsize=(12, 6), + split1=0, + split2=0, + labels=["", "", ""], +): """Plot scatter plot of observed vs. modelled phase and amplitude :arg Am: Modelled amplitude. 
@@ -587,53 +582,69 @@ def plot_scatter_pha_amp(Am, Ao, gm, go, constituent_name, figsize=(12, 6), :rtype: Matplotlib figure """ fig, (ax_amp, ax_pha) = plt.subplots(1, 2, figsize=figsize) - ax_amp.set_aspect('equal') + ax_amp.set_aspect("equal") if split1 == 0: - ax_amp.scatter(Ao, Am, color='blue', edgecolors='blue') + ax_amp.scatter(Ao, Am, color="blue", edgecolors="blue") else: - ax_amp.scatter(Ao[:split1], Am[:split1], color='green', - edgecolors = 'green', label=labels[0]) - ax_amp.scatter(Ao[split1:split2], Am[split1:split2], color='blue', - edgecolors = 'blue', label=labels[1]) - ax_amp.scatter(Ao[split2:], Am[split2:], color='black', - edgecolors = 'black', label=labels[2]) + ax_amp.scatter( + Ao[:split1], Am[:split1], color="green", edgecolors="green", label=labels[0] + ) + ax_amp.scatter( + Ao[split1:split2], + Am[split1:split2], + color="blue", + edgecolors="blue", + label=labels[1], + ) + ax_amp.scatter( + Ao[split2:], Am[split2:], color="black", edgecolors="black", label=labels[2] + ) min_value, max_value = ax_amp.set_xlim(0, 1.2) ax_amp.set_ylim(min_value, max_value) - ax_amp.legend(loc='upper left') + ax_amp.legend(loc="upper left") # Equality line - ax_amp.plot([min_value, max_value], [min_value, max_value], color='red') - ax_amp.set_xlabel('Observed Amplitude [m]') - ax_amp.set_ylabel('Modelled Amplitude [m]') - ax_amp.set_title( - '{constituent} Amplitude'.format(constituent=constituent_name)) + ax_amp.plot([min_value, max_value], [min_value, max_value], color="red") + ax_amp.set_xlabel("Observed Amplitude [m]") + ax_amp.set_ylabel("Modelled Amplitude [m]") + ax_amp.set_title("{constituent} Amplitude".format(constituent=constituent_name)) # Phase plot - ax_pha.set_aspect('equal') + ax_pha.set_aspect("equal") if split1 == 0: - ax_pha.scatter(go, gm, color='blue', edgecolors='blue') + ax_pha.scatter(go, gm, color="blue", edgecolors="blue") else: - ax_pha.scatter(go[:split1], gm[:split1], color='green', - edgecolors='green', label=labels[0]) - ax_pha.scatter(go[split1:split2], gm[split1:split2], color='blue', - edgecolors='blue', label=labels[1]) - ax_pha.scatter(go[split2:], gm[split2:], color='black', - edgecolors='black', label=labels[2]) + ax_pha.scatter( + go[:split1], gm[:split1], color="green", edgecolors="green", label=labels[0] + ) + ax_pha.scatter( + go[split1:split2], + gm[split1:split2], + color="blue", + edgecolors="blue", + label=labels[1], + ) + ax_pha.scatter( + go[split2:], gm[split2:], color="black", edgecolors="black", label=labels[2] + ) min_value, max_value = ax_pha.set_xlim(0, 360) ax_pha.set_ylim(min_value, max_value) - ax_pha.legend(loc='upper left') + ax_pha.legend(loc="upper left") # Equality line - ax_pha.plot([min_value, max_value], [min_value, max_value], color='red') + ax_pha.plot([min_value, max_value], [min_value, max_value], color="red") ticks = range(0, 420, 60) ax_pha.set_xticks(ticks) ax_pha.set_yticks(ticks) - ax_pha.set_xlabel('Observed Phase [deg]') - ax_pha.set_ylabel('Modelled Phase [deg]') - ax_pha.set_title( - '{constituent} Phase'.format(constituent=constituent_name)) + ax_pha.set_xlabel("Observed Phase [deg]") + ax_pha.set_ylabel("Modelled Phase [deg]") + ax_pha.set_title("{constituent} Phase".format(constituent=constituent_name)) return fig def plot_diffs_on_domain( - diffs, meas_wl_harm, calc_method, constituent_name, grid, + diffs, + meas_wl_harm, + calc_method, + constituent_name, + grid, scale_fac=100, legend_scale=0.1, figsize=(9, 9), @@ -672,30 +683,38 @@ def plot_diffs_on_domain( # Plot the bathy underneath bathy, X, Y = 
get_bathy_data(grid) fig, ax = plt.subplots(1, 1, figsize=figsize) - mesh = ax.contourf(X, Y, bathy, cmap='spring') + mesh = ax.contourf(X, Y, bathy, cmap="spring") cbar = fig.colorbar(mesh) - cbar.set_label('depth [m]') + cbar.set_label("depth [m]") # Plot the differences as dots of varying radii # Multiply the differences by something big to see the results # on a map (D is in [m]) ax.scatter( - -meas_wl_harm.Lon, meas_wl_harm.Lat, - s=np.array(diffs) * scale_fac, marker='o', - c='blue', edgecolors='blue') + -meas_wl_harm.Lon, + meas_wl_harm.Lat, + s=np.array(diffs) * scale_fac, + marker="o", + c="blue", + edgecolors="blue", + ) # Legend and labels - ax.text( - -124.4, 47.875, 'Diff = {}cm'.format(legend_scale * scale_fac)) + ax.text(-124.4, 47.875, "Diff = {}cm".format(legend_scale * scale_fac)) ax.scatter( - -124.5, 47.9, - s=legend_scale * scale_fac, marker='o', - c='blue', edgecolors='blue') - ax.set_xlabel('Longitude [deg E]') - ax.set_ylabel('Latitude [deg N]') - ref = ( - 'Foreman et al' if calc_method == 'F95' else 'Masson & Cummins') + -124.5, + 47.9, + s=legend_scale * scale_fac, + marker="o", + c="blue", + edgecolors="blue", + ) + ax.set_xlabel("Longitude [deg E]") + ax.set_ylabel("Latitude [deg N]") + ref = "Foreman et al" if calc_method == "F95" else "Masson & Cummins" ax.set_title( - '{constituent} Differences ({ref})' - .format(constituent=constituent_name, ref=ref)) + "{constituent} Differences ({ref})".format( + constituent=constituent_name, ref=ref + ) + ) return fig @@ -711,16 +730,17 @@ def calc_diffs_meas_mod(runname, loc, grid): go_K1_all, D_F95_K1_all, D_M04_K1_all """ # Read in the measured data from Foreman et al (1995) and US sites - meas_wl_harm = pd.read_csv('obs_tidal_wlev_const_all.csv', sep=';') + meas_wl_harm = pd.read_csv("obs_tidal_wlev_const_all.csv", sep=";") meas_wl_harm = meas_wl_harm.rename( columns={ - 'M2 amp': 'M2_amp', - 'M2 phase (deg UT)': 'M2_pha', - 'K1 amp': 'K1_amp', - 'K1 phase (deg UT)': 'K1_pha', - }) + "M2 amp": "M2_amp", + "M2 phase (deg UT)": "M2_pha", + "K1 amp": "K1_amp", + "K1 phase (deg UT)": "K1_pha", + } + ) # Make an appropriately named csv file for results - outfile = 'wlev_harm_diffs_'+''.join(runname)+'.csv' + outfile = "wlev_harm_diffs_" + "".join(runname) + ".csv" D_F95_M2_all = [] D_M04_M2_all = [] Am_M2_all = [] @@ -735,61 +755,88 @@ def calc_diffs_meas_mod(runname, loc, grid): gm_K1_all = [] go_K1_all = [] # Get harmonics data - mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha = get_amp_phase_data( - runname, loc) + mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha = get_amp_phase_data(runname, loc) # Get bathy data bathy, X, Y = get_bathy_data(grid) - with open(outfile, 'wb') as csvfile: - writer = csv.writer(csvfile, delimiter=',') - writer.writerow([ - 'Station Number', 'Station Name', 'Longitude', 'Latitude', - 'Modelled M2 amp', 'Observed M2 amp', - 'Modelled M2 phase', 'Observed M2 phase', - 'M2 Difference Foreman', 'M2 Difference Masson', - 'Modelled K1 amp', 'Observed K1 amp', - 'Modelled K1 phase', 'Observed K1 phase', - 'K1 Difference Foreman', 'K1 Difference Masson', - ]) + with open(outfile, "wb") as csvfile: + writer = csv.writer(csvfile, delimiter=",") + writer.writerow( + [ + "Station Number", + "Station Name", + "Longitude", + "Latitude", + "Modelled M2 amp", + "Observed M2 amp", + "Modelled M2 phase", + "Observed M2 phase", + "M2 Difference Foreman", + "M2 Difference Masson", + "Modelled K1 amp", + "Observed K1 amp", + "Modelled K1 phase", + "Observed K1 phase", + "K1 Difference Foreman", + "K1 Difference 
Masson", + ] + ) for t in np.arange(0, len(meas_wl_harm.Lat)): x1, y1 = geo_tools.find_closest_model_point( - -meas_wl_harm.Lon[t], meas_wl_harm.Lat[t], - X, Y, land_mask=bathy.mask) + -meas_wl_harm.Lon[t], meas_wl_harm.Lat[t], X, Y, land_mask=bathy.mask + ) if x1: # Observed constituents - Ao_M2 = meas_wl_harm.M2_amp[t]/100 # [m] - go_M2 = meas_wl_harm.M2_pha[t] # [degrees UTC] - Ao_K1 = meas_wl_harm.K1_amp[t]/100 # [m] - go_K1 = meas_wl_harm.K1_pha[t] # [degrees UTC] + Ao_M2 = meas_wl_harm.M2_amp[t] / 100 # [m] + go_M2 = meas_wl_harm.M2_pha[t] # [degrees UTC] + Ao_K1 = meas_wl_harm.K1_amp[t] / 100 # [m] + go_K1 = meas_wl_harm.K1_pha[t] # [degrees UTC] # Modelled constituents Am_M2 = mod_M2_amp[x1, y1] # [m] - gm_M2 = angles.normalize( - mod_M2_pha[x1, y1], 0, 360) # [degrees ????] + gm_M2 = angles.normalize(mod_M2_pha[x1, y1], 0, 360) # [degrees ????] Am_K1 = mod_K1_amp[x1, y1] # [m] - gm_K1 = angles.normalize( - mod_K1_pha[x1, y1], 0, 360) # [degrees ????] + gm_K1 = angles.normalize(mod_K1_pha[x1, y1], 0, 360) # [degrees ????] # Calculate differences two ways D_F95_M2 = sqrt( - (Ao_M2*np.cos(radians(go_M2)) - - Am_M2*np.cos(radians(gm_M2)))**2 - + (Ao_M2*np.sin(radians(go_M2)) - - Am_M2*np.sin(radians(gm_M2)))**2) + (Ao_M2 * np.cos(radians(go_M2)) - Am_M2 * np.cos(radians(gm_M2))) + ** 2 + + (Ao_M2 * np.sin(radians(go_M2)) - Am_M2 * np.sin(radians(gm_M2))) + ** 2 + ) D_M04_M2 = sqrt( 0.5 * (Am_M2**2 + Ao_M2**2) - - Am_M2*Ao_M2*cos(radians(gm_M2-go_M2))) + - Am_M2 * Ao_M2 * cos(radians(gm_M2 - go_M2)) + ) D_F95_K1 = sqrt( - (Ao_K1*np.cos(radians(go_K1)) - - Am_K1*np.cos(radians(gm_K1)))**2 - + (Ao_K1*np.sin(radians(go_K1)) - - Am_K1*np.sin(radians(gm_K1)))**2) + (Ao_K1 * np.cos(radians(go_K1)) - Am_K1 * np.cos(radians(gm_K1))) + ** 2 + + (Ao_K1 * np.sin(radians(go_K1)) - Am_K1 * np.sin(radians(gm_K1))) + ** 2 + ) D_M04_K1 = sqrt( 0.5 * (Am_K1**2 + Ao_K1**2) - - Am_K1*Ao_K1*cos(radians(gm_K1-go_K1))) + - Am_K1 * Ao_K1 * cos(radians(gm_K1 - go_K1)) + ) # Write results to csv - writer.writerow([ - str(t+1), meas_wl_harm.Site[t], - -meas_wl_harm.Lon[t], meas_wl_harm.Lat[t], - Am_M2, Ao_M2, gm_M2, go_M2, D_F95_M2, D_M04_M2, - Am_K1, Ao_K1, gm_K1, go_K1, D_F95_K1, D_M04_K1]) + writer.writerow( + [ + str(t + 1), + meas_wl_harm.Site[t], + -meas_wl_harm.Lon[t], + meas_wl_harm.Lat[t], + Am_M2, + Ao_M2, + gm_M2, + go_M2, + D_F95_M2, + D_M04_M2, + Am_K1, + Ao_K1, + gm_K1, + go_K1, + D_F95_K1, + D_M04_K1, + ] + ) # Append the latest result Am_M2_all.append(float(Am_M2)) Ao_M2_all.append(float(Ao_M2)) @@ -806,16 +853,32 @@ def calc_diffs_meas_mod(runname, loc, grid): else: # If no point found, fill difference fields with 9999 print( - 'No point found in current domain for station ' - + str(t+1)+' :(') - writer.writerow([ - str(t+1), meas_wl_harm.Site[t], - -meas_wl_harm.Lon[t], meas_wl_harm.Lat[t], - 9999, 9999]) + "No point found in current domain for station " + str(t + 1) + " :(" + ) + writer.writerow( + [ + str(t + 1), + meas_wl_harm.Site[t], + -meas_wl_harm.Lon[t], + meas_wl_harm.Lat[t], + 9999, + 9999, + ] + ) return ( - meas_wl_harm, Am_M2_all, Ao_M2_all, gm_M2_all, go_M2_all, - D_F95_M2_all, D_M04_M2_all, Am_K1_all, Ao_K1_all, - gm_K1_all, go_K1_all, D_F95_K1_all, D_M04_K1_all, + meas_wl_harm, + Am_M2_all, + Ao_M2_all, + gm_M2_all, + go_M2_all, + D_F95_M2_all, + D_M04_M2_all, + Am_K1_all, + Ao_K1_all, + gm_K1_all, + go_K1_all, + D_F95_K1_all, + D_M04_K1_all, ) @@ -829,7 +892,8 @@ def haversine(lon1, lat1, lon2, lat2): Use :py:func:`geo_tools.haversine` instead. 
""" raise DeprecationWarning( - 'tidetools.haversine() has been replaced by geo_tools.haversine()') + "tidetools.haversine() has been replaced by geo_tools.haversine()" + ) def plot_meas_mod_locations(measlon, measlat, modlon, modlat, X, Y, bathy): @@ -862,11 +926,11 @@ def plot_meas_mod_locations(measlon, measlat, modlon, modlat, X, Y, bathy): """ plt.contourf(X, Y, bathy) plt.colorbar() - plt.title('Domain of model (depths in m)') - plt.plot(modlon, modlat, 'g.', markersize=10, label='model') - plt.plot(measlon, measlat, 'm.', markersize=10, label='measured') - plt.xlim([modlon-0.1, modlon+0.1]) - plt.ylim([modlat-0.1, modlat+0.1]) + plt.title("Domain of model (depths in m)") + plt.plot(modlon, modlat, "g.", markersize=10, label="model") + plt.plot(measlon, measlat, "m.", markersize=10, label="measured") + plt.xlim([modlon - 0.1, modlon + 0.1]) + plt.ylim([modlat - 0.1, modlat + 0.1]) plt.legend(numpoints=1) @@ -900,25 +964,37 @@ def plot_wlev_const_transect(savename, statnums, runname, loc, grid, *args): # runname1, loc1, runname2, loc2 fig1 = plt.figure(figsize=(15, 5)) ax1 = fig1.add_subplot(111) - ax1.set_xlabel('Station number [-]') - ax1.set_ylabel('M2 amplitude [m]') + ax1.set_xlabel("Station number [-]") + ax1.set_ylabel("M2 amplitude [m]") fig2 = plt.figure(figsize=(15, 5)) ax2 = fig2.add_subplot(111) - ax2.set_xlabel('Station number [-]') - ax2.set_ylabel('K1 amplitude [m]') + ax2.set_xlabel("Station number [-]") + ax2.set_ylabel("K1 amplitude [m]") fig3 = plt.figure(figsize=(15, 5)) ax3 = fig3.add_subplot(111) - ax3.set_xlabel('Station number[-]') - ax3.set_ylabel('M2 phase [degrees]') + ax3.set_xlabel("Station number[-]") + ax3.set_ylabel("M2 phase [degrees]") fig4 = plt.figure(figsize=(15, 5)) ax4 = fig4.add_subplot(111) - ax4.set_xlabel('Station number[-]') - ax4.set_ylabel('K1 phase [degrees]') + ax4.set_xlabel("Station number[-]") + ax4.set_ylabel("K1 phase [degrees]") # Get the modelled data - (meas_wl_harm, Am_M2_all, Ao_M2_all, gm_M2_all, go_M2_all, D_F95_M2_all, - D_M04_M2_all, Am_K1_all, Ao_K1_all, gm_K1_all, go_K1_all, D_F95_K1_all, - D_M04_K1_all) = calc_diffs_meas_mod(runname, loc, grid) + ( + meas_wl_harm, + Am_M2_all, + Ao_M2_all, + gm_M2_all, + go_M2_all, + D_F95_M2_all, + D_M04_M2_all, + Am_K1_all, + Ao_K1_all, + gm_K1_all, + go_K1_all, + D_F95_K1_all, + D_M04_K1_all, + ) = calc_diffs_meas_mod(runname, loc, grid) Am_M2_all = np.array(Am_M2_all) Ao_M2_all = np.array(Ao_M2_all) gm_M2_all = np.array(gm_M2_all) @@ -934,23 +1010,34 @@ def plot_wlev_const_transect(savename, statnums, runname, loc, grid, *args): some_model_phas_K1 = np.array([gm_K1_all[statnums]]) x = np.array(range(0, len(statnums))) # Plot the M2 model data - ax1.plot(x, some_model_amps_M2[0, :], 'b-o', label='single model') + ax1.plot(x, some_model_amps_M2[0, :], "b-o", label="single model") # Plot the K1 model data - ax2.plot(x, some_model_amps_K1[0, :], 'b--o', label='single model') - ax3.plot(x, some_model_phas_M2[0, :], 'b-o', label='single model') - ax4.plot(x, some_model_phas_K1[0, :], 'b--o', label='single model') + ax2.plot(x, some_model_amps_K1[0, :], "b--o", label="single model") + ax3.plot(x, some_model_phas_M2[0, :], "b-o", label="single model") + ax4.plot(x, some_model_phas_K1[0, :], "b--o", label="single model") if len(args) > 0: # Assuming we will only be adding an additional 3 lines, # define 3 colours - colours = ['g', 'm', 'k', 'r', 'y'] - for r in range(0, int(len(args)/2)): - runname = args[2*r] - loc = args[2*r+1] - (meas_wl_harm, Am_M2_all, Ao_M2_all, gm_M2_all, go_M2_all, - 
D_F95_M2_all, D_M04_M2_all, Am_K1_all, Ao_K1_all, gm_K1_all, - go_K1_all, D_F95_K1_all, D_M04_K1_all) = calc_diffs_meas_mod( - runname, loc, grid) + colours = ["g", "m", "k", "r", "y"] + for r in range(0, int(len(args) / 2)): + runname = args[2 * r] + loc = args[2 * r + 1] + ( + meas_wl_harm, + Am_M2_all, + Ao_M2_all, + gm_M2_all, + go_M2_all, + D_F95_M2_all, + D_M04_M2_all, + Am_K1_all, + Ao_K1_all, + gm_K1_all, + go_K1_all, + D_F95_K1_all, + D_M04_K1_all, + ) = calc_diffs_meas_mod(runname, loc, grid) Am_M2_all = np.array(Am_M2_all) Ao_M2_all = np.array(Ao_M2_all) gm_M2_all = np.array(gm_M2_all) @@ -964,60 +1051,58 @@ def plot_wlev_const_transect(savename, statnums, runname, loc, grid, *args): some_model_phas_M2 = np.array([gm_M2_all[statnums]]) some_model_phas_K1 = np.array([gm_K1_all[statnums]]) x = np.array(range(0, len(statnums))) - ax1.plot( - x, some_model_amps_M2[0, :], - '-o', color=colours[r], label='model') + ax1.plot(x, some_model_amps_M2[0, :], "-o", color=colours[r], label="model") ax2.plot( - x, some_model_amps_K1[0, :], - '--o', color=colours[r], label='model') - ax3.plot( - x, some_model_phas_M2[0, :], - '-o', color=colours[r], label='model') + x, some_model_amps_K1[0, :], "--o", color=colours[r], label="model" + ) + ax3.plot(x, some_model_phas_M2[0, :], "-o", color=colours[r], label="model") ax4.plot( - x, some_model_phas_K1[0, :], - '--o', color=colours[r], label='model') + x, some_model_phas_K1[0, :], "--o", color=colours[r], label="model" + ) some_meas_amps_M2 = np.array([Ao_M2_all[statnums]]) some_meas_amps_K1 = np.array([Ao_K1_all[statnums]]) some_meas_phas_M2 = np.array([go_M2_all[statnums]]) some_meas_phas_K1 = np.array([go_K1_all[statnums]]) # M2 - ax1.plot(x, some_meas_amps_M2[0, :], 'r-o', label='measured') + ax1.plot(x, some_meas_amps_M2[0, :], "r-o", label="measured") ax1.set_xticks(x) - ax1.set_xticklabels(statnums+1) - ax1.legend(loc='lower right') - ax1.set_title('Line through stations '+str(statnums)) + ax1.set_xticklabels(statnums + 1) + ax1.legend(loc="lower right") + ax1.set_title("Line through stations " + str(statnums)) fig1.savefig( - 'meas_mod_wlev_transect_M2_'+''.join(runname)+'_'+savename+'.pdf') + "meas_mod_wlev_transect_M2_" + "".join(runname) + "_" + savename + ".pdf" + ) # K1 - ax2.plot(x, some_meas_amps_K1[0, :], 'r--o', label='measured') + ax2.plot(x, some_meas_amps_K1[0, :], "r--o", label="measured") ax2.set_xticks(x) - ax2.set_xticklabels(statnums+1) - ax2.legend(loc='lower right') - ax2.set_title('Line through stations '+str(statnums)) + ax2.set_xticklabels(statnums + 1) + ax2.legend(loc="lower right") + ax2.set_title("Line through stations " + str(statnums)) fig2.savefig( - 'meas_mod_wlev_transect_K1_'+''.join(runname)+'_'+savename+'.pdf') + "meas_mod_wlev_transect_K1_" + "".join(runname) + "_" + savename + ".pdf" + ) # M2 - ax3.plot(x, some_meas_phas_M2[0, :], 'r-o', label='measured') + ax3.plot(x, some_meas_phas_M2[0, :], "r-o", label="measured") ax3.set_xticks(x) - ax3.set_xticklabels(statnums+1) - ax3.legend(loc='lower right') - ax3.set_title('Line through stations '+str(statnums)) + ax3.set_xticklabels(statnums + 1) + ax3.legend(loc="lower right") + ax3.set_title("Line through stations " + str(statnums)) fig3.savefig( - 'meas_mod_wlev_transect_M2_phas'+''.join(runname)+'_'+savename+'.pdf') + "meas_mod_wlev_transect_M2_phas" + "".join(runname) + "_" + savename + ".pdf" + ) # K1 - ax4.plot(x, some_meas_phas_K1[0, :], 'r--o', label='measured') + ax4.plot(x, some_meas_phas_K1[0, :], "r--o", label="measured") ax4.set_xticks(x) - 
ax4.set_xticklabels(statnums+1) - ax4.legend(loc='lower right') - ax4.set_title('Line through stations '+str(statnums)) + ax4.set_xticklabels(statnums + 1) + ax4.legend(loc="lower right") + ax4.set_title("Line through stations " + str(statnums)) fig2.savefig( - 'meas_mod_wlev_transect_K1_phas'+''.join(runname)+'_'+savename+'.pdf') + "meas_mod_wlev_transect_K1_phas" + "".join(runname) + "_" + savename + ".pdf" + ) def plot_wlev_transect_map( - grid, stn_nums, - stn_file='obs_tidal_wlev_const_all.csv', - figsize=(9, 9) + grid, stn_nums, stn_file="obs_tidal_wlev_const_all.csv", figsize=(9, 9) ): """Plot a map of the coastline and the transect of water level stations, which are plotted in :py:func:`plot_wlev_M2_const_transect`. @@ -1042,14 +1127,14 @@ def plot_wlev_transect_map( """ fig, ax = plt.subplots(1, 1, figsize=figsize) # Add a coastline - viz_tools.plot_coastline(ax, grid, coords='map') + viz_tools.plot_coastline(ax, grid, coords="map") # Get the measured data - meas_wl_harm = pd.read_csv(stn_file, sep=';') + meas_wl_harm = pd.read_csv(stn_file, sep=";") sitelats = np.array(meas_wl_harm.Lat[stn_nums]) sitelons = np.array(-meas_wl_harm.Lon[stn_nums]) # Plot the transect line - ax.plot(sitelons, sitelats, 'm-o') - ax.set_title('Location of Select Stations') + ax.plot(sitelons, sitelats, "m-o") + ax.set_title("Location of Select Stations") return fig @@ -1066,7 +1151,7 @@ def plot_coastline(grid): :returns: coastline map """ - viz_tools.plot_coastline(plt.gc(), grid, coords='map', color='black') + viz_tools.plot_coastline(plt.gc(), grid, coords="map", color="black") def get_composite_harms2(): @@ -1084,8 +1169,12 @@ def get_composite_harms2(): """ runnames = [ - '50s_15-21Sep', '50s_22-25Sep', '50s_26-29Sep', '50s_30Sep-6Oct', - '50s_7-13Oct'] + "50s_15-21Sep", + "50s_22-25Sep", + "50s_26-29Sep", + "50s_30Sep-6Oct", + "50s_7-13Oct", + ] runlength = np.array([7.0, 4.0, 4.0, 7.0, 7.0]) mod_M2_eta_real1 = 0.0 @@ -1095,25 +1184,24 @@ def get_composite_harms2(): for runnum in range(0, len(runnames)): harmT = NC.Dataset( - '/data/dlatorne/MEOPAR/SalishSea/results/' - + runnames[runnum]+'/Tidal_Harmonics_eta.nc', 'r') + "/data/dlatorne/MEOPAR/SalishSea/results/" + + runnames[runnum] + + "/Tidal_Harmonics_eta.nc", + "r", + ) # Get imaginary and real components - mod_M2_eta_real1 += ( - harmT.variables['M2_eta_real'][0, :, :]*runlength[runnum]) - mod_M2_eta_imag1 += ( - harmT.variables['M2_eta_imag'][0, :, :]*runlength[runnum]) - mod_K1_eta_real1 += ( - harmT.variables['K1_eta_real'][0, :, :]*runlength[runnum]) - mod_K1_eta_imag1 += ( - harmT.variables['K1_eta_imag'][0, :, :]*runlength[runnum]) + mod_M2_eta_real1 += harmT.variables["M2_eta_real"][0, :, :] * runlength[runnum] + mod_M2_eta_imag1 += harmT.variables["M2_eta_imag"][0, :, :] * runlength[runnum] + mod_K1_eta_real1 += harmT.variables["K1_eta_real"][0, :, :] * runlength[runnum] + mod_K1_eta_imag1 += harmT.variables["K1_eta_imag"][0, :, :] * runlength[runnum] totaldays = sum(runlength) - mod_M2_eta_real = mod_M2_eta_real1/totaldays - mod_M2_eta_imag = mod_M2_eta_imag1/totaldays - mod_K1_eta_real = mod_K1_eta_real1/totaldays - mod_K1_eta_imag = mod_K1_eta_imag1/totaldays - mod_M2_amp = np.sqrt(mod_M2_eta_real**2+mod_M2_eta_imag**2) + mod_M2_eta_real = mod_M2_eta_real1 / totaldays + mod_M2_eta_imag = mod_M2_eta_imag1 / totaldays + mod_K1_eta_real = mod_K1_eta_real1 / totaldays + mod_K1_eta_imag = mod_K1_eta_imag1 / totaldays + mod_M2_amp = np.sqrt(mod_M2_eta_real**2 + mod_M2_eta_imag**2) mod_M2_pha = -np.degrees(np.arctan2(mod_M2_eta_imag, 
mod_M2_eta_real)) - mod_K1_amp = np.sqrt(mod_K1_eta_real**2+mod_K1_eta_imag**2) + mod_K1_amp = np.sqrt(mod_K1_eta_real**2 + mod_K1_eta_imag**2) mod_K1_pha = -np.degrees(np.arctan2(mod_K1_eta_imag, mod_K1_eta_real)) return mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha @@ -1135,30 +1223,23 @@ def get_composite_harms(runnames, loc): :rtypes: 4-tuple of numpy.ndarray instances """ results = {} - vars = 'M2_eta_real M2_eta_imag K1_eta_real K1_eta_imag'.split() - runlengths = { - runname: get_run_length(runname, loc) for runname in runnames} + vars = "M2_eta_real M2_eta_imag K1_eta_real K1_eta_imag".split() + runlengths = {runname: get_run_length(runname, loc) for runname in runnames} for k, runname in enumerate(runnames): - filename = os.path.join(loc, runnames[k], 'Tidal_Harmonics_eta.nc') + filename = os.path.join(loc, runnames[k], "Tidal_Harmonics_eta.nc") harmT = NC.Dataset(filename) for var in vars: try: - results[var] += ( - harmT.variables[var][0, ...] * runlengths[runname]) + results[var] += harmT.variables[var][0, ...] * runlengths[runname] except KeyError: - results[var] = ( - harmT.variables[var][0, ...] * runlengths[runname]) + results[var] = harmT.variables[var][0, ...] * runlengths[runname] totaldays = sum(runlengths.itervalues()) for var in vars: results[var] /= totaldays - mod_M2_amp = np.sqrt( - results['M2_eta_real']**2 + results['M2_eta_imag']**2) - mod_M2_pha = -np.degrees( - np.arctan2(results['M2_eta_imag'], results['M2_eta_real'])) - mod_K1_amp = np.sqrt( - results['K1_eta_real']**2 + results['K1_eta_imag']**2) - mod_K1_pha = -np.degrees( - np.arctan2(results['K1_eta_imag'], results['K1_eta_real'])) + mod_M2_amp = np.sqrt(results["M2_eta_real"] ** 2 + results["M2_eta_imag"] ** 2) + mod_M2_pha = -np.degrees(np.arctan2(results["M2_eta_imag"], results["M2_eta_real"])) + mod_K1_amp = np.sqrt(results["K1_eta_real"] ** 2 + results["K1_eta_imag"] ** 2) + mod_K1_pha = -np.degrees(np.arctan2(results["K1_eta_imag"], results["K1_eta_real"])) return mod_M2_amp, mod_K1_amp, mod_M2_pha, mod_K1_pha @@ -1195,51 +1276,49 @@ def get_composite_harms_uv(runname, loc): mod_K1_v_imag1 = 0.0 for runnum in range(0, len(runname)): - harmU = NC.Dataset(loc+runname[runnum]+'/Tidal_Harmonics_U.nc', 'r') + harmU = NC.Dataset(loc + runname[runnum] + "/Tidal_Harmonics_U.nc", "r") # Get imaginary and real components - mod_M2_u_real1 += ( - harmU.variables['M2_u_real'][0, :, :]*runlength[runnum]) - mod_M2_u_imag1 += ( - harmU.variables['M2_u_imag'][0, :, :]*runlength[runnum]) - mod_K1_u_real1 += ( - harmU.variables['K1_u_real'][0, :, :]*runlength[runnum]) - mod_K1_u_imag1 += ( - harmU.variables['K1_u_imag'][0, :, :]*runlength[runnum]) + mod_M2_u_real1 += harmU.variables["M2_u_real"][0, :, :] * runlength[runnum] + mod_M2_u_imag1 += harmU.variables["M2_u_imag"][0, :, :] * runlength[runnum] + mod_K1_u_real1 += harmU.variables["K1_u_real"][0, :, :] * runlength[runnum] + mod_K1_u_imag1 += harmU.variables["K1_u_imag"][0, :, :] * runlength[runnum] for runnum in range(0, len(runname)): - harmV = NC.Dataset(loc+runname[runnum]+'/Tidal_Harmonics_V.nc', 'r') + harmV = NC.Dataset(loc + runname[runnum] + "/Tidal_Harmonics_V.nc", "r") # Get imaginary and real components - mod_M2_v_real1 += ( - harmV.variables['M2_v_real'][0, :, :]*runlength[runnum]) - mod_M2_v_imag1 += ( - harmV.variables['M2_v_imag'][0, :, :]*runlength[runnum]) - mod_K1_v_real1 += ( - harmV.variables['K1_v_real'][0, :, :]*runlength[runnum]) - mod_K1_v_imag1 += ( - harmV.variables['K1_v_imag'][0, :, :]*runlength[runnum]) + mod_M2_v_real1 += 
harmV.variables["M2_v_real"][0, :, :] * runlength[runnum] + mod_M2_v_imag1 += harmV.variables["M2_v_imag"][0, :, :] * runlength[runnum] + mod_K1_v_real1 += harmV.variables["K1_v_real"][0, :, :] * runlength[runnum] + mod_K1_v_imag1 += harmV.variables["K1_v_imag"][0, :, :] * runlength[runnum] totaldays = sum(runlength) - mod_M2_u_real = mod_M2_u_real1/totaldays - mod_M2_u_imag = mod_M2_u_imag1/totaldays - mod_K1_u_real = mod_K1_u_real1/totaldays - mod_K1_u_imag = mod_K1_u_imag1/totaldays - mod_M2_v_real = mod_M2_v_real1/totaldays - mod_M2_v_imag = mod_M2_v_imag1/totaldays - mod_K1_v_real = mod_K1_v_real1/totaldays - mod_K1_v_imag = mod_K1_v_imag1/totaldays - - mod_M2_u_amp = np.sqrt(mod_M2_u_real**2+mod_M2_u_imag**2) + mod_M2_u_real = mod_M2_u_real1 / totaldays + mod_M2_u_imag = mod_M2_u_imag1 / totaldays + mod_K1_u_real = mod_K1_u_real1 / totaldays + mod_K1_u_imag = mod_K1_u_imag1 / totaldays + mod_M2_v_real = mod_M2_v_real1 / totaldays + mod_M2_v_imag = mod_M2_v_imag1 / totaldays + mod_K1_v_real = mod_K1_v_real1 / totaldays + mod_K1_v_imag = mod_K1_v_imag1 / totaldays + + mod_M2_u_amp = np.sqrt(mod_M2_u_real**2 + mod_M2_u_imag**2) mod_M2_u_pha = -np.degrees(np.arctan2(mod_M2_u_imag, mod_M2_u_real)) - mod_K1_u_amp = np.sqrt(mod_K1_u_real**2+mod_K1_u_imag**2) + mod_K1_u_amp = np.sqrt(mod_K1_u_real**2 + mod_K1_u_imag**2) mod_K1_u_pha = -np.degrees(np.arctan2(mod_K1_u_imag, mod_K1_u_real)) - mod_M2_v_amp = np.sqrt(mod_M2_v_real**2+mod_M2_v_imag**2) + mod_M2_v_amp = np.sqrt(mod_M2_v_real**2 + mod_M2_v_imag**2) mod_M2_v_pha = -np.degrees(np.arctan2(mod_M2_v_imag, mod_M2_v_real)) - mod_K1_v_amp = np.sqrt(mod_K1_v_real**2+mod_K1_v_imag**2) + mod_K1_v_amp = np.sqrt(mod_K1_v_real**2 + mod_K1_v_imag**2) mod_K1_v_pha = -np.degrees(np.arctan2(mod_K1_v_imag, mod_K1_v_real)) return ( - mod_M2_u_amp, mod_M2_u_pha, mod_M2_v_amp, mod_M2_v_pha, mod_K1_u_amp, - mod_K1_u_pha, mod_K1_v_amp, mod_K1_v_pha, + mod_M2_u_amp, + mod_M2_u_pha, + mod_M2_v_amp, + mod_M2_v_pha, + mod_K1_u_amp, + mod_K1_u_pha, + mod_K1_v_amp, + mod_K1_v_pha, ) @@ -1258,18 +1337,18 @@ def get_current_harms(runname, loc): :returns: mod_M2_u_amp, mod_M2_u_pha, mod_M2_v_amp, mod_M2_v_pha """ # u - harmu = NC.Dataset(loc+runname+'/Tidal_Harmonics_U.nc', 'r') - mod_M2_u_real = harmu.variables['M2_u_real'][0, :, :] - mod_M2_u_imag = harmu.variables['M2_u_imag'][0, :, :] + harmu = NC.Dataset(loc + runname + "/Tidal_Harmonics_U.nc", "r") + mod_M2_u_real = harmu.variables["M2_u_real"][0, :, :] + mod_M2_u_imag = harmu.variables["M2_u_imag"][0, :, :] # Convert to amplitude and phase - mod_M2_u_amp = np.sqrt(mod_M2_u_real**2+mod_M2_u_imag**2) + mod_M2_u_amp = np.sqrt(mod_M2_u_real**2 + mod_M2_u_imag**2) mod_M2_u_pha = -np.degrees(np.arctan2(mod_M2_u_imag, mod_M2_u_real)) # v - harmv = NC.Dataset(loc+runname+'/Tidal_Harmonics_V.nc', 'r') - mod_M2_v_real = harmv.variables['M2_v_real'][0, :, :] - mod_M2_v_imag = harmv.variables['M2_v_imag'][0, :, :] + harmv = NC.Dataset(loc + runname + "/Tidal_Harmonics_V.nc", "r") + mod_M2_v_real = harmv.variables["M2_v_real"][0, :, :] + mod_M2_v_imag = harmv.variables["M2_v_imag"][0, :, :] # Convert to amplitude and phase - mod_M2_v_amp = np.sqrt(mod_M2_v_real**2+mod_M2_v_imag**2) + mod_M2_v_amp = np.sqrt(mod_M2_v_real**2 + mod_M2_v_imag**2) mod_M2_v_pha = -np.degrees(np.arctan2(mod_M2_v_imag, mod_M2_v_real)) return mod_M2_u_amp, mod_M2_u_pha, mod_M2_v_amp, mod_M2_v_pha @@ -1286,11 +1365,11 @@ def get_run_length(runname, loc): :returns: length of run in days """ - resfile = os.path.join(loc, runname, 'namelist') + 
resfile = os.path.join(loc, runname, "namelist") nl = namelist.namelist2dict(resfile) - timestep = nl['namdom'][0]['rn_rdt'] - start_time = nl['nam_diaharm'][0]['nit000_han'] - end_time = nl['nam_diaharm'][0]['nitend_han'] + timestep = nl["namdom"][0]["rn_rdt"] + start_time = nl["nam_diaharm"][0]["nit000_han"] + end_time = nl["nam_diaharm"][0]["nitend_han"] run_length = (end_time - start_time + 1) * timestep / 60 / 60 / 24 # days return run_length @@ -1356,17 +1435,17 @@ def ap2ep(Au, PHIu, Av, PHIv): # Version 2, May 2002 # Assume the input phase lags are in degrees and convert them in radians. - PHIu = PHIu/180*pi - PHIv = PHIv/180*pi + PHIu = PHIu / 180 * pi + PHIv = PHIv / 180 * pi # Make complex amplitudes for u and v i = cmath.sqrt(-1) - u = Au*cmath.exp(-i*PHIu) - v = Av*cmath.exp(-i*PHIv) + u = Au * cmath.exp(-i * PHIu) + v = Av * cmath.exp(-i * PHIv) # Calculate complex radius of anticlockwise and clockwise circles: - wp = (u+i*v)/2 # for anticlockwise circles - wm = ((u-i*v)/2).conjugate() # for clockwise circles + wp = (u + i * v) / 2 # for anticlockwise circles + wm = ((u - i * v) / 2).conjugate() # for clockwise circles # and their amplitudes and angles Wp = abs(wp) Wm = abs(wm) @@ -1374,35 +1453,35 @@ def ap2ep(Au, PHIu, Av, PHIv): THETAm = cmath.phase(wm) # calculate ep-parameters (ellipse parameters) - SEMA = Wp+Wm # Semi Major Axis, or maximum speed - SEMI = Wp-Wm # Semin Minor Axis, or minimum speed - ECC = SEMI/SEMA # Eccentricity + SEMA = Wp + Wm # Semi Major Axis, or maximum speed + SEMI = Wp - Wm # Semin Minor Axis, or minimum speed + ECC = SEMI / SEMA # Eccentricity # Phase angle, the time (in angle) when the velocity reaches the maximum - PHA = (THETAm-THETAp)/2 + PHA = (THETAm - THETAp) / 2 # Inclination, the angle between the semi major axis and x-axis (or u-axis) - INC = (THETAm+THETAp)/2 + INC = (THETAm + THETAp) / 2 # convert to degrees for output - PHA = PHA/pi*180 - INC = INC/pi*180 - THETAp = THETAp/pi*180 - THETAm = THETAm/pi*180 + PHA = PHA / pi * 180 + INC = INC / pi * 180 + THETAp = THETAp / pi * 180 + THETAm = THETAm / pi * 180 # Map the resultant angles to the range of [0, 360]. # PHA=mod(PHA+360, 360) - PHA = (PHA+360) % 360 + PHA = (PHA + 360) % 360 # INC=mod(INC+360, 360) - INC = (INC+360) % 360 + INC = (INC + 360) % 360 # Mar. 2, 2002 Revision by Zhigang Xu (REVISION_1) # Change the southern major axes to northern major axes to conform the tidal # analysis convention (cf. Foreman, 1977, p. 13, Manual For Tidal Currents # Analysis Prediction, available in www.ios.bc.ca/ios/osap/people/foreman.htm) - k = float(INC)/180 - INC = INC-k*180 - PHA = PHA+k*180 + k = float(INC) / 180 + INC = INC - k * 180 + PHA = PHA + k * 180 PHA = PHA % 360 - return SEMA, ECC, INC, PHA + return SEMA, ECC, INC, PHA # Authorship Copyright: # # The author retains the copyright of this program, while you are welcome @@ -1433,8 +1512,8 @@ def ap2ep(Au, PHIu, Av, PHIv): # convention. 
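# Illustrative usage sketch (assumed values, not part of this patch): ap2ep()
# converts u/v tidal-current amplitudes and phase lags into current-ellipse
# parameters, e.g.
#
#     sema, ecc, inc, pha = ap2ep(Au=0.30, PHIu=45.0, Av=0.12, PHIv=110.0)
#
# where sema is the semi-major axis (maximum speed, same units as Au/Av),
# ecc the eccentricity (semi-minor / semi-major; negative values indicate
# clockwise rotation), inc the inclination of the northern semi-major axis
# in degrees, and pha the phase (degrees) at which the maximum speed occurs.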
-def convert_to_hours(time_model, reftime='None'): - """ Interpolates the datetime values into an array of hours from a +def convert_to_hours(time_model, reftime="None"): + """Interpolates the datetime values into an array of hours from a determined starting point :arg time_model: array of model output time as datetime objects @@ -1448,13 +1527,13 @@ def convert_to_hours(time_model, reftime='None'): :returns tp_wrt_epoch, times with respect to the beginning of the input in seconds """ - if reftime == 'None': + if reftime == "None": epoc = time_model[0] else: epoc = reftime tp_wrt_epoc = [] for t in time_model: - tp_wrt_epoc.append((t-epoc).total_seconds()/3600) + tp_wrt_epoc.append((t - epoc).total_seconds() / 3600) return tp_wrt_epoc @@ -1479,10 +1558,11 @@ def double(x, M2amp, M2pha, K1amp, K1pha, mean): :returns:(mean + M2amp*np.cos(M2FREQ*x-M2pha*np.pi/180.) +K1amp*np.cos(K1FREQ*x-K1pha*np.pi/180.)) """ - return( - mean + - M2amp * np.cos((CorrTides['M2']['freq'] * x - M2pha) * np.pi / 180.) + - K1amp * np.cos((CorrTides['K1']['freq'] * x - K1pha) * np.pi / 180)) + return ( + mean + + M2amp * np.cos((CorrTides["M2"]["freq"] * x - M2pha) * np.pi / 180.0) + + K1amp * np.cos((CorrTides["K1"]["freq"] * x - K1pha) * np.pi / 180) + ) def quadruple(x, M2amp, M2pha, K1amp, K1pha, S2amp, S2pha, O1amp, O1pha, mean): @@ -1500,18 +1580,31 @@ def quadruple(x, M2amp, M2pha, K1amp, K1pha, S2amp, S2pha, O1amp, O1pha, mean): :returns: function for fitting 4 frequencies """ - return( - mean + - M2amp * np.cos((CorrTides['M2']['freq'] * x - M2pha) * np.pi / 180) + - K1amp * np.cos((CorrTides['K1']['freq'] * x - K1pha) * np.pi / 180) + - S2amp * np.cos((CorrTides['S2']['freq'] * x - S2pha) * np.pi / 180) + - O1amp * np.cos((CorrTides['O1']['freq'] * x - O1pha) * np.pi / 180)) + return ( + mean + + M2amp * np.cos((CorrTides["M2"]["freq"] * x - M2pha) * np.pi / 180) + + K1amp * np.cos((CorrTides["K1"]["freq"] * x - K1pha) * np.pi / 180) + + S2amp * np.cos((CorrTides["S2"]["freq"] * x - S2pha) * np.pi / 180) + + O1amp * np.cos((CorrTides["O1"]["freq"] * x - O1pha) * np.pi / 180) + ) def sextuple( - x, M2amp, M2pha, K1amp, K1pha, - S2amp, S2pha, O1amp, O1pha, - N2amp, N2pha, P1amp, P1pha, mean): + x, + M2amp, + M2pha, + K1amp, + K1pha, + S2amp, + S2pha, + O1amp, + O1pha, + N2amp, + N2pha, + P1amp, + P1pha, + mean, +): """Function for the fit, assuming 6 constituents of importance are: M2, K2, S1, O1, N2 and P1. 
@@ -1526,21 +1619,37 @@ def sextuple( :returns: function for fitting 6 frequencies """ - return( - mean + - M2amp * np.cos((CorrTides['M2']['freq'] * x - M2pha) * np.pi / 180) + - K1amp * np.cos((CorrTides['K1']['freq'] * x - K1pha) * np.pi / 180) + - S2amp * np.cos((CorrTides['S2']['freq'] * x - S2pha) * np.pi / 180) + - O1amp * np.cos((CorrTides['O1']['freq'] * x - O1pha) * np.pi / 180) + - N2amp * np.cos((CorrTides['N2']['freq'] * x - N2pha) * np.pi / 180) + - P1amp * np.cos((CorrTides['P1']['freq'] * x - P1pha) * np.pi / 180)) + return ( + mean + + M2amp * np.cos((CorrTides["M2"]["freq"] * x - M2pha) * np.pi / 180) + + K1amp * np.cos((CorrTides["K1"]["freq"] * x - K1pha) * np.pi / 180) + + S2amp * np.cos((CorrTides["S2"]["freq"] * x - S2pha) * np.pi / 180) + + O1amp * np.cos((CorrTides["O1"]["freq"] * x - O1pha) * np.pi / 180) + + N2amp * np.cos((CorrTides["N2"]["freq"] * x - N2pha) * np.pi / 180) + + P1amp * np.cos((CorrTides["P1"]["freq"] * x - P1pha) * np.pi / 180) + ) def octuple( - x, M2amp, M2pha, K1amp, K1pha, - S2amp, S2pha, O1amp, O1pha, - N2amp, N2pha, P1amp, P1pha, - K2amp, K2pha, Q1amp, Q1pha, mean): + x, + M2amp, + M2pha, + K1amp, + K1pha, + S2amp, + S2pha, + O1amp, + O1pha, + N2amp, + N2pha, + P1amp, + P1pha, + K2amp, + K2pha, + Q1amp, + Q1pha, + mean, +): """Function for the fit, for all the constituents: M2, K2, S1, O1, N2, P1, K2 and Q1. @@ -1555,20 +1664,21 @@ def octuple( :returns: function for fitting 8 frequencies """ - return( - mean + - M2amp * np.cos((CorrTides['M2']['freq'] * x - M2pha) * np.pi / 180) + - K1amp * np.cos((CorrTides['K1']['freq'] * x - K1pha) * np.pi / 180) + - S2amp * np.cos((CorrTides['S2']['freq'] * x - S2pha) * np.pi / 180) + - O1amp * np.cos((CorrTides['O1']['freq'] * x - O1pha) * np.pi / 180) + - N2amp * np.cos((CorrTides['N2']['freq'] * x - N2pha) * np.pi / 180) + - P1amp * np.cos((CorrTides['P1']['freq'] * x - P1pha) * np.pi / 180) + - K2amp * np.cos((CorrTides['K2']['freq'] * x - K2pha) * np.pi / 180) + - Q1amp * np.cos((CorrTides['Q1']['freq'] * x - Q1pha) * np.pi / 180)) + return ( + mean + + M2amp * np.cos((CorrTides["M2"]["freq"] * x - M2pha) * np.pi / 180) + + K1amp * np.cos((CorrTides["K1"]["freq"] * x - K1pha) * np.pi / 180) + + S2amp * np.cos((CorrTides["S2"]["freq"] * x - S2pha) * np.pi / 180) + + O1amp * np.cos((CorrTides["O1"]["freq"] * x - O1pha) * np.pi / 180) + + N2amp * np.cos((CorrTides["N2"]["freq"] * x - N2pha) * np.pi / 180) + + P1amp * np.cos((CorrTides["P1"]["freq"] * x - P1pha) * np.pi / 180) + + K2amp * np.cos((CorrTides["K2"]["freq"] * x - K2pha) * np.pi / 180) + + Q1amp * np.cos((CorrTides["Q1"]["freq"] * x - Q1pha) * np.pi / 180) + ) def convention_pha_amp(fitted_amp, fitted_pha): - """ This function takes the fitted parameters given for phase and + """This function takes the fitted parameters given for phase and amplitude of the tidal analysis and returns them following the tidal parameter convention; amplitude is positive and phase is between -180 and +180 degrees. 
@@ -1624,23 +1734,23 @@ def fittit(uaus, time, nconst): fitfunction = double # The first two harmonic parameters are always M2 and K1 - apparam['M2'] = {'amp': [], 'phase': []} - apparam['K1'] = {'amp': [], 'phase': []} + apparam["M2"] = {"amp": [], "phase": []} + apparam["K1"] = {"amp": [], "phase": []} if nconst > 2: fitfunction = quadruple - apparam['S2'] = {'amp': [], 'phase': []} - apparam['O1'] = {'amp': [], 'phase': []} + apparam["S2"] = {"amp": [], "phase": []} + apparam["O1"] = {"amp": [], "phase": []} if nconst > 4: fitfunction = sextuple - apparam['N2'] = {'amp': [], 'phase': []} - apparam['P1'] = {'amp': [], 'phase': []} + apparam["N2"] = {"amp": [], "phase": []} + apparam["P1"] = {"amp": [], "phase": []} if nconst > 6: fitfunction = octuple - apparam['K2'] = {'amp': [], 'phase': []} - apparam['Q1'] = {'amp': [], 'phase': []} + apparam["K2"] = {"amp": [], "phase": []} + apparam["Q1"] = {"amp": [], "phase": []} # CASE 1: a time series of velocities with depth at a single location. if uaus.ndim == 2: @@ -1665,13 +1775,14 @@ def fittit(uaus, time, nconst): # Rotating to have a positive amplitude and a phase between # [-180, 180] for k in np.arange(nconst): - fitted[2*k], fitted[2*k+1] = convention_pha_amp( - fitted[2*k], fitted[2*k+1]) + fitted[2 * k], fitted[2 * k + 1] = convention_pha_amp( + fitted[2 * k], fitted[2 * k + 1] + ) # Putting the amplitude and phase of each constituent of this # particlar depth in the right location within the dictionary. for const, k in zip(apparam, np.arange(0, nconst)): - apparam[const]['amp'][dep] = fitted[2*k] - apparam[const]['phase'][dep] = fitted[2*k+1] + apparam[const]["amp"][dep] = fitted[2 * k] + apparam[const]["phase"][dep] = fitted[2 * k + 1] # CASE 2 : a time series of an area of velocities at a single depth elif uaus.ndim == 3: @@ -1682,16 +1793,16 @@ def fittit(uaus, time, nconst): for i in np.arange(0, uaus.shape[1]): for j in np.arange(0, uaus.shape[2]): - if uaus[:, i, j].any() != 0.: - fitted, cov = curve_fit( - fitfunction, time[:], uaus[:, i, j]) + if uaus[:, i, j].any() != 0.0: + fitted, cov = curve_fit(fitfunction, time[:], uaus[:, i, j]) for k in np.arange(nconst): - fitted[2*k], fitted[2*k+1] = convention_pha_amp( - fitted[2*k], fitted[2*k+1]) + fitted[2 * k], fitted[2 * k + 1] = convention_pha_amp( + fitted[2 * k], fitted[2 * k + 1] + ) for const, k in zip(apparam, np.arange(0, nconst)): - apparam[const]['amp'][i, j] = fitted[2*k] - apparam[const]['phase'][i, j] = fitted[2*k+1] + apparam[const]["amp"][i, j] = fitted[2 * k] + apparam[const]["phase"][i, j] = fitted[2 * k + 1] # CASE 3: a time series of an area of velocities with depth elif uaus.ndim == 4: @@ -1703,33 +1814,36 @@ def fittit(uaus, time, nconst): for dep in np.arange(0, uaus.shape[1]): for i in np.arange(0, uaus.shape[2]): for j in np.arange(0, uaus.shape[3]): - if uaus[:, dep, i, j].any() != 0.: + if uaus[:, dep, i, j].any() != 0.0: fitted, cov = curve_fit( - fitfunction, time[:], uaus[:, dep, i, j]) + fitfunction, time[:], uaus[:, dep, i, j] + ) for k in np.arange(nconst): - fitted[2*k], fitted[2*k+1] = convention_pha_amp( - fitted[2*k], fitted[2*k+1]) + fitted[2 * k], fitted[2 * k + 1] = convention_pha_amp( + fitted[2 * k], fitted[2 * k + 1] + ) for const, k in zip(apparam, np.arange(0, nconst)): - apparam[const]['amp'][dep, i, j] = fitted[2*k] - apparam[const]['phase'][dep, i, j] = fitted[2*k+1] + apparam[const]["amp"][dep, i, j] = fitted[2 * k] + apparam[const]["phase"][dep, i, j] = fitted[2 * k + 1] # Case 4: a time series of a single location with a 
single depth. else: - thesize = (0) + thesize = 0 for const, ap in apparam.items(): for key2 in ap: ap[key2] = np.zeros(thesize) - if uaus[:].any() != 0.: + if uaus[:].any() != 0.0: fitted, cov = curve_fit(fitfunction, time[:], uaus[:]) for k in np.arange(nconst): - fitted[2*k], fitted[2*k+1] = convention_pha_amp( - fitted[2*k], fitted[2*k+1]) + fitted[2 * k], fitted[2 * k + 1] = convention_pha_amp( + fitted[2 * k], fitted[2 * k + 1] + ) for const, k in zip(apparam, np.arange(0, nconst)): - apparam[const]['amp'] = fitted[2*k] - apparam[const]['phase'] = fitted[2*k+1] + apparam[const]["amp"] = fitted[2 * k] + apparam[const]["phase"] = fitted[2 * k + 1] # Mask the zero values for const, ap in apparam.items(): @@ -1739,7 +1853,7 @@ def fittit(uaus, time, nconst): return apparam -def filter_timeseries(record, winlen=39, method='box'): +def filter_timeseries(record, winlen=39, method="box"): """Filter a timeseries. Developed for wind and tidal filtering, but can be modified for use @@ -1781,27 +1895,27 @@ def filter_timeseries(record, winlen=39, method='box'): weight = np.zeros(w, dtype=int) # Select filter method - if method == 'doodson': + if method == "doodson": # Doodson bandpass filter (winlen must be 39) weight[[1, 2, 5, 6, 10, 11, 13, 16, 18]] = 1 weight[[0, 3, 8]] = 2 centerval = 0 - elif method == 'box': + elif method == "box": # Box filter weight[:] = 1 centerval = 1 else: - raise ValueError('Invalid filter method: {}'.format(method)) + raise ValueError("Invalid filter method: {}".format(method)) # Loop through record for i in range(record_length): # Adjust window length for end cases - W = min(i, w, record_length-i-1) + W = min(i, w, record_length - i - 1) Weight = weight[:W] Weight = np.append(Weight[::-1], np.append(centerval, Weight)) if sum(Weight) != 0: - Weight = (Weight/sum(Weight)) + Weight = Weight / sum(Weight) # Expand weight dims so it can operate on record window for dim in range(record.ndim - 1): @@ -1809,7 +1923,7 @@ def filter_timeseries(record, winlen=39, method='box'): # Apply mean over window length if W > 0: - filtered[i, ...] = np.sum(record[i-W:i+W+1, ...] * Weight, axis=0) + filtered[i, ...] = np.sum(record[i - W : i + W + 1, ...] * Weight, axis=0) else: filtered[i, ...] = record[i, ...] 
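The tidetools.py hunks above repeatedly apply the same two calculations: converting a
constituent's real/imaginary harmonic components to amplitude and phase, and scoring
modelled against observed constituents with the Foreman et al. (1995) vector difference
and the Masson & Cummins style difference. A minimal NumPy sketch of those formulas,
with assumed illustrative values that are not part of this patch:

import numpy as np


def to_amp_pha(real, imag):
    """Harmonic real/imaginary components -> amplitude, phase [deg]."""
    amp = np.sqrt(real**2 + imag**2)
    pha = -np.degrees(np.arctan2(imag, real))
    return amp, pha


def harmonic_diffs(Am, gm, Ao, go):
    """Modelled (Am, gm) vs. observed (Ao, go) amplitude [m] / phase [deg]:
    Foreman et al. (1995) vector difference and the Masson & Cummins form."""
    gm, go = np.radians(gm), np.radians(go)
    d_f95 = np.sqrt(
        (Ao * np.cos(go) - Am * np.cos(gm)) ** 2
        + (Ao * np.sin(go) - Am * np.sin(gm)) ** 2
    )
    d_m04 = np.sqrt(0.5 * (Am**2 + Ao**2) - Am * Ao * np.cos(gm - go))
    return d_f95, d_m04


# Assumed example values, not from any model run or observation record
amp, pha = to_amp_pha(real=0.8, imag=-0.3)  # -> ~0.854 m, ~20.6 deg
d_f95, d_m04 = harmonic_diffs(Am=amp, gm=pha, Ao=0.9, go=25.0)

These are the same expressions that calc_diffs_meas_mod() evaluates for each station;
the Black reformatting above changes only their layout, not the calculations.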
diff --git a/SalishSeaTools/salishsea_tools/timeseries_tools.py b/SalishSeaTools/salishsea_tools/timeseries_tools.py index e3d6fc91..623cb316 100644 --- a/SalishSeaTools/salishsea_tools/timeseries_tools.py +++ b/SalishSeaTools/salishsea_tools/timeseries_tools.py @@ -27,18 +27,17 @@ def load_NEMO_timeseries( - filenames, mask, field, dim, index=0, spacing=1, - shape='grid', unstagger_dim=None + filenames, mask, field, dim, index=0, spacing=1, shape="grid", unstagger_dim=None ): - """ - """ + """ """ # Reshape mask, grid, and depth tmask, coords, ngrid, ngrid_water = reshape_coords( - mask, dim, index=index, spacing=spacing) + mask, dim, index=index, spacing=spacing + ) # Initialize output array - date = np.empty(0, dtype='datetime64[ns]') + date = np.empty(0, dtype="datetime64[ns]") data = np.empty((0, ngrid_water)) # Loop through filenames @@ -52,46 +51,47 @@ def load_NEMO_timeseries( # Reshape field data_trim = reshape_to_ts( - data_grid.values, tmask, ngrid, ngrid_water, spacing=spacing) + data_grid.values, tmask, ngrid, ngrid_water, spacing=spacing + ) # Store trimmed arrays date = np.concatenate([date, data_grid.time_counter.values]) data = np.concatenate([data, data_trim], axis=0) # Reshape to grid - if shape is 'grid': + if shape is "grid": # Correct for depth dimension name - if dim.find('depth') is not -1: - dim1, dim2, dimslice = 'gridY', 'gridX', 'z' - elif dim.find('y') is not -1: - dim1, dim2, dimslice = 'gridZ', 'gridX', 'y' - elif dim.find('x') is not -1: - dim1, dim2, dimslice = 'gridZ', 'gridY', 'x' + if dim.find("depth") is not -1: + dim1, dim2, dimslice = "gridY", "gridX", "z" + elif dim.find("y") is not -1: + dim1, dim2, dimslice = "gridZ", "gridX", "y" + elif dim.find("x") is not -1: + dim1, dim2, dimslice = "gridZ", "gridY", "x" # Reshape data to grid data = reshape_to_grid( - data, [coords[dim1], coords[dim2]], - mask.gdept_0.isel(**{'t': 0, dimslice: 0}).shape + data, + [coords[dim1], coords[dim2]], + mask.gdept_0.isel(**{"t": 0, dimslice: 0}).shape, ) # Redefine coords for grid coords = { - 'depth': mask.gdept_1d.isel(t=0).values, - 'gridZ': mask.z.values, - 'gridY': mask.y.values, - 'gridX': mask.x.values + "depth": mask.gdept_1d.isel(t=0).values, + "gridZ": mask.z.values, + "gridY": mask.y.values, + "gridX": mask.x.values, } # Coords dict - coords['date'] = date + coords["date"] = date return data, coords def make_filename_list( - timerange, qty, model='nowcast', resolution='h', - path='/results/SalishSea' + timerange, qty, model="nowcast", resolution="h", path="/results/SalishSea" ): """Return a sequential list of Nowcast results filenames to be passed into `xarray.open_mfdataset` or `timeseries_tools.load_NEMO_timeseries`. 
@@ -124,10 +124,11 @@ def make_filename_list( filenames = [] while date < enddate: - datestr1 = date.strftime('%d%b%y').lower() - datestr2 = date.strftime('%Y%m%d') - filename = 'SalishSea_1{}_{}_{}_grid_{}.nc'.format( - resolution, datestr2, datestr2, qty) + datestr1 = date.strftime("%d%b%y").lower() + datestr2 = date.strftime("%Y%m%d") + filename = "SalishSea_1{}_{}_{}_grid_{}.nc".format( + resolution, datestr2, datestr2, qty + ) filenames.append(os.path.join(path, model, datestr1, filename)) date = date + timedelta(days=1) @@ -140,39 +141,40 @@ def reshape_coords(mask_in, dim_in, index=0, spacing=1): """ # Correct for depth dimension name - if dim_in.find('depth') is not -1: - dim = 'deptht' + if dim_in.find("depth") is not -1: + dim = "deptht" else: dim = dim_in # Create full gridded mask, grid and depth Numpy ndarrays - gridZ, gridY, gridX = np.meshgrid( - mask_in.z, mask_in.y, mask_in.x, indexing='ij') - gridmask = xr.Dataset({ - 'tmask': ( - ['deptht', 'y', 'x'], - mask_in.tmask.isel(t=0).values.astype(bool), - ), - 'depth': (['deptht', 'y', 'x'], mask_in.gdept_0.isel(t=0).values), - 'gridZ': (['deptht', 'y', 'x'], gridZ), - 'gridY': (['deptht', 'y', 'x'], gridY), - 'gridX': (['deptht', 'y', 'x'], gridX)}, - coords={'deptht': mask_in.gdept_1d.isel(t=0).values, - 'y': mask_in.y, 'x': mask_in.x}) + gridZ, gridY, gridX = np.meshgrid(mask_in.z, mask_in.y, mask_in.x, indexing="ij") + gridmask = xr.Dataset( + { + "tmask": ( + ["deptht", "y", "x"], + mask_in.tmask.isel(t=0).values.astype(bool), + ), + "depth": (["deptht", "y", "x"], mask_in.gdept_0.isel(t=0).values), + "gridZ": (["deptht", "y", "x"], gridZ), + "gridY": (["deptht", "y", "x"], gridY), + "gridX": (["deptht", "y", "x"], gridX), + }, + coords={ + "deptht": mask_in.gdept_1d.isel(t=0).values, + "y": mask_in.y, + "x": mask_in.x, + }, + ) # Slice and subsample mask mask = gridmask.tmask.isel(**{dim: index}).values[::spacing, ::spacing] # Slice and subsample grid and depth into dict coords = { - 'depth': - gridmask.depth.isel(**{dim: index}).values[::spacing, ::spacing], - 'gridZ': - gridmask.gridZ.isel(**{dim: index}).values[::spacing, ::spacing], - 'gridY': - gridmask.gridY.isel(**{dim: index}).values[::spacing, ::spacing], - 'gridX': - gridmask.gridX.isel(**{dim: index}).values[::spacing, ::spacing], + "depth": gridmask.depth.isel(**{dim: index}).values[::spacing, ::spacing], + "gridZ": gridmask.gridZ.isel(**{dim: index}).values[::spacing, ::spacing], + "gridY": gridmask.gridY.isel(**{dim: index}).values[::spacing, ::spacing], + "gridX": gridmask.gridX.isel(**{dim: index}).values[::spacing, ::spacing], } # Number of grid points @@ -181,23 +183,21 @@ def reshape_coords(mask_in, dim_in, index=0, spacing=1): # Reshape mask, grid, and depth mask = mask.reshape(ngrid) - coords['depth'] = coords['depth'].reshape(ngrid)[mask] - coords['gridZ'] = coords['gridZ'].reshape(ngrid)[mask] - coords['gridY'] = coords['gridY'].reshape(ngrid)[mask] - coords['gridX'] = coords['gridX'].reshape(ngrid)[mask] + coords["depth"] = coords["depth"].reshape(ngrid)[mask] + coords["gridZ"] = coords["gridZ"].reshape(ngrid)[mask] + coords["gridY"] = coords["gridY"].reshape(ngrid)[mask] + coords["gridX"] = coords["gridX"].reshape(ngrid)[mask] return mask, coords, ngrid, ngrid_water def reshape_coords_GEM(grid, mask_in): - """ - """ + """ """ coords = {} # Create full gridded mask, grid and depth Numpy ndarrays - coords['gridY'], coords['gridX'] = np.meshgrid( - grid.y, grid.x, indexing='ij') + coords["gridY"], coords["gridX"] = np.meshgrid(grid.y, grid.x, 
indexing="ij") # Number of grid points ngrid = mask_in.shape[0] * mask_in.shape[1] @@ -205,15 +205,14 @@ def reshape_coords_GEM(grid, mask_in): # Reshape mask, grid, and depth mask = mask_in.reshape(ngrid) - coords['gridY'] = coords['gridY'].reshape(ngrid)[mask.astype(bool)] - coords['gridX'] = coords['gridX'].reshape(ngrid)[mask.astype(bool)] + coords["gridY"] = coords["gridY"].reshape(ngrid)[mask.astype(bool)] + coords["gridX"] = coords["gridX"].reshape(ngrid)[mask.astype(bool)] return mask, coords, ngrid, ngrid_water def reshape_to_ts(data_grid, mask, ngrid, ngrid_water, spacing=1): - """ - """ + """ """ # Convert to Numpy ndarray, subsample, and reshape data_flat = data_grid[:, ::spacing, ::spacing].reshape((-1, ngrid)) diff --git a/SalishSeaTools/salishsea_tools/unit_conversions.py b/SalishSeaTools/salishsea_tools/unit_conversions.py index ccc37a19..42a52f14 100644 --- a/SalishSeaTools/salishsea_tools/unit_conversions.py +++ b/SalishSeaTools/salishsea_tools/unit_conversions.py @@ -33,10 +33,17 @@ __all__ = [ - 'PSU_TEOS', 'TEOS_PSU', 'psu_teos', 'teos_psu', - 'M_PER_S__KM_PER_HR', 'M_PER_S__KNOTS', 'mps_kph', 'mps_knots', - 'wind_to_from', 'bearing_heading', - 'humanize_time_of_day', + "PSU_TEOS", + "TEOS_PSU", + "psu_teos", + "teos_psu", + "M_PER_S__KM_PER_HR", + "M_PER_S__KNOTS", + "mps_kph", + "mps_knots", + "wind_to_from", + "bearing_heading", + "humanize_time_of_day", ] @@ -126,8 +133,24 @@ def wind_to_from(wind_to): def bearing_heading( bearing, headings=( - 'N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', - 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW', 'N'), + "N", + "NNE", + "NE", + "ENE", + "E", + "ESE", + "SE", + "SSE", + "S", + "SSW", + "SW", + "WSW", + "W", + "WNW", + "NW", + "NNW", + "N", + ), ): """Convert a compass bearing to a heading. @@ -159,17 +182,17 @@ def humanize_time_of_day(date_time): e.g. early Monday afternoon :rtype: str """ - day_of_week = date_time.format('dddd') + day_of_week = date_time.format("dddd") if date_time.hour < 6: - part_of_day = '' - early_late = 'overnight' + part_of_day = "" + early_late = "overnight" elif date_time.hour < 12: - part_of_day = 'morning' - early_late = 'early' if date_time.hour < 9 else 'late' + part_of_day = "morning" + early_late = "early" if date_time.hour < 9 else "late" elif 12 <= date_time.hour < 18: - part_of_day = 'afternoon' - early_late = 'early' if date_time.hour < 15 else 'late' + part_of_day = "afternoon" + early_late = "early" if date_time.hour < 15 else "late" else: - part_of_day = 'evening' - early_late = 'early' if date_time.hour < 21 else 'late' - return ' '.join((early_late, day_of_week, part_of_day)).rstrip() + part_of_day = "evening" + early_late = "early" if date_time.hour < 21 else "late" + return " ".join((early_late, day_of_week, part_of_day)).rstrip() diff --git a/SalishSeaTools/salishsea_tools/utilities.py b/SalishSeaTools/salishsea_tools/utilities.py index daf4ddfb..bcbc251b 100644 --- a/SalishSeaTools/salishsea_tools/utilities.py +++ b/SalishSeaTools/salishsea_tools/utilities.py @@ -20,8 +20,10 @@ import glob import os -def findnamelist(namelist, year, month, day, - pathname = '/results/SalishSea/nowcast-green'): + +def findnamelist( + namelist, year, month, day, pathname="/results/SalishSea/nowcast-green" +): """Find the most recent namelist from a results file. 
arg str namelist: name of the namelist you are looking for @@ -36,11 +38,11 @@ def findnamelist(namelist, year, month, day, """ myday = arrow.get(year, month, day) - pathname = '/results/SalishSea/nowcast-green' - directory = myday.format('DDMMMYY').lower() + pathname = "/results/SalishSea/nowcast-green" + directory = myday.format("DDMMMYY").lower() mynamelist = glob.glob(os.path.join(pathname, directory, namelist)) while not mynamelist: myday = myday.shift(days=-1) - directory = myday.format('DDMMMYY').lower() + directory = myday.format("DDMMMYY").lower() mynamelist = glob.glob(os.path.join(pathname, directory, namelist)) return mynamelist[0] diff --git a/SalishSeaTools/salishsea_tools/visualisations.py b/SalishSeaTools/salishsea_tools/visualisations.py index d08cab71..c96835ef 100644 --- a/SalishSeaTools/salishsea_tools/visualisations.py +++ b/SalishSeaTools/salishsea_tools/visualisations.py @@ -24,13 +24,19 @@ def contour_thalweg( - axes, var, bathy, mesh_mask, clevels=None, - mesh_mask_depth_var='gdept_0', cmap='hsv', land_colour='burlywood', + axes, + var, + bathy, + mesh_mask, + clevels=None, + mesh_mask_depth_var="gdept_0", + cmap="hsv", + land_colour="burlywood", xcoord_distance=True, - thalweg_file='/home/sallen/MEOPAR/Tools/bathymetry/thalweg_working.txt', + thalweg_file="/home/sallen/MEOPAR/Tools/bathymetry/thalweg_working.txt", cbar_args=None, mesh_args=None, - method='contourf' + method="contourf", ): """Contour the data stored in var along the domain thalweg. @@ -81,56 +87,81 @@ def contour_thalweg( :returns: matplotlib colorbar object """ - thalweg_pts = np.loadtxt(thalweg_file, delimiter=' ', dtype=int) + thalweg_pts = np.loadtxt(thalweg_file, delimiter=" ", dtype=int) depth = mesh_mask.variables[mesh_mask_depth_var][:] dep_thal, distance, var_thal = load_thalweg( - depth[0, ...], var, bathy['nav_lon'][:], bathy['nav_lat'][:], - thalweg_pts) + depth[0, ...], var, bathy["nav_lon"][:], bathy["nav_lat"][:], thalweg_pts + ) if xcoord_distance: xx_thal = distance - axes.set_xlabel('Distance along thalweg [km]') + axes.set_xlabel("Distance along thalweg [km]") else: xx_thal, _ = np.meshgrid(np.arange(var_thal.shape[-1]), dep_thal[:, 0]) - axes.set_xlabel('Thalweg index') + axes.set_xlabel("Thalweg index") # Determine contour levels clevels_default = { - 'salinity': [ - 26, 27, 28, 29, 30, 30.2, 30.4, 30.6, 30.8, 31, 32, 33, 34 + "salinity": [26, 27, 28, 29, 30, 30.2, 30.4, 30.6, 30.8, 31, 32, 33, 34], + "temperature": [ + 6.9, + 7, + 7.5, + 8, + 8.5, + 9, + 9.8, + 9.9, + 10.3, + 10.5, + 11, + 11.5, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, ], - 'temperature': [ - 6.9, 7, 7.5, 8, 8.5, 9, 9.8, 9.9, 10.3, 10.5, 11, 11.5, 12, - 13, 14, 15, 16, 17, 18, 19 - ] } if isinstance(clevels, str): try: clevels = clevels_default[clevels] except KeyError: - raise KeyError('no default clevels defined for {}'.format(clevels)) + raise KeyError("no default clevels defined for {}".format(clevels)) # Prepare for plotting by filling in grid points just above bathymetry var_plot = _fill_in_bathy(var_thal, mesh_mask, thalweg_pts) - if method == 'pcolormesh': + if method == "pcolormesh": if mesh_args is None: mesh = axes.pcolormesh(xx_thal, dep_thal, var_plot, cmap=cmap) else: mesh = axes.pcolormesh(xx_thal, dep_thal, var_plot, cmap=cmap, **mesh_args) - axes.set_xlim((np.min(xx_thal),np.max(xx_thal))) + axes.set_xlim((np.min(xx_thal), np.max(xx_thal))) else: if mesh_args is None: - mesh = axes.contourf(xx_thal, dep_thal, var_plot, clevels, cmap=cmap, - extend='both') + mesh = axes.contourf( + 
xx_thal, dep_thal, var_plot, clevels, cmap=cmap, extend="both" + ) else: - mesh = axes.contourf(xx_thal, dep_thal, var_plot, clevels, cmap=cmap, - extend='both', **mesh_args) + mesh = axes.contourf( + xx_thal, + dep_thal, + var_plot, + clevels, + cmap=cmap, + extend="both", + **mesh_args + ) _add_bathy_patch( - xx_thal, bathy['Bathymetry'][:], thalweg_pts, axes, color=land_colour) + xx_thal, bathy["Bathymetry"][:], thalweg_pts, axes, color=land_colour + ) if cbar_args is None: cbar = plt.colorbar(mesh, ax=axes) else: cbar = plt.colorbar(mesh, ax=axes, **cbar_args) axes.invert_yaxis() - axes.set_ylabel('Depth [m]') + axes.set_ylabel("Depth [m]") return cbar @@ -157,7 +188,7 @@ def _add_bathy_patch(xcoord, bathy, thalweg_pts, ax, color, zmin=450): # Look up bottom bathymetry along thalweg thalweg_bottom = bathy[thalweg_pts[:, 0], thalweg_pts[:, 1]] # Construct bathy polygon - poly = np.zeros((thalweg_bottom.shape[0]+2, 2)) + poly = np.zeros((thalweg_bottom.shape[0] + 2, 2)) poly[0, :] = 0, zmin poly[1:-1, 0] = xcoord[0, :] poly[1:-1:, 1] = thalweg_bottom @@ -217,16 +248,29 @@ def _fill_in_bathy(variable, mesh_mask, thalweg_pts): :returns: newvar, the filled numpy array """ - mbathy = mesh_mask.variables['mbathy'][0, :, :] + mbathy = mesh_mask.variables["mbathy"][0, :, :] newvar = np.copy(variable) mbathy = mbathy[thalweg_pts[:, 0], thalweg_pts[:, 1]] for i, level in enumerate(mbathy): - newvar[level, i] = variable[level-1, i] + newvar[level, i] = variable[level - 1, i] return newvar -def contour_layer_grid(axes,data,mask,clevels=10,lat=None,lon=None,cmap=None,var_name=None, - land_colour='burlywood',is_depth_avg=False,is_pcolmesh=False,title='',cbar_args=None, + +def contour_layer_grid( + axes, + data, + mask, + clevels=10, + lat=None, + lon=None, + cmap=None, + var_name=None, + land_colour="burlywood", + is_depth_avg=False, + is_pcolmesh=False, + title="", + cbar_args=None, ): """Contour 2d data at an arbitrary klevel on the model grid @@ -273,26 +317,25 @@ def contour_layer_grid(axes,data,mask,clevels=10,lat=None,lon=None,cmap=None,var :returns: matplotlib colorbar object """ - mdata = np.ma.masked_where(mask==0,data) + mdata = np.ma.masked_where(mask == 0, data) viz_tools.set_aspect(axes) if cmap == None: - cbMIN, cbMAX, cmap = visualisations.retrieve_cmap(var_name,is_depth_avg) + cbMIN, cbMAX, cmap = visualisations.retrieve_cmap(var_name, is_depth_avg) cmap = plt.get_cmap(cmocean.cm.algae) if is_pcolmesh: mesh = axes.pcolormesh(mdata, cmap=cmap) else: - mesh= axes.contourf(mdata,clevels,cmap=cmap) + mesh = axes.contourf(mdata, clevels, cmap=cmap) - axes.set_xlabel('X index') - axes.set_ylabel('Y index') + axes.set_xlabel("X index") + axes.set_ylabel("Y index") axes.set_title(title) axes.set_axis_bgcolor(land_colour) - if cbar_args is None: cbar = plt.colorbar(mesh, ax=axes) else: @@ -301,7 +344,7 @@ def contour_layer_grid(axes,data,mask,clevels=10,lat=None,lon=None,cmap=None,var return cbar -def plot_drifters(ax, DATA, DRIFT_OBJS=None, color='red', cutoff=24, zorder=15): +def plot_drifters(ax, DATA, DRIFT_OBJS=None, color="red", cutoff=24, zorder=15): """Plot a drifter track from ODL Drifter observations. 
:arg time_ind: Time index (current drifter position, track will be visible @@ -330,79 +373,95 @@ def plot_drifters(ax, DATA, DRIFT_OBJS=None, color='red', cutoff=24, zorder=15): if DATA.time.shape[0] > 0: # Convert time boundaries to datetime.datetime to allow operations/slicing - starttime = nc_tools.xarraytime_to_datetime(DATA.time[ 0]) - endtime = nc_tools.xarraytime_to_datetime(DATA.time[-1]) + starttime = nc_tools.xarraytime_to_datetime(DATA.time[0]) + endtime = nc_tools.xarraytime_to_datetime(DATA.time[-1]) # Color plot cutoff time_cutoff = endtime - datetime.timedelta(hours=cutoff) - if DRIFT_OBJS is not None: # --- Update line objects only + if DRIFT_OBJS is not None: # --- Update line objects only # Plot drifter track (gray) - DRIFT_OBJS['L_old'][0].set_data( + DRIFT_OBJS["L_old"][0].set_data( DATA.lon.sel(time=slice(starttime, time_cutoff)), - DATA.lat.sel(time=slice(starttime, time_cutoff))) + DATA.lat.sel(time=slice(starttime, time_cutoff)), + ) # Plot drifter track (color) - DRIFT_OBJS['L_new'][0].set_data( + DRIFT_OBJS["L_new"][0].set_data( DATA.lon.sel(time=slice(time_cutoff, endtime)), - DATA.lat.sel(time=slice(time_cutoff, endtime))) + DATA.lat.sel(time=slice(time_cutoff, endtime)), + ) # Plot drifter position - DRIFT_OBJS['P'][0].set_data( - DATA.lon.sel(time=endtime, method='nearest'), - DATA.lat.sel(time=endtime, method='nearest')) + DRIFT_OBJS["P"][0].set_data( + DATA.lon.sel(time=endtime, method="nearest"), + DATA.lat.sel(time=endtime, method="nearest"), + ) - else: # ------------------------ Plot new line objects instances + else: # ------------------------ Plot new line objects instances # Define drifter objects dict DRIFT_OBJS = {} # Plot drifter track (gray) - DRIFT_OBJS['L_old'] = ax.plot( + DRIFT_OBJS["L_old"] = ax.plot( DATA.lon.sel(time=slice(starttime, time_cutoff)), DATA.lat.sel(time=slice(starttime, time_cutoff)), - '-', linewidth=2, color='gray', zorder=zorder) + "-", + linewidth=2, + color="gray", + zorder=zorder, + ) # Plot drifter track (color) - DRIFT_OBJS['L_new'] = ax.plot( + DRIFT_OBJS["L_new"] = ax.plot( DATA.lon.sel(time=slice(time_cutoff, endtime)), DATA.lat.sel(time=slice(time_cutoff, endtime)), - '-', linewidth=2, color=color, zorder=zorder+1) + "-", + linewidth=2, + color=color, + zorder=zorder + 1, + ) # Plot drifter position - DRIFT_OBJS['P'] = ax.plot( - DATA.lon.sel(time=endtime, method='nearest'), - DATA.lat.sel(time=endtime, method='nearest'), - 'o', color=color, zorder=zorder+2) + DRIFT_OBJS["P"] = ax.plot( + DATA.lon.sel(time=endtime, method="nearest"), + DATA.lat.sel(time=endtime, method="nearest"), + "o", + color=color, + zorder=zorder + 2, + ) else: - if DRIFT_OBJS is not None: # --- Update line objects only + if DRIFT_OBJS is not None: # --- Update line objects only # Update drifter tracks - DRIFT_OBJS['L_old'][0].set_data([], []) # gray - DRIFT_OBJS['L_new'][0].set_data([], []) # color - DRIFT_OBJS['P' ][0].set_data([], []) # position + DRIFT_OBJS["L_old"][0].set_data([], []) # gray + DRIFT_OBJS["L_new"][0].set_data([], []) # color + DRIFT_OBJS["P"][0].set_data([], []) # position else: DRIFT_OBJS = {} - DRIFT_OBJS['L_old'] = ax.plot([], [], '-', - linewidth=2, color='gray', zorder=zorder) + DRIFT_OBJS["L_old"] = ax.plot( + [], [], "-", linewidth=2, color="gray", zorder=zorder + ) # Plot drifter track (color) - DRIFT_OBJS['L_new'] = ax.plot([], [], '-', - linewidth=2, color=color, zorder=zorder+1) + DRIFT_OBJS["L_new"] = ax.plot( + [], [], "-", linewidth=2, color=color, zorder=zorder + 1 + ) # Plot drifter position - 
DRIFT_OBJS['P'] = ax.plot([], [], 'o', color=color, zorder=zorder+2) + DRIFT_OBJS["P"] = ax.plot([], [], "o", color=color, zorder=zorder + 2) return DRIFT_OBJS -def create_figure(ax, DATA, coords='map', window=[-125, -122.5, 48, 50]): - """ Boilerplate figure code like coastline, aspect ratio, axis lims, etc. +def create_figure(ax, DATA, coords="map", window=[-125, -122.5, 48, 50]): + """Boilerplate figure code like coastline, aspect ratio, axis lims, etc. .. note:: @@ -411,12 +470,13 @@ def create_figure(ax, DATA, coords='map', window=[-125, -122.5, 48, 50]): """ raise DeprecationWarning( - 'create_figure has been deprecated. Call plot formatting functions ' - 'individually instead.') + "create_figure has been deprecated. Call plot formatting functions " + "individually instead." + ) def plot_tracers( - ax, qty, DATA, C=None, coords='map', clim=[0, 35, 1], cmap='jet', zorder=0 + ax, qty, DATA, C=None, coords="map", clim=[0, 35, 1], cmap="jet", zorder=0 ): """Plot a horizontal slice of NEMO tracers as filled contours. @@ -428,14 +488,25 @@ def plot_tracers( """ raise DeprecationWarning( - 'plot_tracers has been deprecated. Plot NEMO results directly using ' - 'matplotlib.pyplot.contourf or equivalent instead.') - + "plot_tracers has been deprecated. Plot NEMO results directly using " + "matplotlib.pyplot.contourf or equivalent instead." + ) def plot_velocity( - ax, model, DATA, Q=None, coords='map', processed=False, spacing=5, - mask=True, color='black', scale=20, headwidth=3, linewidth=0, zorder=5 + ax, + model, + DATA, + Q=None, + coords="map", + processed=False, + spacing=5, + mask=True, + color="black", + scale=20, + headwidth=3, + linewidth=0, + zorder=5, ): """Plot a horizontal slice of NEMO or GEM velocities as quiver objects. Accepts subsampled u and v fields via the **processed** keyword @@ -449,84 +520,92 @@ def plot_velocity( """ raise DeprecationWarning( - 'plot_velocity has been deprecated. Plot NEMO results directly using ' - 'matplotlib.pyplot.quiver or equivalent instead.') + "plot_velocity has been deprecated. Plot NEMO results directly using " + "matplotlib.pyplot.quiver or equivalent instead." 
+ ) -def retrieve_cmap(varname,deep_bool): +def retrieve_cmap(varname, deep_bool): """takes 2 args: string varname - name of a variable from nowcast-green output boolean deep_bool - indicates whether the variable is depth-integrated or not returns 2 ints(min and max value of range), and string identifying cmap""" - var_namemap ={'Fraser_tracer': {'varname':'Fraser_tracer'}, - 'ammonium': {'varname':'NH4'}, - 'NH4': {'varname':'NH4'}, - 'biogenic_silicon': {'varname':'bSi'}, - 'bSi': {'varname':'bSi'}, - 'ciliates': {'varname':'MYRI'}, - 'MYRI': {'varname':'MYRI'}, - 'diatoms': {'varname':'PHY2'}, - 'PHY2': {'varname':'PHY2'}, - 'dissolved_organic_nitrogen': {'varname':'dissolved_organic_nitrogen'}, - 'flagellates': {'varname':'PHY'}, - 'PHY': {'varname':'PHY'}, - 'mesozooplankton': {'varname':'MESZ'}, - 'MESZ': {'varname':'MESZ'}, - 'microzooplankton': {'varname':'MICZ'}, - 'MICZ': {'varname':'MICZ'}, - 'nitrate': {'varname':'NO3'}, - 'NO3': {'varname':'NO3'}, - 'particulate_organic_nitrogen': {'varname':'PON'}, - 'POC': {'varname':'PON'}, - 'PON': {'varname':'PON'}, - 'dissolved_organic_nitrogen': {'varname':'DON'}, - 'DOC': {'varname':'DON'}, - 'DON': {'varname':'DON'}, - - 'silicon': {'varname':'Si'}, - 'Si': {'varname':'Si'}} - - #dictionary of colour ranges - var_colour_ranges = { + var_namemap = { + "Fraser_tracer": {"varname": "Fraser_tracer"}, + "ammonium": {"varname": "NH4"}, + "NH4": {"varname": "NH4"}, + "biogenic_silicon": {"varname": "bSi"}, + "bSi": {"varname": "bSi"}, + "ciliates": {"varname": "MYRI"}, + "MYRI": {"varname": "MYRI"}, + "diatoms": {"varname": "PHY2"}, + "PHY2": {"varname": "PHY2"}, + "dissolved_organic_nitrogen": {"varname": "dissolved_organic_nitrogen"}, + "flagellates": {"varname": "PHY"}, + "PHY": {"varname": "PHY"}, + "mesozooplankton": {"varname": "MESZ"}, + "MESZ": {"varname": "MESZ"}, + "microzooplankton": {"varname": "MICZ"}, + "MICZ": {"varname": "MICZ"}, + "nitrate": {"varname": "NO3"}, + "NO3": {"varname": "NO3"}, + "particulate_organic_nitrogen": {"varname": "PON"}, + "POC": {"varname": "PON"}, + "PON": {"varname": "PON"}, + "dissolved_organic_nitrogen": {"varname": "DON"}, + "DOC": {"varname": "DON"}, + "DON": {"varname": "DON"}, + "silicon": {"varname": "Si"}, + "Si": {"varname": "Si"}, + } - 'Fraser_tracer':{'colorBarMinimum': 0.0, 'colorBarMaximum': 140.0,'cmap': 'turbid'}, - 'MESZ': {'colorBarMinimum': 0.0, 'colorBarMaximum': 3.0,'cmap': 'algae'}, - 'MICZ': {'colorBarMinimum': 0.0, 'colorBarMaximum': 4.0,'cmap': 'algae'}, - 'MYRI': {'colorBarMinimum': 0.0, 'colorBarMaximum': 5.0,'cmap': 'algae'}, - 'NH4': {'colorBarMinimum': 0.0, 'colorBarMaximum': 10.0,'cmap': 'matter'}, - 'NO3': {'colorBarMinimum': 0.0, 'colorBarMaximum': 40.0,'cmap': 'tempo'}, - 'PON': {'colorBarMinimum': 0.0, 'colorBarMaximum': 2.0,'cmap': 'amp'}, - 'DON': {'colorBarMinimum': 0.0, 'colorBarMaximum': 20.0,'cmap': 'amp'}, - 'O2': {'colorBarMinimum': 0.0, 'colorBarMaximum': 140.0,'cmap': 'turbid'}, - 'PHY': {'colorBarMinimum': 0.0, 'colorBarMaximum': 6.0, 'cmap': 'algae'}, - 'PHY2': {'colorBarMinimum': 0.0, 'colorBarMaximum': 15.0,'cmap': 'algae'}, - 'Si': {'colorBarMinimum': 0.0, 'colorBarMaximum': 70.0,'cmap': 'turbid'}, - 'bSi': {'colorBarMinimum': 0.0, 'colorBarMaximum': 70.0,'cmap': 'turbid'}, - - 'Fraser_tracer_int':{'colorBarMinimum': 0.0, 'colorBarMaximum': 6500,'cmap': 'turbid'}, - 'MESZ_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 140,'cmap': 'algae'}, - 'MICZ_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 350,'cmap': 'algae'}, - 'MYRI_int': 
{'colorBarMinimum': 0.0, 'colorBarMaximum': 75,'cmap': 'algae'}, - 'NH4_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 1500,'cmap': 'matter'}, - 'NO3_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 24000,'cmap': 'tempo'}, - 'PON_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 600,'cmap': 'amp'}, - 'DON_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 2500,'cmap': 'amp'}, - 'O2_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 1000,'cmap': 'turbid'}, - 'PHY_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 100,'cmap': 'algae'}, - 'PHY2_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 350,'cmap': 'algae'}, - 'Si_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 40000,'cmap': 'turbid'}, - 'bSi_int': {'colorBarMinimum': 0.0, 'colorBarMaximum': 40000,'cmap': 'turbid'}, -} + # dictionary of colour ranges + var_colour_ranges = { + "Fraser_tracer": { + "colorBarMinimum": 0.0, + "colorBarMaximum": 140.0, + "cmap": "turbid", + }, + "MESZ": {"colorBarMinimum": 0.0, "colorBarMaximum": 3.0, "cmap": "algae"}, + "MICZ": {"colorBarMinimum": 0.0, "colorBarMaximum": 4.0, "cmap": "algae"}, + "MYRI": {"colorBarMinimum": 0.0, "colorBarMaximum": 5.0, "cmap": "algae"}, + "NH4": {"colorBarMinimum": 0.0, "colorBarMaximum": 10.0, "cmap": "matter"}, + "NO3": {"colorBarMinimum": 0.0, "colorBarMaximum": 40.0, "cmap": "tempo"}, + "PON": {"colorBarMinimum": 0.0, "colorBarMaximum": 2.0, "cmap": "amp"}, + "DON": {"colorBarMinimum": 0.0, "colorBarMaximum": 20.0, "cmap": "amp"}, + "O2": {"colorBarMinimum": 0.0, "colorBarMaximum": 140.0, "cmap": "turbid"}, + "PHY": {"colorBarMinimum": 0.0, "colorBarMaximum": 6.0, "cmap": "algae"}, + "PHY2": {"colorBarMinimum": 0.0, "colorBarMaximum": 15.0, "cmap": "algae"}, + "Si": {"colorBarMinimum": 0.0, "colorBarMaximum": 70.0, "cmap": "turbid"}, + "bSi": {"colorBarMinimum": 0.0, "colorBarMaximum": 70.0, "cmap": "turbid"}, + "Fraser_tracer_int": { + "colorBarMinimum": 0.0, + "colorBarMaximum": 6500, + "cmap": "turbid", + }, + "MESZ_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 140, "cmap": "algae"}, + "MICZ_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 350, "cmap": "algae"}, + "MYRI_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 75, "cmap": "algae"}, + "NH4_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 1500, "cmap": "matter"}, + "NO3_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 24000, "cmap": "tempo"}, + "PON_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 600, "cmap": "amp"}, + "DON_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 2500, "cmap": "amp"}, + "O2_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 1000, "cmap": "turbid"}, + "PHY_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 100, "cmap": "algae"}, + "PHY2_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 350, "cmap": "algae"}, + "Si_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 40000, "cmap": "turbid"}, + "bSi_int": {"colorBarMinimum": 0.0, "colorBarMaximum": 40000, "cmap": "turbid"}, + } dp = var_namemap[varname] - vn = dp['varname'] - if (deep_bool == True): - vn = vn + '_int' + vn = dp["varname"] + if deep_bool == True: + vn = vn + "_int" dict_pull = var_colour_ranges[vn] - cbMIN = dict_pull['colorBarMinimum'] + cbMIN = dict_pull["colorBarMinimum"] print() - cbMAX = dict_pull['colorBarMaximum'] - cmap_name = dict_pull['cmap'] + cbMAX = dict_pull["colorBarMaximum"] + cmap_name = dict_pull["cmap"] return cbMIN, cbMAX, cmap_name diff --git a/SalishSeaTools/salishsea_tools/viz_tools.py b/SalishSeaTools/salishsea_tools/viz_tools.py index 381fabf6..916c7063 100644 --- 
a/SalishSeaTools/salishsea_tools/viz_tools.py +++ b/SalishSeaTools/salishsea_tools/viz_tools.py @@ -38,12 +38,12 @@ def calc_abs_max(array): def plot_coastline( axes, bathymetry, - coords='grid', + coords="grid", isobath=0, xslice=None, yslice=None, - color='black', - server='local', + color="black", + server="local", zorder=2, ): """Plot the coastline contour line from bathymetry on the axes. @@ -95,48 +95,65 @@ def plot_coastline( """ # Index names based on results server - if server == 'local': - lon_name = 'nav_lon' - lat_name = 'nav_lat' - bathy_name = 'Bathymetry' - elif server == 'ERDDAP': - lon_name = 'longitude' - lat_name = 'latitude' - bathy_name = 'bathymetry' + if server == "local": + lon_name = "nav_lon" + lat_name = "nav_lat" + bathy_name = "Bathymetry" + elif server == "ERDDAP": + lon_name = "longitude" + lat_name = "latitude" + bathy_name = "bathymetry" else: - raise ValueError('Unknown results server name: {}'.format(server)) - - if any(( - xslice is None and yslice is not None, - xslice is not None and yslice is None, - )): - raise ValueError('Both xslice and yslice must be specified') - if not hasattr(bathymetry, 'variables'): + raise ValueError("Unknown results server name: {}".format(server)) + + if any( + ( + xslice is None and yslice is not None, + xslice is not None and yslice is None, + ) + ): + raise ValueError("Both xslice and yslice must be specified") + if not hasattr(bathymetry, "variables"): bathy = nc.Dataset(bathymetry) else: bathy = bathymetry depths = bathy.variables[bathy_name] - if coords == 'map': + if coords == "map": lats = bathy.variables[lat_name] lons = bathy.variables[lon_name] if xslice is None and yslice is None: contour_lines = axes.contour( - np.array(lons), np.array(lats), np.array(depths), - [isobath], colors=color, zorder=zorder) + np.array(lons), + np.array(lats), + np.array(depths), + [isobath], + colors=color, + zorder=zorder, + ) else: contour_lines = axes.contour( - lons[yslice, xslice], lats[yslice, xslice], - depths[yslice, xslice].data, [isobath], colors=color, - zorder=zorder) + lons[yslice, xslice], + lats[yslice, xslice], + depths[yslice, xslice].data, + [isobath], + colors=color, + zorder=zorder, + ) else: if xslice is None and yslice is None: contour_lines = axes.contour( - np.array(depths), [isobath], colors=color, zorder=zorder) + np.array(depths), [isobath], colors=color, zorder=zorder + ) else: contour_lines = axes.contour( - xslice, yslice, depths[yslice, xslice].data, - [isobath], colors=color, zorder=zorder) - if not hasattr(bathymetry, 'variables'): + xslice, + yslice, + depths[yslice, xslice].data, + [isobath], + colors=color, + zorder=zorder, + ) + if not hasattr(bathymetry, "variables"): bathy.close() return contour_lines @@ -144,13 +161,13 @@ def plot_coastline( def plot_land_mask( axes, bathymetry, - coords='grid', + coords="grid", isobath=0, xslice=None, yslice=None, - color='black', - server='local', - zorder=1 + color="black", + server="local", + zorder=1, ): """Plot land areas from bathymetry as solid colour polygons on the axes. 
@@ -201,57 +218,72 @@ def plot_land_mask( """ # Index names based on results server - if server == 'local': - lon_name = 'nav_lon' - lat_name = 'nav_lat' - bathy_name = 'Bathymetry' - elif server == 'ERDDAP': - lon_name = 'longitude' - lat_name = 'latitude' - bathy_name = 'bathymetry' + if server == "local": + lon_name = "nav_lon" + lat_name = "nav_lat" + bathy_name = "Bathymetry" + elif server == "ERDDAP": + lon_name = "longitude" + lat_name = "latitude" + bathy_name = "bathymetry" else: - raise ValueError('Unknown results server name: {}'.format(server)) - - if any(( - xslice is None and yslice is not None, - xslice is not None and yslice is None, - )): - raise ValueError('Both xslice and yslice must be specified') - if not hasattr(bathymetry, 'variables'): + raise ValueError("Unknown results server name: {}".format(server)) + + if any( + ( + xslice is None and yslice is not None, + xslice is not None and yslice is None, + ) + ): + raise ValueError("Both xslice and yslice must be specified") + if not hasattr(bathymetry, "variables"): bathy = nc.Dataset(bathymetry) else: bathy = bathymetry depths = bathy.variables[bathy_name] contour_interval = [-0.01, isobath + 0.01] - if coords == 'map': + if coords == "map": lats = bathy.variables[lat_name] lons = bathy.variables[lon_name] if xslice is None and yslice is None: contour_fills = axes.contourf( - np.array(lons), np.array(lats), np.array(depths), - contour_interval, colors=color, zorder=zorder) + np.array(lons), + np.array(lats), + np.array(depths), + contour_interval, + colors=color, + zorder=zorder, + ) else: contour_fills = axes.contourf( - lons[yslice, xslice], lats[yslice, xslice], - depths[yslice, xslice].data, contour_interval, colors=color, - zorder=zorder) + lons[yslice, xslice], + lats[yslice, xslice], + depths[yslice, xslice].data, + contour_interval, + colors=color, + zorder=zorder, + ) else: if xslice is None and yslice is None: - contour_fills = axes.contourf(np.array(depths), - contour_interval, colors=color, - zorder=zorder) + contour_fills = axes.contourf( + np.array(depths), contour_interval, colors=color, zorder=zorder + ) else: contour_fills = axes.contourf( - xslice, yslice, depths[yslice, xslice].data, - contour_interval, colors=color, zorder=zorder) - if not hasattr(bathymetry, 'variables'): + xslice, + yslice, + depths[yslice, xslice].data, + contour_interval, + colors=color, + zorder=zorder, + ) + if not hasattr(bathymetry, "variables"): bathy.close() return contour_fills def plot_boundary( - ax, grid, mask, dim='depth', index=0, coords='grid', - color='burlywood', zorder=10 + ax, grid, mask, dim="depth", index=0, coords="grid", color="burlywood", zorder=10 ): """Plot the land boundary for a given NEMO domain slice. 
@@ -289,44 +321,52 @@ def plot_boundary( indexslice = index # Determine coordinate system and orientation - if dim == 'depth': - dimslice = 'z' + if dim == "depth": + dimslice = "z" indexslice = abs(depth.values - index).argmin() - if coords == 'map': + if coords == "map": dim1, dim2 = grid.nav_lon, grid.nav_lat - elif coords == 'grid': + elif coords == "grid": dim1, dim2 = grid.x, grid.y else: - raise ValueError('Unknown coordinate system: {}'.format(coords)) - elif dim == 'y': - if coords == 'map': + raise ValueError("Unknown coordinate system: {}".format(coords)) + elif dim == "y": + if coords == "map": dim1, dim2 = grid.nav_lon.isel(**{dim: index}), depth - elif coords == 'grid': + elif coords == "grid": dim1, dim2 = grid.x, depth else: - raise ValueError('Unknown coordinate system: {}'.format(coords)) - elif dim == 'x': - if coords == 'map': + raise ValueError("Unknown coordinate system: {}".format(coords)) + elif dim == "x": + if coords == "map": dim1, dim2 = grid.nav_lat.isel(**{dim: index}), depth - elif coords == 'grid': + elif coords == "grid": dim1, dim2 = grid.y, depth else: - raise ValueError('Unknown coordinate system: {}'.format(coords)) + raise ValueError("Unknown coordinate system: {}".format(coords)) else: - raise ValueError('Unknown dimension: {}'.format(dim)) + raise ValueError("Unknown dimension: {}".format(dim)) # Plot landmask and boundary contour patch = ax.contourf( - dim1, dim2, mask.tmask.isel(**{'t': 0, dimslice: indexslice}), - [-0.01, 0.01], colors=color, zorder=zorder + dim1, + dim2, + mask.tmask.isel(**{"t": 0, dimslice: indexslice}), + [-0.01, 0.01], + colors=color, + zorder=zorder, ) boundary = ax.contour( - dim1, dim2, mask.tmask.isel(**{'t': 0, dimslice: indexslice}), - [0], colors='k', zorder=zorder + dim1, + dim2, + mask.tmask.isel(**{"t": 0, dimslice: indexslice}), + [0], + colors="k", + zorder=zorder, ) # Invert depth axis - if dim == 'x' or dim == 'y': + if dim == "x" or dim == "y": ax.invert_yaxis() return patch, boundary @@ -334,10 +374,10 @@ def plot_boundary( def set_aspect( axes, - aspect=5/4.4, - coords='grid', + aspect=5 / 4.4, + coords="grid", lats=None, - adjustable='box', + adjustable="box", ): """Set the aspect ratio for the axes. @@ -378,7 +418,7 @@ def set_aspect( :kbd:`coords='map'`, and use the default :kbd:`lats=None`. """ - if coords == 'map' and lats is not None: + if coords == "map" and lats is not None: aspect = 1 / np.cos(np.median(lats) * np.pi / 180) axes.set_aspect(aspect, adjustable=adjustable) return aspect @@ -425,7 +465,7 @@ def unstagger_xarray(qty, index): return qty -def rotate_vel(u_in, v_in, origin='grid'): +def rotate_vel(u_in, v_in, origin="grid"): """Rotate u and v component values to either E-N or model grid. 
The origin argument sets the input coordinates ('grid' or 'map') @@ -445,13 +485,12 @@ def rotate_vel(u_in, v_in, origin='grid'): """ # Determine rotation direction - if origin == 'grid': - fac = 1 - elif origin == 'map': + if origin == "grid": + fac = 1 + elif origin == "map": fac = -1 else: - raise ValueError('Invalid origin value: {origin}'.format( - origin=origin)) + raise ValueError("Invalid origin value: {origin}".format(origin=origin)) # Rotate velocities theta_rad = 29 * np.pi / 180 @@ -588,7 +627,10 @@ def rotate_vel_bybearing(u_in, v_in, coords, origin="grid"): # A is the angle counterclockwise from due east in radians A = np.empty_like(longitude) - A[:, 0:-1] = np.arctan2(np.cos(yA) * np.sin(yB) - np.sin(yA) * np.cos(yB) * np.cos(xB-xA), np.sin(xB-xA) * np.cos(yB)) + A[:, 0:-1] = np.arctan2( + np.cos(yA) * np.sin(yB) - np.sin(yA) * np.cos(yB) * np.cos(xB - xA), + np.sin(xB - xA) * np.cos(yB), + ) A[:, -1] = A[:, -2] # Rotate velocities diff --git a/SalishSeaTools/salishsea_tools/wind_tools.py b/SalishSeaTools/salishsea_tools/wind_tools.py index b9b6e873..33d082a1 100644 --- a/SalishSeaTools/salishsea_tools/wind_tools.py +++ b/SalishSeaTools/salishsea_tools/wind_tools.py @@ -42,10 +42,14 @@ __all__ = [ - 'calc_wind_avg_at_point', - 'M_PER_S__KM_PER_HR', 'M_PER_S__KNOTS', 'mps_kph', 'mps_knots', - 'wind_to_from', 'bearing_heading', - 'wind_speed_dir', + "calc_wind_avg_at_point", + "M_PER_S__KM_PER_HR", + "M_PER_S__KNOTS", + "mps_kph", + "mps_knots", + "wind_to_from", + "bearing_heading", + "wind_speed_dir", ] @@ -79,7 +83,9 @@ def get_EC_observations(station, start_day, end_day): # Call get_EC_observations from stormtools wind_spd, wind_dir, temp, times, lat, lon = stormtools.get_EC_observations( - station, start_day, end_day, + station, + start_day, + end_day, ) return wind_spd, wind_dir, temp, times, lat, lon @@ -99,17 +105,17 @@ def parse_DFO_buoy_date(line): # -- thus the multiple cases if (int(line[3]) > 2020) & (int(line[3]) < 202020): year, month, day = int(line[3][:4]), int(line[3][4:]), int(line[4]) - HHMM = f'{int(line[5]):04d}' + HHMM = f"{int(line[5]):04d}" elif int(line[3]) > 202020: year, month, day = int(line[3][:4]), int(line[3][4:6]), int(line[3][6:]) - HHMM = f'{int(line[4]):04d}' + HHMM = f"{int(line[4]):04d}" elif int(line[4]) > 12: year = int(line[3]) - MMDD, HHMM = [f'{int(l):04d}' for l in line[4:6]] + MMDD, HHMM = [f"{int(l):04d}" for l in line[4:6]] month, day = int(MMDD[:2]), int(MMDD[2:]) else: year = int(line[3]) - HHMM = f'{int(line[6]):04d}' + HHMM = f"{int(line[6]):04d}" month, day = int(line[4]), int(line[5]) hour, minute = int(HHMM[:2]), int(HHMM[2:]) date = datetime(year, month, day, hour, minute, 0) @@ -131,16 +137,16 @@ def read_DFO_buoy(station, year): # Station ID dict station_ids = { - 'Halibut Bank': 46146, - 'Sentry Shoal': 46131, + "Halibut Bank": 46146, + "Sentry Shoal": 46131, } # Data url - url = 'https://www.meds-sdmm.dfo-mpo.gc.ca/alphapro/wave/waveshare/fbyears' + url = "https://www.meds-sdmm.dfo-mpo.gc.ca/alphapro/wave/waveshare/fbyears" # Open the *.zip file from url using Pandas - ID = f'C{station_ids[station]}' - file = os.path.join(url, ID, f'{ID.lower()}_{year}.zip') + ID = f"C{station_ids[station]}" + file = os.path.join(url, ID, f"{ID.lower()}_{year}.zip") csv = pd.read_csv(file, header=None) # Initialize parsing booleans @@ -170,8 +176,8 @@ def read_DFO_buoy(station, year): # Read wind data elif gotwind == False: gotwind = True - wdir.append(float(line_parsed[0].split('W')[0])) - 
wspd.append(float(line_parsed[1].split('W')[0])) + wdir.append(float(line_parsed[0].split("W")[0])) + wspd.append(float(line_parsed[1].split("W")[0])) # Transform angle to deg CCW from east wdir = 270 - np.array(wdir) @@ -200,7 +206,7 @@ def wind_speed_dir(u_wind, v_wind): speed = np.sqrt(u_wind**2 + v_wind**2) dir = np.arctan2(v_wind, u_wind) dir = np.rad2deg(dir + (dir < 0) * 2 * np.pi) - speed_dir = namedtuple('speed_dir', 'speed, dir') + speed_dir = namedtuple("speed_dir", "speed, dir") return speed_dir(speed, dir) @@ -235,29 +241,29 @@ def calc_wind_avg_at_point(date_time, weather_path, windji, avg_hrs=-4): :raises: :py:exc:`IndexError` if :kbd:`avg_hrs` is outside the range :kbd:`[-24, 0]`. """ - weather_filename_tmpl = 'hrdps_y{0.year:4d}m{0.month:02d}d{0.day:02d}.nc' + weather_filename_tmpl = "hrdps_y{0.year:4d}m{0.month:02d}d{0.day:02d}.nc" try: - weather_file = Path( - weather_path, weather_filename_tmpl.format(date_time)) + weather_file = Path(weather_path, weather_filename_tmpl.format(date_time)) grid_weather = nc_tools.dataset_from_path(weather_file) except IOError: weather_file = Path( - weather_path, 'fcst', weather_filename_tmpl.format(date_time)) + weather_path, "fcst", weather_filename_tmpl.format(date_time) + ) grid_weather = nc_tools.dataset_from_path(weather_file) - wind_u, wind_v, wind_t = nc_tools.uv_wind_timeseries_at_point( - grid_weather, *windji) + wind_u, wind_v, wind_t = nc_tools.uv_wind_timeseries_at_point(grid_weather, *windji) if date_time.hour < abs(avg_hrs): grid_weather = nc_tools.dataset_from_path( weather_file.with_name( - weather_filename_tmpl.format(date_time.shift(days=-1)))) - wind_prev_day = nc_tools.uv_wind_timeseries_at_point( - grid_weather, *windji) + weather_filename_tmpl.format(date_time.shift(days=-1)) + ) + ) + wind_prev_day = nc_tools.uv_wind_timeseries_at_point(grid_weather, *windji) wind_u = np.concatenate((wind_prev_day.u, wind_u)) wind_v = np.concatenate((wind_prev_day.v, wind_v)) wind_t = np.concatenate((wind_prev_day.time, wind_t)) - i_date_time = np.where(wind_t == date_time.floor('hour'))[0].item() + i_date_time = np.where(wind_t == date_time.floor("hour"))[0].item() i_date_time_p1 = i_date_time + 1 - u_avg = np.mean(wind_u[(i_date_time_p1 + avg_hrs):i_date_time_p1]) - v_avg = np.mean(wind_v[(i_date_time_p1 + avg_hrs):i_date_time_p1]) - wind_avg = namedtuple('wind_avg', 'u, v') + u_avg = np.mean(wind_u[(i_date_time_p1 + avg_hrs) : i_date_time_p1]) + v_avg = np.mean(wind_v[(i_date_time_p1 + avg_hrs) : i_date_time_p1]) + wind_avg = namedtuple("wind_avg", "u, v") return wind_avg(u_avg, v_avg) diff --git a/SalishSeaTools/setup.py b/SalishSeaTools/setup.py index ed92853a..47617456 100644 --- a/SalishSeaTools/setup.py +++ b/SalishSeaTools/setup.py @@ -21,46 +21,46 @@ python_classifiers = [ - 'Programming Language :: Python :: {0}'.format(py_version) - for py_version in ['3.8', '3.9'] + "Programming Language :: Python :: {0}".format(py_version) + for py_version in ["3.8", "3.9"] ] other_classifiers = [ - 'Development Status :: ' + __pkg_metadata__.DEV_STATUS, - 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: Python :: Implementation :: CPython', - 'Operating System :: MacOS :: MacOS X', - 'Operating System :: POSIX :: Linux', - 'Operating System :: Unix', - 'Environment :: Console', - 'Intended Audience :: Science/Research', - 'Intended Audience :: Education', - 'Intended Audience :: Developers', + "Development Status :: " + __pkg_metadata__.DEV_STATUS, + "License :: OSI Approved :: Apache Software 
License", + "Programming Language :: Python :: Implementation :: CPython", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Operating System :: Unix", + "Environment :: Console", + "Intended Audience :: Science/Research", + "Intended Audience :: Education", + "Intended Audience :: Developers", ] try: - long_description = open('README.rst', 'rt').read() + long_description = open("README.rst", "rt").read() except IOError: - long_description = '' + long_description = "" install_requires = [ # see envs/requirements.txt for versions most recently used in development - 'angles', - 'arrow>=1.0.0', - 'bottleneck', - 'cmocean', - 'f90nml', - 'gsw', - 'matplotlib', - 'netCDF4', - 'numpy', - 'openpyxl', - 'pandas', - 'python-dateutil', - 'pytz', - 'requests', - 'retrying', - 'scipy', - 'tqdm', - 'xarray', + "angles", + "arrow>=1.0.0", + "bottleneck", + "cmocean", + "f90nml", + "gsw", + "matplotlib", + "netCDF4", + "numpy", + "openpyxl", + "pandas", + "python-dateutil", + "pytz", + "requests", + "retrying", + "scipy", + "tqdm", + "xarray", ] setup( @@ -68,14 +68,12 @@ version=__pkg_metadata__.VERSION, description=__pkg_metadata__.DESCRIPTION, long_description=long_description, - author='Doug Latornell and the Salish Sea MEOPAR Project Contributors', - author_email='djl@douglatornell.ca', - url=( - 'https://salishsea-meopar-tools.readthedocs.org/en/latest/' - 'SalishSeaTools/'), - license='Apache License, Version 2.0', + author="Doug Latornell and the Salish Sea MEOPAR Project Contributors", + author_email="djl@douglatornell.ca", + url=("https://salishsea-meopar-tools.readthedocs.org/en/latest/" "SalishSeaTools/"), + license="Apache License, Version 2.0", classifiers=python_classifiers + other_classifiers, - platforms=['MacOS X', 'Linux'], + platforms=["MacOS X", "Linux"], install_requires=install_requires, - packages=['salishsea_tools'], + packages=["salishsea_tools"], ) diff --git a/SalishSeaTools/tests/conftest.py b/SalishSeaTools/tests/conftest.py index 08eb8802..b86916f7 100644 --- a/SalishSeaTools/tests/conftest.py +++ b/SalishSeaTools/tests/conftest.py @@ -27,7 +27,7 @@ def nc_dataset(): Remove the created file as a clean-up operation. """ - dataset = nc.Dataset('foo', 'w') + dataset = nc.Dataset("foo", "w") yield dataset dataset.close() - os.remove('foo') + os.remove("foo") diff --git a/SalishSeaTools/tests/test_bathy_tools.py b/SalishSeaTools/tests/test_bathy_tools.py index c80e4a46..505d8cdd 100644 --- a/SalishSeaTools/tests/test_bathy_tools.py +++ b/SalishSeaTools/tests/test_bathy_tools.py @@ -1,6 +1,8 @@ """Unit tests for bathy_tools. 
""" + from __future__ import division + """ Copyright 2013-2021 The Salish Sea MEOPAR contributors and The University of British Columbia @@ -26,43 +28,40 @@ @pytest.fixture def depths(request): - bathy = nc.Dataset('foo', 'w') - bathy.createDimension('x', 3) - bathy.createDimension('y', 5) - depths = bathy.createVariable('Bathymetry', float, ('y', 'x')) + bathy = nc.Dataset("foo", "w") + bathy.createDimension("x", 3) + bathy.createDimension("y", 5) + depths = bathy.createVariable("Bathymetry", float, ("y", "x")) def teardown(): bathy.close() - os.remove('foo') + os.remove("foo") + request.addfinalizer(teardown) return depths def test_smooth_neighbours_d1_lt_d2(): - """smooth_neighbours returns expected value for depth1 < depth2 - """ + """smooth_neighbours returns expected value for depth1 < depth2""" d1, d2 = bathy_tools.smooth_neighbours(0.2, 1, 2) assert d1, d2 == (1.3, 1.7) def test_smooth_neighbours_d1_gt_d2(): - """smooth_neighbours returns expected value for depth1 > depth2 - """ + """smooth_neighbours returns expected value for depth1 > depth2""" d1, d2 = bathy_tools.smooth_neighbours(0.2, 2, 1) assert d1, d2 == (1.7, 1.3) def test_calc_norm_depth_diffs_degenerate(depths): - """calc_norm_depth_diffs returns zeros for delta_lat=delta_lon=0 - """ + """calc_norm_depth_diffs returns zeros for delta_lat=delta_lon=0""" depths = np.ones((5, 3)) diffs = bathy_tools.calc_norm_depth_diffs(depths, 0, 0) np.testing.assert_array_equal(diffs, np.zeros_like(depths)) def test_calc_norm_depth_diffs_1_lat_step(depths): - """calc_norm_depth_diffs returns expected diffs for delta_lat=1 - """ + """calc_norm_depth_diffs returns expected diffs for delta_lat=1""" depths = np.ones((5, 3)) depths[1, 1] = 2 diffs = bathy_tools.calc_norm_depth_diffs(depths, 1, 0) @@ -72,8 +71,7 @@ def test_calc_norm_depth_diffs_1_lat_step(depths): def test_calc_norm_depth_diffs_1_lon_step(depths): - """calc_norm_depth_diffs returns expected diffs for delta_lon=1 - """ + """calc_norm_depth_diffs returns expected diffs for delta_lon=1""" depths = np.ones((5, 3)) depths[1, 1] = 2 diffs = bathy_tools.calc_norm_depth_diffs(depths, 0, 1) @@ -83,8 +81,7 @@ def test_calc_norm_depth_diffs_1_lon_step(depths): def test_argmax_single_max(depths): - """argmax return expected indices for single max value - """ + """argmax return expected indices for single max value""" depths = np.zeros((5, 3)) depths[1, 2] = 42 result = bathy_tools.argmax(depths) @@ -92,8 +89,7 @@ def test_argmax_single_max(depths): def test_argmax_2_max(depths): - """argmax return expected indices for single max value - """ + """argmax return expected indices for single max value""" depths = np.zeros((5, 3)) depths[1, 2] = 42 depths[2, 1] = 42 @@ -102,8 +98,7 @@ def test_argmax_2_max(depths): def test_argmax_all_equal(depths): - """argmax return expected indices for single max value - """ + """argmax return expected indices for single max value""" depths = np.ones((5, 3)) result = bathy_tools.argmax(depths) assert result == (0, 0) diff --git a/SalishSeaTools/tests/test_data_tools.py b/SalishSeaTools/tests/test_data_tools.py index b006b953..80c017e4 100644 --- a/SalishSeaTools/tests/test_data_tools.py +++ b/SalishSeaTools/tests/test_data_tools.py @@ -64,7 +64,7 @@ class TestOncJsonToDataset: def test_onc_json_to_dataset_teos_10_salinity(self): onc_json = json.loads( - '''\ + """\ { "citations": [ "Ocean Networks Canada Society. 2023. Strait of Georgia East Conductivity Temperature Depth Deployed 2023-03-17. Ocean Networks Canada Society. 
https://doi.org/10.34943/9e6cf493-892f-4da0-9eb4-16254e7da48c." @@ -114,8 +114,8 @@ def test_onc_json_to_dataset_teos_10_salinity(self): "unitOfMeasure": "C" } ] - } - ''' + } + """ ) ds = data_tools.onc_json_to_dataset(onc_json) assert ds.attrs["station"] == "SEVIP" @@ -124,15 +124,18 @@ def test_onc_json_to_dataset_teos_10_salinity(self): assert ds.salinity.name == "salinity" expected = [teos_tools.psu_teos(d) for d in [30.9339, 30.9338]] numpy.testing.assert_array_equal(ds.salinity.data, expected) - expected = numpy.array([ - arrow.get(t).naive for t in [ - "2023-12-12T00:00:01.013Z", - "2023-12-12T00:00:02.006Z" - ] - ], dtype='datetime64[ns]') + expected = numpy.array( + [ + arrow.get(t).naive + for t in ["2023-12-12T00:00:01.013Z", "2023-12-12T00:00:02.006Z"] + ], + dtype="datetime64[ns]", + ) numpy.testing.assert_array_equal(ds.salinity.coords["sampleTime"], expected) assert ds.salinity.dims == ("sampleTime",) - numpy.testing.assert_array_equal(ds.salinity.attrs["qaqcFlag"], numpy.array([1, 1])) + numpy.testing.assert_array_equal( + ds.salinity.attrs["qaqcFlag"], numpy.array([1, 1]) + ) assert ds.salinity.attrs["sensorName"] == "Reference Salinity" assert ds.salinity.attrs["unitOfMeasure"] == "g/kg" assert ds.salinity.attrs["actualSamples"] == 2 @@ -140,22 +143,25 @@ def test_onc_json_to_dataset_teos_10_salinity(self): assert "temperature" in ds.data_vars assert ds.temperature.name == "temperature" numpy.testing.assert_array_equal(ds.temperature.data, [9.5185, 9.5185]) - expected = numpy.array([ - arrow.get(t).naive for t in [ - "2023-12-12T00:00:01.013Z", - "2023-12-12T00:00:02.006Z" - ] - ], dtype='datetime64[ns]') + expected = numpy.array( + [ + arrow.get(t).naive + for t in ["2023-12-12T00:00:01.013Z", "2023-12-12T00:00:02.006Z"] + ], + dtype="datetime64[ns]", + ) numpy.testing.assert_array_equal(ds.temperature.coords["sampleTime"], expected) assert ds.temperature.dims == ("sampleTime",) - numpy.testing.assert_array_equal(ds.temperature.attrs["qaqcFlag"], numpy.array([1, 1])) + numpy.testing.assert_array_equal( + ds.temperature.attrs["qaqcFlag"], numpy.array([1, 1]) + ) assert ds.temperature.attrs["sensorName"] == "Temperature" assert ds.temperature.attrs["unitOfMeasure"] == "C" assert ds.temperature.attrs["actualSamples"] == 2 def test_onc_json_to_dataset_psu_salinity(self): onc_json = json.loads( - '''\ + """\ { "citations": [ "Ocean Networks Canada Society. 2023. Strait of Georgia East Conductivity Temperature Depth Deployed 2023-03-17. Ocean Networks Canada Society. https://doi.org/10.34943/9e6cf493-892f-4da0-9eb4-16254e7da48c." 
@@ -189,8 +195,8 @@ def test_onc_json_to_dataset_psu_salinity(self): "unitOfMeasure": "psu" } ] - } - ''' + } + """ ) ds = data_tools.onc_json_to_dataset(onc_json, teos=False) assert ds.attrs["station"] == "SEVIP" @@ -198,15 +204,18 @@ def test_onc_json_to_dataset_psu_salinity(self): assert "salinity" in ds.data_vars assert ds.salinity.name == "salinity" numpy.testing.assert_array_equal(ds.salinity.data, [30.9339, 30.9338]) - expected = numpy.array([ - arrow.get(t).naive for t in [ - "2023-12-12T00:00:01.013Z", - "2023-12-12T00:00:02.006Z" - ] - ], dtype='datetime64[ns]') + expected = numpy.array( + [ + arrow.get(t).naive + for t in ["2023-12-12T00:00:01.013Z", "2023-12-12T00:00:02.006Z"] + ], + dtype="datetime64[ns]", + ) numpy.testing.assert_array_equal(ds.salinity.coords["sampleTime"], expected) assert ds.salinity.dims == ("sampleTime",) - numpy.testing.assert_array_equal(ds.salinity.attrs["qaqcFlag"], numpy.array([1, 1])) + numpy.testing.assert_array_equal( + ds.salinity.attrs["qaqcFlag"], numpy.array([1, 1]) + ) assert ds.salinity.attrs["sensorName"] == "Practical Salinity" assert ds.salinity.attrs["unitOfMeasure"] == "psu" assert ds.salinity.attrs["actualSamples"] == 2 diff --git a/SalishSeaTools/tests/test_hg_commands.py b/SalishSeaTools/tests/test_hg_commands.py index d4353a9b..6e726007 100644 --- a/SalishSeaTools/tests/test_hg_commands.py +++ b/SalishSeaTools/tests/test_hg_commands.py @@ -1,5 +1,6 @@ """Unit tests for hg_commands module. """ + """ Copyright 2013-2021 The Salish Sea MEOPAR contributors and The University of British Columbia @@ -24,72 +25,81 @@ from salishsea_tools import hg_commands as hg -@patch('salishsea_tools.hg_commands.subprocess.check_output') +@patch("salishsea_tools.hg_commands.subprocess.check_output") def test_default_url(mock_chk_out): - """default_url returns expected result - """ - mock_chk_out.return_value = 'foo' + """default_url returns expected result""" + mock_chk_out.return_value = "foo" url = hg.default_url() - assert url == 'foo' + assert url == "foo" -@patch('salishsea_tools.hg_commands.subprocess.check_output') +@patch("salishsea_tools.hg_commands.subprocess.check_output") def test_default_url_with_repo(mock_chk_out): - """default_url uses expected command when repo arg is provided - """ - mock_chk_out.return_value = 'foo' - hg.default_url('bar') + """default_url uses expected command when repo arg is provided""" + mock_chk_out.return_value = "foo" + hg.default_url("bar") mock_chk_out.assert_called_once_with( - 'hg -R bar paths default'.split(), universal_newlines=True) + "hg -R bar paths default".split(), universal_newlines=True + ) -@patch('salishsea_tools.hg_commands.subprocess.check_output') +@patch("salishsea_tools.hg_commands.subprocess.check_output") def test_default_url_no_repo(mock_chk_out): - """default_url returns None when called on non-top level hg repo dir - """ - mock_chk_out.side_effect = subprocess.CalledProcessError(1, 'cmd') - url = hg.default_url('bar/baz') + """default_url returns None when called on non-top level hg repo dir""" + mock_chk_out.side_effect = subprocess.CalledProcessError(1, "cmd") + url = hg.default_url("bar/baz") assert url is None -@patch('salishsea_tools.hg_commands.subprocess.check_output') +@patch("salishsea_tools.hg_commands.subprocess.check_output") def test_heads_tip_rev(mock_chk_out): - """heads uses expected command with default revs list - """ - hg.heads('foo') + """heads uses expected command with default revs list""" + hg.heads("foo") mock_chk_out.assert_called_once_with( - 'hg -R foo heads 
.'.split(), universal_newlines=True) + "hg -R foo heads .".split(), universal_newlines=True + ) -@patch('salishsea_tools.hg_commands.subprocess.check_output') +@patch("salishsea_tools.hg_commands.subprocess.check_output") def test_heads_multiple_revs(mock_chk_out): - """heads uses expected command with multiple revs - """ - hg.heads('foo', revs=['bar', 'baz']) + """heads uses expected command with multiple revs""" + hg.heads("foo", revs=["bar", "baz"]) mock_chk_out.assert_called_once_with( - 'hg -R foo heads bar baz'.split(), universal_newlines=True) + "hg -R foo heads bar baz".split(), universal_newlines=True + ) @pytest.mark.parametrize( - 'kwargs, expected', + "kwargs, expected", [ - ({'repo': None, 'rev': None, 'file': None, 'verbose': False}, - 'hg parents'.split()), - ({'repo': 'foo', 'rev': None, 'file': None, 'verbose': False}, - 'hg parents -R foo'.split()), - ({'repo': None, 'rev': 42, 'file': None, 'verbose': False}, - ['hg', 'parents', '-r', 42]), - ({'repo': None, 'rev': 'd56ed390617c', 'file': None, 'verbose': False}, - 'hg parents -r d56ed390617c'.split()), - ({'repo': None, 'rev': None, 'file': 'foo', 'verbose': False}, - 'hg parents foo'.split()), - ({'repo': None, 'rev': None, 'file': None, 'verbose': True}, - 'hg parents -v'.split()), - ] + ( + {"repo": None, "rev": None, "file": None, "verbose": False}, + "hg parents".split(), + ), + ( + {"repo": "foo", "rev": None, "file": None, "verbose": False}, + "hg parents -R foo".split(), + ), + ( + {"repo": None, "rev": 42, "file": None, "verbose": False}, + ["hg", "parents", "-r", 42], + ), + ( + {"repo": None, "rev": "d56ed390617c", "file": None, "verbose": False}, + "hg parents -r d56ed390617c".split(), + ), + ( + {"repo": None, "rev": None, "file": "foo", "verbose": False}, + "hg parents foo".split(), + ), + ( + {"repo": None, "rev": None, "file": None, "verbose": True}, + "hg parents -v".split(), + ), + ], ) -@patch('salishsea_tools.hg_commands.subprocess.check_output') +@patch("salishsea_tools.hg_commands.subprocess.check_output") def test_parents_default_args(mock_chk_out, kwargs, expected): - """parents uses expected command with default args - """ + """parents uses expected command with default args""" hg.parents(**kwargs) mock_chk_out.assert_called_once_with(expected, universal_newlines=True) diff --git a/SalishSeaTools/tests/test_namelist.py b/SalishSeaTools/tests/test_namelist.py index 2c33e544..ebc4a796 100644 --- a/SalishSeaTools/tests/test_namelist.py +++ b/SalishSeaTools/tests/test_namelist.py @@ -9,6 +9,7 @@ GNU Lesser General Public License, Version 3 (https://www.gnu.org/copyleft/lesser.html) """ + import unittest from io import StringIO @@ -19,6 +20,7 @@ class NameListTestCase(unittest.TestCase): """ Some very basic test cases. """ + def test_simple_group(self): """ Test simple namelist group with values of different types. @@ -30,68 +32,81 @@ def test_simple_group(self): " string = 'test'\n" " true = .TRUE.\n" " false = .FALSE.\n" - "/") + "/" + ) namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"group": [{ - "float": 0.75, - "integer": 700, - "string": "test", - "true": True, - "false": False - }]}) + self.assertEqual( + namelist_dict, + { + "group": [ + { + "float": 0.75, + "integer": 700, + "string": "test", + "true": True, + "false": False, + } + ] + }, + ) def test_double_quote_string(self): """ Test simple namelist group with string value enclosed in double quotes. 
""" - group = ( - "&group\n" - ' string = "test"\n' - "/") + group = "&group\n" ' string = "test"\n' "/" namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"group": [{ - "string": "test", - }]}) + self.assertEqual( + namelist_dict, + { + "group": [ + { + "string": "test", + } + ] + }, + ) def test_empty_string(self): """ Test simple namelist group with empty string value. """ - group = ( - "&group\n" - ' string1 = ""\n' - " string2 = ''\n" - "/") + group = "&group\n" ' string1 = ""\n' " string2 = ''\n" "/" namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"group": [{ - "string1": "", - "string2": "", - }]}) + self.assertEqual( + namelist_dict, + { + "group": [ + { + "string1": "", + "string2": "", + } + ] + }, + ) def test_group_ends_w_amp_end(self): """ Test simple namelist group with &end as end token. """ - group = ( - "&group\n" - " float = 0.75\n" - "&end") + group = "&group\n" " float = 0.75\n" "&end" namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"group": [{ - "float": 0.75, - }]}) + self.assertEqual( + namelist_dict, + { + "group": [ + { + "float": 0.75, + } + ] + }, + ) def test_ignore_empty_group(self): """ - Ignore empty namelist group. + Ignore empty namelist group. """ - group = ( - "&group\n" - "&end") + group = "&group\n" "&end" namelist_dict = namelist2dict(StringIO(group)) self.assertEqual(namelist_dict, {}) @@ -99,30 +114,26 @@ def test_heterogeneous_list(self): """ Test list of heterogeneous values. """ - group = ( - "&group\n" - " foo = 0.75, 700, 'test', .TRUE.\n" - "/") + group = "&group\n" " foo = 0.75, 700, 'test', .TRUE.\n" "/" namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"group": [{ - "foo": [0.75, 700, "test", True] - }]}) + self.assertEqual(namelist_dict, {"group": [{"foo": [0.75, 700, "test", True]}]}) def test_array_element_assignment(self): """ Test simple namelist group with assignment to array element. """ - group = ( - "&group\n" - " float(1) = 0.75\n" - " float(2) = 0.85\n" - "&end") + group = "&group\n" " float(1) = 0.75\n" " float(2) = 0.85\n" "&end" namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"group": [{ - "float": [0.75, 0.85], - }]}) + self.assertEqual( + namelist_dict, + { + "group": [ + { + "float": [0.75, 0.85], + } + ] + }, + ) def test_same_name_groups_append_to_group_list(self): """ @@ -134,17 +145,22 @@ def test_same_name_groups_append_to_group_list(self): "&end\n" "&group\n" " float = 0.85\n" - "&end\n") + "&end\n" + ) namelist_dict = namelist2dict(StringIO(groups)) - self.assertEqual(namelist_dict, - {"group": [ - { - "float": 0.75, - }, - { - "float": 0.85, - }, - ]}) + self.assertEqual( + namelist_dict, + { + "group": [ + { + "float": 0.75, + }, + { + "float": 0.85, + }, + ] + }, + ) def test_complex_single_line_group(self): """ @@ -152,27 +168,19 @@ def test_complex_single_line_group(self): """ group = "&list a=1, b=1,2 c='12 / !' / " namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"list": [{ - "a": 1, - "b": [1, 2], - "c": "12 / !" - }]}) + self.assertEqual( + namelist_dict, {"list": [{"a": 1, "b": [1, 2], "c": "12 / !"}]} + ) def test_complex_multiple_group(self): """ Same as test_complex_single_line_group() just split over lines. """ - group = ( - "&list a=1\n" - "b=1,2, c='12 / !' /") + group = "&list a=1\n" "b=1,2, c='12 / !' 
/" namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"list": [{ - "a": 1, - "b": [1, 2], - "c": "12 / !" - }]}) + self.assertEqual( + namelist_dict, {"list": [{"a": 1, "b": [1, 2], "c": "12 / !"}]} + ) def test_complex_numbers(self): """ @@ -184,16 +192,23 @@ def test_complex_numbers(self): " number_b = (1.2,3.4)\n" " number_c = (-1.2,0.0)\n" " number_d = (0.0, 1.0)\n" - "/") + "/" + ) namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"complex_group": [{ - "number_a": 1.0 + 2.0j, - "number_b": 1.2 + 3.4j, - "number_c": -1.2 + 0.0j, - "number_d": 0.0j + 1.0j - }]}) + self.assertEqual( + namelist_dict, + { + "complex_group": [ + { + "number_a": 1.0 + 2.0j, + "number_b": 1.2 + 3.4j, + "number_c": -1.2 + 0.0j, + "number_d": 0.0j + 1.0j, + } + ] + }, + ) def test_group_mixed_and_lists(self): """ @@ -209,19 +224,26 @@ def test_group_mixed_and_lists(self): " attributes = 'vx' 'vy' 'vz'\n" " file_name_prefix = './DATA/mess/'\n" " override = .TRUE.\n" - "/\n") + "/\n" + ) namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"receiver": [{ - "station": "XX02", - "location": "a", - "lon": 12.51, - "lat": -0.01, - "depth": 1.0, - "attributes": ["vx", "vy", "vz"], - "file_name_prefix": "./DATA/mess/", - "override": True - }]}) + self.assertEqual( + namelist_dict, + { + "receiver": [ + { + "station": "XX02", + "location": "a", + "lon": 12.51, + "lat": -0.01, + "depth": 1.0, + "attributes": ["vx", "vy", "vz"], + "file_name_prefix": "./DATA/mess/", + "override": True, + } + ] + }, + ) def test_multiple_groups(self): """ @@ -248,36 +270,40 @@ def test_multiple_groups(self): " attributes = 'vx' 'vy' 'vz'\n" " file_name_prefix = './DATA/mess/'\n" " override = .TRUE.\n" - "/\n") + "/\n" + ) namelist_dict = namelist2dict(StringIO(group)) - self.assertEqual(namelist_dict, - {"group": [{ - "float": 0.75, - "integer": 700, - "string": "test", - "true": True, - "false": False - }], - "list": [{ - "a": 1, - "b": [1, 2], - "c": "12 / !" - }, { - "a": 1, - "b": [1, 2], - "c": "12 / !" 
- }], - "receiver": [{ - "station": "XX02", - "location": "a", - "lon": 12.51, - "lat": -0.01, - "depth": 1.0, - "attributes": ["vx", "vy", "vz"], - "file_name_prefix": "./DATA/mess/", - "override": True - }]}) + self.assertEqual( + namelist_dict, + { + "group": [ + { + "float": 0.75, + "integer": 700, + "string": "test", + "true": True, + "false": False, + } + ], + "list": [ + {"a": 1, "b": [1, 2], "c": "12 / !"}, + {"a": 1, "b": [1, 2], "c": "12 / !"}, + ], + "receiver": [ + { + "station": "XX02", + "location": "a", + "lon": 12.51, + "lat": -0.01, + "depth": 1.0, + "attributes": ["vx", "vy", "vz"], + "file_name_prefix": "./DATA/mess/", + "override": True, + } + ], + }, + ) def test_real_world_example(self): """ @@ -310,22 +336,30 @@ def test_real_world_example(self): "&XXDATA \n" " XXREAL = 1., \n" " XXINTEGER = 2, \n" - " XXCOMPLEX = (3.,4.)/") + " XXCOMPLEX = (3.,4.)/" + ) namelist_dict = namelist2dict(StringIO(groups)) - self.assertEqual(namelist_dict, { - "TTDATA": [{ - "TTREAL": 1.0, - "TTINTEGER": 2, - "TTCOMPLEX": 3.0 + 4.0j, - "TTCHAR": "namelist", - "TTBOOL": True}], - "AADATA": [{ - "AAREAL": [1.0, 1.0, 2.0, 3.0], - "AAINTEGER": [2, 2, 3, 4], - "AACOMPLEX": [3.0 + 4.0j, 3.0 + 4.0j, 5.0 + 6.0j, 7.0 + 7.0j], - "AACHAR": ["namelist", "namelist", "array", " the lot"], - "AABOOL": [True, True, False, False]}], - "XXDATA": [{ - "XXREAL": 1.0, - "XXINTEGER": 2, - "XXCOMPLEX": 3.0 + 4.0j}]}) + self.assertEqual( + namelist_dict, + { + "TTDATA": [ + { + "TTREAL": 1.0, + "TTINTEGER": 2, + "TTCOMPLEX": 3.0 + 4.0j, + "TTCHAR": "namelist", + "TTBOOL": True, + } + ], + "AADATA": [ + { + "AAREAL": [1.0, 1.0, 2.0, 3.0], + "AAINTEGER": [2, 2, 3, 4], + "AACOMPLEX": [3.0 + 4.0j, 3.0 + 4.0j, 5.0 + 6.0j, 7.0 + 7.0j], + "AACHAR": ["namelist", "namelist", "array", " the lot"], + "AABOOL": [True, True, False, False], + } + ], + "XXDATA": [{"XXREAL": 1.0, "XXINTEGER": 2, "XXCOMPLEX": 3.0 + 4.0j}], + }, + ) diff --git a/SalishSeaTools/tests/test_nc_tools.py b/SalishSeaTools/tests/test_nc_tools.py index e68ea02b..cfd2aded 100644 --- a/SalishSeaTools/tests/test_nc_tools.py +++ b/SalishSeaTools/tests/test_nc_tools.py @@ -29,82 +29,78 @@ from salishsea_tools import nc_tools -@pytest.mark.parametrize('path, args, kwargs, expected', [ - ('foo/bar.nc', [], {}, 'foo/bar.nc'), - ('foo/bar.nc', ['w'], {}, 'foo/bar.nc'), - ('foo/bar.nc', ['w'], {'format': 'NETCDF4_CLASSIC'}, 'foo/bar.nc'), - (Path('foo/bar.nc'), [], {}, 'foo/bar.nc'), - (Path('foo/bar.nc'), ['w'], {}, 'foo/bar.nc'), - (Path('foo/bar.nc'), ['w'], {'format': 'NETCDF4_CLASSIC'}, 'foo/bar.nc'), -]) +@pytest.mark.parametrize( + "path, args, kwargs, expected", + [ + ("foo/bar.nc", [], {}, "foo/bar.nc"), + ("foo/bar.nc", ["w"], {}, "foo/bar.nc"), + ("foo/bar.nc", ["w"], {"format": "NETCDF4_CLASSIC"}, "foo/bar.nc"), + (Path("foo/bar.nc"), [], {}, "foo/bar.nc"), + (Path("foo/bar.nc"), ["w"], {}, "foo/bar.nc"), + (Path("foo/bar.nc"), ["w"], {"format": "NETCDF4_CLASSIC"}, "foo/bar.nc"), + ], +) def test_dataset_from_path(path, args, kwargs, expected): - """dataset_from_path calls netCDF4.Dataset w/ path as str, args & kwargs - """ - with patch.object(nc_tools.nc, 'Dataset') as m_Dataset: + """dataset_from_path calls netCDF4.Dataset w/ path as str, args & kwargs""" + with patch.object(nc_tools.nc, "Dataset") as m_Dataset: dataset = nc_tools.dataset_from_path(path, *args, **kwargs) assert dataset == m_Dataset(expected, *args, **kwargs) def test_show_dataset_attrs_file_format(capsys, nc_dataset): - """show_dataset_attrs prints file_format attr - """ + 
"""show_dataset_attrs prints file_format attr""" nc_tools.show_dataset_attrs(nc_dataset) out, err = capsys.readouterr() - assert out.splitlines()[0] == 'file format: NETCDF4' + assert out.splitlines()[0] == "file format: NETCDF4" def test_show_dataset_attrs_1_attr(capsys, nc_dataset): - """show_dataset_attrs prints attr name and value - """ - nc_dataset.Conventions = 'CF-1.6' + """show_dataset_attrs prints attr name and value""" + nc_dataset.Conventions = "CF-1.6" nc_tools.show_dataset_attrs(nc_dataset) out, err = capsys.readouterr() - assert out.splitlines()[1] == 'Conventions: CF-1.6' + assert out.splitlines()[1] == "Conventions: CF-1.6" def test_show_dataset_attrs_order(capsys, nc_dataset): - """show_dataset_attrs prints attr names & values in order they were set - """ - nc_dataset.Conventions = 'CF-1.6' - nc_dataset.title = 'Test Dataset' + """show_dataset_attrs prints attr names & values in order they were set""" + nc_dataset.Conventions = "CF-1.6" + nc_dataset.title = "Test Dataset" nc_tools.show_dataset_attrs(nc_dataset) out, err = capsys.readouterr() - assert out.splitlines()[2] == 'title: Test Dataset' + assert out.splitlines()[2] == "title: Test Dataset" def test_show_dimensions(capsys, nc_dataset): - """show_dimensions prints dimension string representation - """ - nc_dataset.createDimension('foo', 42) + """show_dimensions prints dimension string representation""" + nc_dataset.createDimension("foo", 42) nc_tools.show_dimensions(nc_dataset) - expected = ['"": name = \'foo\', size = 42'] + expected = ["\"\": name = 'foo', size = 42"] out, err = capsys.readouterr() assert out.splitlines() == expected def test_show_dimensions_order(capsys, nc_dataset): - """show_dimensions prints dimension in order they were defined - """ - nc_dataset.createDimension('foo', 42) - nc_dataset.createDimension('bar', 24) + """show_dimensions prints dimension in order they were defined""" + nc_dataset.createDimension("foo", 42) + nc_dataset.createDimension("bar", 24) nc_tools.show_dimensions(nc_dataset) expected = [ - '"": name = \'foo\', size = 42', - '"": name = \'bar\', size = 24', + "\"\": name = 'foo', size = 42", + "\"\": name = 'bar', size = 24", ] out, err = capsys.readouterr() assert out.splitlines() == expected def test_show_variables(capsys, nc_dataset): - """show_variables prints list of variable names - """ - nc_dataset.createDimension('x', 42) - nc_dataset.createVariable('foo', float, ('x',)) + """show_variables prints list of variable names""" + nc_dataset.createDimension("x", 42) + nc_dataset.createVariable("foo", float, ("x",)) nc_tools.show_variables(nc_dataset) @@ -113,11 +109,10 @@ def test_show_variables(capsys, nc_dataset): def test_show_variables_order(capsys, nc_dataset): - """show_variables prints list of variable names in order they were defined - """ - nc_dataset.createDimension('x', 42) - nc_dataset.createVariable('foo', float, ('x',)) - nc_dataset.createVariable('bar', float, ('x',)) + """show_variables prints list of variable names in order they were defined""" + nc_dataset.createDimension("x", 42) + nc_dataset.createVariable("foo", float, ("x",)) + nc_dataset.createVariable("bar", float, ("x",)) nc_tools.show_variables(nc_dataset) @@ -126,11 +121,10 @@ def test_show_variables_order(capsys, nc_dataset): def test_show_variable_attrs(capsys, nc_dataset): - """show_variable_attrs prints variable string representation - """ - nc_dataset.createDimension('x', 42) - foo = nc_dataset.createVariable('foo', float, ('x',)) - foo.units = 'm' + """show_variable_attrs prints variable 
string representation""" + nc_dataset.createDimension("x", 42) + foo = nc_dataset.createVariable("foo", float, ("x",)) + foo.units = "m" nc_tools.show_variable_attrs(nc_dataset) @@ -147,11 +141,10 @@ def test_show_variable_attrs(capsys, nc_dataset): def test_show_variable_attrs_order(capsys, nc_dataset): - """show_variable_attrs prints variables in order they were defined - """ - nc_dataset.createDimension('x', 42) - nc_dataset.createVariable('foo', float, ('x',)) - nc_dataset.createVariable('bar', float, ('x',)) + """show_variable_attrs prints variables in order they were defined""" + nc_dataset.createDimension("x", 42) + nc_dataset.createVariable("foo", float, ("x",)) + nc_dataset.createVariable("bar", float, ("x",)) nc_tools.show_variable_attrs(nc_dataset) @@ -172,14 +165,13 @@ def test_show_variable_attrs_order(capsys, nc_dataset): def test_show_variable_attrs_specified_var(capsys, nc_dataset): - """show_variable_attrs prints string repr of specified variable - """ - nc_dataset.createDimension('x', 42) - foo = nc_dataset.createVariable('foo', float, ('x',)) - foo.units = 'm' - nc_dataset.createVariable('bar', float, ('x',)) + """show_variable_attrs prints string repr of specified variable""" + nc_dataset.createDimension("x", 42) + foo = nc_dataset.createVariable("foo", float, ("x",)) + foo.units = "m" + nc_dataset.createVariable("bar", float, ("x",)) - nc_tools.show_variable_attrs(nc_dataset, 'foo') + nc_tools.show_variable_attrs(nc_dataset, "foo") expected = [ "", @@ -194,13 +186,12 @@ def test_show_variable_attrs_specified_var(capsys, nc_dataset): def test_show_variable_attrs_specified_var_order(capsys, nc_dataset): - """show_variable_attrs prints specified vars in order they were defined - """ - nc_dataset.createDimension('x', 42) - nc_dataset.createVariable('foo', float, ('x',)) - nc_dataset.createVariable('bar', float, ('x',)) + """show_variable_attrs prints specified vars in order they were defined""" + nc_dataset.createDimension("x", 42) + nc_dataset.createVariable("foo", float, ("x",)) + nc_dataset.createVariable("bar", float, ("x",)) - nc_tools.show_variable_attrs(nc_dataset, 'foo', 'bar') + nc_tools.show_variable_attrs(nc_dataset, "foo", "bar") expected = [ "", @@ -219,12 +210,10 @@ def test_show_variable_attrs_specified_var_order(capsys, nc_dataset): def test_time_origin_value(nc_dataset): - """time_origin returns expected Arrow instance - """ - nc_dataset.createDimension('time_counter') - time_counter = nc_dataset.createVariable( - 'time_counter', float, ('time_counter',)) - time_counter.time_origin = '2002-OCT-26 00:00:00' + """time_origin returns expected Arrow instance""" + nc_dataset.createDimension("time_counter") + time_counter = nc_dataset.createVariable("time_counter", float, ("time_counter",)) + time_counter.time_origin = "2002-OCT-26 00:00:00" time_origin = nc_tools.time_origin(nc_dataset) assert time_origin == arrow.get(2002, 10, 26, 0, 0, 0) @@ -233,62 +222,52 @@ def test_time_origin_value_format2(nc_dataset): """time_origin returns expected Arrow instance. 
time_origin format is 'YYYY-MM-DD HH:mm:ss' """ - nc_dataset.createDimension('time_counter') - time_counter = nc_dataset.createVariable( - 'time_counter', float, ('time_counter',)) - time_counter.time_origin = '2002-10-26 00:00:00' + nc_dataset.createDimension("time_counter") + time_counter = nc_dataset.createVariable("time_counter", float, ("time_counter",)) + time_counter.time_origin = "2002-10-26 00:00:00" time_origin = nc_tools.time_origin(nc_dataset) assert time_origin == arrow.get(2002, 10, 26, 0, 0, 0) def test_time_origin_UTC_timezone(nc_dataset): - """time_origin return value has UTC timezone - """ - nc_dataset.createDimension('time_counter') - time_counter = nc_dataset.createVariable( - 'time_counter', float, ('time_counter',)) - time_counter.time_origin = '2002-OCT-26 00:00:00' + """time_origin return value has UTC timezone""" + nc_dataset.createDimension("time_counter") + time_counter = nc_dataset.createVariable("time_counter", float, ("time_counter",)) + time_counter.time_origin = "2002-OCT-26 00:00:00" time_origin = nc_tools.time_origin(nc_dataset) assert time_origin.tzinfo == dateutil.tz.tzutc() def test_time_origin_missing(nc_dataset): - """time_origin raises AttributeError if dataset lacks time_origin attr - """ + """time_origin raises AttributeError if dataset lacks time_origin attr""" with pytest.raises(AttributeError): - nc_dataset.createDimension('time_counter') - nc_dataset.createVariable( - 'time_counter', float, ('time_counter',)) + nc_dataset.createDimension("time_counter") + nc_dataset.createVariable("time_counter", float, ("time_counter",)) nc_tools.time_origin(nc_dataset) def test_time_counter_missing(nc_dataset): - """time_origin raises KeyError if dataset lacks time_counter variable - """ + """time_origin raises KeyError if dataset lacks time_counter variable""" with pytest.raises(KeyError): nc_tools.time_origin(nc_dataset) def test_timestamp_value(nc_dataset): - """timestamp returns expected Arrow instance - """ - nc_dataset.createDimension('time_counter') - time_counter = nc_dataset.createVariable( - 'time_counter', float, ('time_counter',)) - time_counter.time_origin = '2002-OCT-26 00:00:00' - time_counter[:] = np.array([8.5 * 60*60]) + """timestamp returns expected Arrow instance""" + nc_dataset.createDimension("time_counter") + time_counter = nc_dataset.createVariable("time_counter", float, ("time_counter",)) + time_counter.time_origin = "2002-OCT-26 00:00:00" + time_counter[:] = np.array([8.5 * 60 * 60]) timestamp = nc_tools.timestamp(nc_dataset, 0) assert timestamp == arrow.get(2002, 10, 26, 8, 30, 0) def test_timestamp_value_list(nc_dataset): - """timestamp returns expected list of Arrow instances - """ - nc_dataset.createDimension('time_counter') - time_counter = nc_dataset.createVariable( - 'time_counter', float, ('time_counter',)) - time_counter.time_origin = '2002-OCT-26 00:00:00' - time_counter[:] = np.array([0.5, 1.5]) * 60*60 + """timestamp returns expected list of Arrow instances""" + nc_dataset.createDimension("time_counter") + time_counter = nc_dataset.createVariable("time_counter", float, ("time_counter",)) + time_counter.time_origin = "2002-OCT-26 00:00:00" + time_counter[:] = np.array([0.5, 1.5]) * 60 * 60 timestamp = nc_tools.timestamp(nc_dataset, (0, 1)) expected = [ arrow.get(2002, 10, 26, 0, 30, 0), @@ -298,344 +277,343 @@ def test_timestamp_value_list(nc_dataset): def test_timestamp_index_error(nc_dataset): - """timestamp returns expected Arrow instance - """ - nc_dataset.createDimension('time_counter') - time_counter = 
nc_dataset.createVariable( - 'time_counter', float, ('time_counter',)) - time_counter.time_origin = '2002-OCT-26 00:00:00' - time_counter[:] = np.array([8.5 * 60*60]) + """timestamp returns expected Arrow instance""" + nc_dataset.createDimension("time_counter") + time_counter = nc_dataset.createVariable("time_counter", float, ("time_counter",)) + time_counter.time_origin = "2002-OCT-26 00:00:00" + time_counter[:] = np.array([8.5 * 60 * 60]) with pytest.raises(IndexError): nc_tools.timestamp(nc_dataset, 1) -@pytest.mark.parametrize('datetimes, expected', [ - (False, arrow.Arrow), - (True, datetime.datetime), -]) +@pytest.mark.parametrize( + "datetimes, expected", + [ + (False, arrow.Arrow), + (True, datetime.datetime), + ], +) def test_ssh_timeseries_at_point_time_counter_type( - datetimes, expected, nc_dataset, + datetimes, + expected, + nc_dataset, ): - """Sea surface height timeseries time counter values have expected type - """ - nc_dataset.createDimension('time_counter') - nc_dataset.createDimension('y', 1) - nc_dataset.createDimension('x', 1) - ssh = nc_dataset.createVariable( - 'sossheig', float, ('time_counter', 'y', 'x')) + """Sea surface height timeseries time counter values have expected type""" + nc_dataset.createDimension("time_counter") + nc_dataset.createDimension("y", 1) + nc_dataset.createDimension("x", 1) + ssh = nc_dataset.createVariable("sossheig", float, ("time_counter", "y", "x")) ssh[:] = np.array([5.0, 5.3]) - time_counter = nc_dataset.createVariable( - 'time_counter', float, ('time_counter',)) - time_counter.time_origin = '2002-OCT-26 00:00:00' - time_counter[:] = np.array([0.5, 1.5]) * 60*60 + time_counter = nc_dataset.createVariable("time_counter", float, ("time_counter",)) + time_counter.time_origin = "2002-OCT-26 00:00:00" + time_counter[:] = np.array([0.5, 1.5]) * 60 * 60 ssh_ts = nc_tools.ssh_timeseries_at_point(nc_dataset, 0, 0, datetimes) np.testing.assert_array_equal(ssh_ts.ssh, np.array([5.0, 5.3])) assert isinstance(ssh_ts.time[0], expected) -@pytest.mark.parametrize('datetimes, expected', [ - (False, arrow.Arrow), - (True, datetime.datetime), -]) +@pytest.mark.parametrize( + "datetimes, expected", + [ + (False, arrow.Arrow), + (True, datetime.datetime), + ], +) def test_uv_wind_timeseries_at_point_time_counter_type( - datetimes, expected, nc_dataset, + datetimes, + expected, + nc_dataset, ): - """u and v wind components timeseries time counter values have expected type - """ - nc_dataset.createDimension('time_counter') - nc_dataset.createDimension('y', 1) - nc_dataset.createDimension('x', 1) - u_wind = nc_dataset.createVariable( - 'u_wind', float, ('time_counter', 'y', 'x')) + """u and v wind components timeseries time counter values have expected type""" + nc_dataset.createDimension("time_counter") + nc_dataset.createDimension("y", 1) + nc_dataset.createDimension("x", 1) + u_wind = nc_dataset.createVariable("u_wind", float, ("time_counter", "y", "x")) u_wind[:] = np.array([-8.75, -4.41]) - v_wind = nc_dataset.createVariable( - 'v_wind', float, ('time_counter', 'y', 'x')) + v_wind = nc_dataset.createVariable("v_wind", float, ("time_counter", "y", "x")) v_wind[:] = np.array([0.43, -0.37]) - time_counter = nc_dataset.createVariable( - 'time_counter', float, ('time_counter',)) - time_counter.time_origin = '2002-OCT-26 00:00:00' - time_counter[:] = np.array([0.5, 1.5]) * 60*60 + time_counter = nc_dataset.createVariable("time_counter", float, ("time_counter",)) + time_counter.time_origin = "2002-OCT-26 00:00:00" + time_counter[:] = np.array([0.5, 1.5]) * 60 * 
60 wind_ts = nc_tools.uv_wind_timeseries_at_point(nc_dataset, 0, 0, datetimes) np.testing.assert_array_equal(wind_ts.u, np.array([-8.75, -4.41])) np.testing.assert_array_equal(wind_ts.v, np.array([0.43, -0.37])) assert isinstance(wind_ts.time[0], expected) -@patch('salishsea_tools.nc_tools._notebook_hg_url') -@patch('salishsea_tools.nc_tools._nc_file_hg_url') +@patch("salishsea_tools.nc_tools._notebook_hg_url") +@patch("salishsea_tools.nc_tools._nc_file_hg_url") def test_init_dataset_attrs(mock_nhu, mock_nfhu, nc_dataset): - """init_dataset_attrs initializes dataset global attrs - """ + """init_dataset_attrs initializes dataset global attrs""" nc_tools.init_dataset_attrs( - nc_dataset, 'Test Dataset', 'TestDatasetNotebook', 'test_dataset.nc') - assert nc_dataset.Conventions == 'CF-1.6' + nc_dataset, "Test Dataset", "TestDatasetNotebook", "test_dataset.nc" + ) + assert nc_dataset.Conventions == "CF-1.6" -@patch('salishsea_tools.nc_tools._notebook_hg_url') -@patch('salishsea_tools.nc_tools._nc_file_hg_url') +@patch("salishsea_tools.nc_tools._notebook_hg_url") +@patch("salishsea_tools.nc_tools._nc_file_hg_url") def test_init_dataset_attrs_quiet(mock_nhu, mock_nfhu, capsys, nc_dataset): - """init_dataset_attrs prints no output when quiet=True - """ + """init_dataset_attrs prints no output when quiet=True""" nc_tools.init_dataset_attrs( - nc_dataset, 'Test Dataset', 'TestDatasetNotebook', 'test_dataset.nc', - quiet=True) + nc_dataset, "Test Dataset", "TestDatasetNotebook", "test_dataset.nc", quiet=True + ) out, err = capsys.readouterr() - assert out == '' + assert out == "" -@patch('salishsea_tools.nc_tools._notebook_hg_url') -@patch('salishsea_tools.nc_tools._nc_file_hg_url') +@patch("salishsea_tools.nc_tools._notebook_hg_url") +@patch("salishsea_tools.nc_tools._nc_file_hg_url") def test_init_dataset_attrs_no_oversrite( - mock_nhu, mock_nfhu, capsys, nc_dataset, + mock_nhu, + mock_nfhu, + capsys, + nc_dataset, ): - """init_dataset_attrs does not overwrite existing attrs - """ - nc_dataset.Conventions = 'CF-1.6' + """init_dataset_attrs does not overwrite existing attrs""" + nc_dataset.Conventions = "CF-1.6" nc_tools.init_dataset_attrs( - nc_dataset, 'Test Dataset', 'TestDatasetNotebook', 'test_dataset.nc') + nc_dataset, "Test Dataset", "TestDatasetNotebook", "test_dataset.nc" + ) out, err = capsys.readouterr() assert out.splitlines()[0] == ( - 'Existing attribute value found, not overwriting: Conventions') + "Existing attribute value found, not overwriting: Conventions" + ) -@patch('salishsea_tools.nc_tools._notebook_hg_url') -@patch('salishsea_tools.nc_tools._nc_file_hg_url') +@patch("salishsea_tools.nc_tools._notebook_hg_url") +@patch("salishsea_tools.nc_tools._nc_file_hg_url") def test_init_dataset_attrs_no_oversrite_quiet( - mock_nhu, mock_nfhu, capsys, nc_dataset, + mock_nhu, + mock_nfhu, + capsys, + nc_dataset, ): - """init_dataset_attrs suppresses no-overwrite notice when quiet=True - """ - nc_dataset.Conventions = 'CF-1.6' - nc_dataset.history = 'foo' + """init_dataset_attrs suppresses no-overwrite notice when quiet=True""" + nc_dataset.Conventions = "CF-1.6" + nc_dataset.history = "foo" nc_tools.init_dataset_attrs( - nc_dataset, 'Test Dataset', 'TestDatasetNotebook', 'test_dataset.nc', - quiet=True) + nc_dataset, "Test Dataset", "TestDatasetNotebook", "test_dataset.nc", quiet=True + ) out, err = capsys.readouterr() - assert out == '' - assert nc_dataset.history == 'foo' + assert out == "" + assert nc_dataset.history == "foo" @patch( - 'salishsea_tools.nc_tools.hg.default_url', - 
return_value='ssh://hg@bitbucket.org/SalishSeaCast/foo') + "salishsea_tools.nc_tools.hg.default_url", + return_value="ssh://hg@bitbucket.org/SalishSeaCast/foo", +) def test_notebook_hg_url(mock_dflt_url): - """_notebook_hg_url returns expected URL - """ - url = nc_tools._notebook_hg_url('bar.ipynb') - assert url == 'https://bitbucket.org/SalishSeaCast/foo/src/tip/bar.ipynb' + """_notebook_hg_url returns expected URL""" + url = nc_tools._notebook_hg_url("bar.ipynb") + assert url == "https://bitbucket.org/SalishSeaCast/foo/src/tip/bar.ipynb" def test_notebook_hg_url_no_notebook_name(): - """_notebook_hg_url returns REQUIRED if notebook name arg is empty - """ - url = nc_tools._notebook_hg_url('') - assert url == 'REQUIRED' + """_notebook_hg_url returns REQUIRED if notebook name arg is empty""" + url = nc_tools._notebook_hg_url("") + assert url == "REQUIRED" -@patch('salishsea_tools.nc_tools.hg.default_url', return_value=None) +@patch("salishsea_tools.nc_tools.hg.default_url", return_value=None) def test_notebook_hg_url_REQUIRED(mock_dflt_url): - """_notebook_hg_url returns REQUIRED if bitbucket not in repo URL - """ - url = nc_tools._notebook_hg_url('foo') - assert url == 'REQUIRED' + """_notebook_hg_url returns REQUIRED if bitbucket not in repo URL""" + url = nc_tools._notebook_hg_url("foo") + assert url == "REQUIRED" @patch( - 'salishsea_tools.nc_tools.hg.default_url', - return_value='ssh://hg@bitbucket.org/SalishSeaCast/foo') + "salishsea_tools.nc_tools.hg.default_url", + return_value="ssh://hg@bitbucket.org/SalishSeaCast/foo", +) def test_notebook_hg_url_adds_ipynb(mock_dflt_url): - """_notebook_hg_url adds .ipynb extension if notebook name lacks it - """ - url = nc_tools._notebook_hg_url('bar') - assert url == 'https://bitbucket.org/SalishSeaCast/foo/src/tip/bar.ipynb' + """_notebook_hg_url adds .ipynb extension if notebook name lacks it""" + url = nc_tools._notebook_hg_url("bar") + assert url == "https://bitbucket.org/SalishSeaCast/foo/src/tip/bar.ipynb" @patch( - 'salishsea_tools.nc_tools.hg.default_url', - return_value='ssh://hg@bitbucket.org/SalishSeaCast/foo') + "salishsea_tools.nc_tools.hg.default_url", + return_value="ssh://hg@bitbucket.org/SalishSeaCast/foo", +) def test_nc_file_hg_url(mock_dflt_url): - """_nc_file_hg_url returns expected URL - """ - url = nc_tools._nc_file_hg_url('../bar/baz.nc') - assert url == 'https://bitbucket.org/SalishSeaCast/foo/src/tip/baz.nc' + """_nc_file_hg_url returns expected URL""" + url = nc_tools._nc_file_hg_url("../bar/baz.nc") + assert url == "https://bitbucket.org/SalishSeaCast/foo/src/tip/baz.nc" def test_nc_file_hg_url_no_nc_filepath(): - """_nc_file_hg_url returns REQUIRED if nc_filepath arg is empty - """ - url = nc_tools._nc_file_hg_url('') - assert url == 'REQUIRED' + """_nc_file_hg_url returns REQUIRED if nc_filepath arg is empty""" + url = nc_tools._nc_file_hg_url("") + assert url == "REQUIRED" -@patch('salishsea_tools.nc_tools.hg.default_url', return_value=None) +@patch("salishsea_tools.nc_tools.hg.default_url", return_value=None) def test_nc_file_hg_url_REQUIRED(mock_dflt_url): - """_nc_file_hg_url returns REQUIRED if bitbucket not in repo URL - """ - url = nc_tools._nc_file_hg_url('../bar/baz.nc') - assert url == 'REQUIRED' + """_nc_file_hg_url returns REQUIRED if bitbucket not in repo URL""" + url = nc_tools._nc_file_hg_url("../bar/baz.nc") + assert url == "REQUIRED" def test_check_dataset_attrs_reqd_dataset_attrs(capsys, nc_dataset): - """check_dataset_attrs warns of missing required dataset attributes - """ + 
"""check_dataset_attrs warns of missing required dataset attributes""" nc_tools.check_dataset_attrs(nc_dataset) out, err = capsys.readouterr() reqd_attrs = ( - 'Conventions', - 'title', - 'institution', - 'source', - 'references', - 'history', - 'comment', + "Conventions", + "title", + "institution", + "source", + "references", + "history", + "comment", ) for line, expected in enumerate(reqd_attrs): assert out.splitlines()[line] == ( - 'Missing required dataset attribute: {}'.format(expected)) + "Missing required dataset attribute: {}".format(expected) + ) def test_check_dataset_attrs_reqd_dataset_attr_values(capsys, nc_dataset): - """check_dataset_attrs warns of missing reqd dataset attr values - """ + """check_dataset_attrs warns of missing reqd dataset attr values""" reqd_attrs = ( - 'Conventions', - 'title', - 'institution', - 'source', - 'references', - 'history', + "Conventions", + "title", + "institution", + "source", + "references", + "history", ) for attr in reqd_attrs: - nc_dataset.setncattr(attr, '') + nc_dataset.setncattr(attr, "") nc_tools.check_dataset_attrs(nc_dataset) out, err = capsys.readouterr() for line, attr in enumerate(reqd_attrs): assert out.splitlines()[line] == ( - 'Missing value for dataset attribute: {}'.format(attr)) + "Missing value for dataset attribute: {}".format(attr) + ) def test_check_dataset_attrs_url_reqd(capsys, nc_dataset): - """check_dataset_attrs warns of source or references set to REQUIRED - """ + """check_dataset_attrs warns of source or references set to REQUIRED""" empty_reqd_attrs = ( - 'Conventions', - 'title', - 'institution', - 'references', + "Conventions", + "title", + "institution", + "references", ) for attr in empty_reqd_attrs: - nc_dataset.setncattr(attr, 'foo') + nc_dataset.setncattr(attr, "foo") REQUIRED_reqd_attrs = ( - 'source', - 'references', + "source", + "references", ) for attr in REQUIRED_reqd_attrs: - nc_dataset.setncattr(attr, 'REQUIRED') + nc_dataset.setncattr(attr, "REQUIRED") nc_tools.check_dataset_attrs(nc_dataset) out, err = capsys.readouterr() for line, attr in enumerate(REQUIRED_reqd_attrs): assert out.splitlines()[line] == ( - 'Missing value for dataset attribute: {}'.format(attr)) + "Missing value for dataset attribute: {}".format(attr) + ) def test_check_dataset_attrs_good(capsys, nc_dataset): - """check_dataset_attrs prints nothing when all reqd attts present w/ value - """ + """check_dataset_attrs prints nothing when all reqd attts present w/ value""" dataset_attrs = ( - ('Conventions', 'CF-1.6'), - ('title', 'Test Dataset'), - ('institution', 'Unit Tests'), - ('source', 'foo'), - ('references', 'bar'), - ('history', 'was'), - ('comment', ''), + ("Conventions", "CF-1.6"), + ("title", "Test Dataset"), + ("institution", "Unit Tests"), + ("source", "foo"), + ("references", "bar"), + ("history", "was"), + ("comment", ""), ) for attr, value in dataset_attrs: nc_dataset.setncattr(attr, value) nc_tools.check_dataset_attrs(nc_dataset) out, err = capsys.readouterr() - assert out == '' + assert out == "" def test_check_dataset_attrs_reqd_var_attrs(capsys, nc_dataset): - """check_dataset_attrs warns of missing required variable attributes - """ + """check_dataset_attrs warns of missing required variable attributes""" dataset_attrs = ( - ('Conventions', 'CF-1.6'), - ('title', 'Test Dataset'), - ('institution', 'Unit Tests'), - ('source', 'foo'), - ('references', 'bar'), - ('history', 'was'), - ('comment', ''), + ("Conventions", "CF-1.6"), + ("title", "Test Dataset"), + ("institution", "Unit Tests"), + ("source", "foo"), + 
("references", "bar"), + ("history", "was"), + ("comment", ""), ) for attr, value in dataset_attrs: nc_dataset.setncattr(attr, value) - nc_dataset.createDimension('x', 42) - nc_dataset.createVariable('foo', float, ('x',)) + nc_dataset.createDimension("x", 42) + nc_dataset.createVariable("foo", float, ("x",)) nc_tools.check_dataset_attrs(nc_dataset) out, err = capsys.readouterr() reqd_attrs = ( - 'units', - 'long_name', + "units", + "long_name", ) for line, expected in enumerate(reqd_attrs): assert out.splitlines()[line] == ( - 'Missing required variable attribute for foo: {}'.format(expected)) + "Missing required variable attribute for foo: {}".format(expected) + ) def test_check_dataset_attrs_reqd_var_attr_values(capsys, nc_dataset): - """check_dataset_attrs warns of missing reqd variable attr values - """ + """check_dataset_attrs warns of missing reqd variable attr values""" dataset_attrs = ( - ('Conventions', 'CF-1.6'), - ('title', 'Test Dataset'), - ('institution', 'Unit Tests'), - ('source', 'foo'), - ('references', 'bar'), - ('history', 'was'), - ('comment', ''), + ("Conventions", "CF-1.6"), + ("title", "Test Dataset"), + ("institution", "Unit Tests"), + ("source", "foo"), + ("references", "bar"), + ("history", "was"), + ("comment", ""), ) for attr, value in dataset_attrs: nc_dataset.setncattr(attr, value) - nc_dataset.createDimension('x', 42) - foo = nc_dataset.createVariable('foo', float, ('x',)) + nc_dataset.createDimension("x", 42) + foo = nc_dataset.createVariable("foo", float, ("x",)) reqd_attrs = ( - 'units', - 'long_name', + "units", + "long_name", ) for attr in reqd_attrs: - foo.setncattr(attr, '') + foo.setncattr(attr, "") nc_tools.check_dataset_attrs(nc_dataset) out, err = capsys.readouterr() for line, expected in enumerate(reqd_attrs): assert out.splitlines()[line] == ( - 'Missing value for variable attribute for foo: {}' - .format(expected)) + "Missing value for variable attribute for foo: {}".format(expected) + ) def test_check_dataset_attrs_car_attrs_good(capsys, nc_dataset): - """check_dataset_attrs prints nothing when reqd var attrs present w/ values - """ + """check_dataset_attrs prints nothing when reqd var attrs present w/ values""" dataset_attrs = ( - ('Conventions', 'CF-1.6'), - ('title', 'Test Dataset'), - ('institution', 'Unit Tests'), - ('source', 'foo'), - ('references', 'bar'), - ('history', 'was'), - ('comment', ''), + ("Conventions", "CF-1.6"), + ("title", "Test Dataset"), + ("institution", "Unit Tests"), + ("source", "foo"), + ("references", "bar"), + ("history", "was"), + ("comment", ""), ) for attr, value in dataset_attrs: nc_dataset.setncattr(attr, value) - nc_dataset.createDimension('x', 42) - foo = nc_dataset.createVariable('foo', float, ('x',)) + nc_dataset.createDimension("x", 42) + foo = nc_dataset.createVariable("foo", float, ("x",)) reqd_attrs = ( - ('units', 'foo'), - ('long_name', 'bar'), + ("units", "foo"), + ("long_name", "bar"), ) for attr, value in reqd_attrs: foo.setncattr(attr, value) nc_tools.check_dataset_attrs(nc_dataset) out, err = capsys.readouterr() - assert out == '' + assert out == "" diff --git a/SalishSeaTools/tests/test_stormtools.py b/SalishSeaTools/tests/test_stormtools.py index f586ed34..e2571c8e 100644 --- a/SalishSeaTools/tests/test_stormtools.py +++ b/SalishSeaTools/tests/test_stormtools.py @@ -23,20 +23,24 @@ class TestStormSurgeRiskLevel(object): - """Unit tests for storm_surge_risk_level() function. 
- """ + """Unit tests for storm_surge_risk_level() function.""" + def test_places_key_error(self): - m_ttide = Mock(name='ttide', pred_all=[42]) + m_ttide = Mock(name="ttide", pred_all=[42]) with pytest.raises(KeyError): - stormtools.storm_surge_risk_level('foo', 42.24, m_ttide) - - @pytest.mark.parametrize('max_ssh, expected', [ - (4.9, None), - (5.1, 'moderate risk'), - (5.4, 'extreme risk'), - ]) + stormtools.storm_surge_risk_level("foo", 42.24, m_ttide) + + @pytest.mark.parametrize( + "max_ssh, expected", + [ + (4.9, None), + (5.1, "moderate risk"), + (5.4, "extreme risk"), + ], + ) def test_risk_level(self, max_ssh, expected): - m_ttide = Mock(name='ttide', pred_all=[2]) + m_ttide = Mock(name="ttide", pred_all=[2]) risk_level = stormtools.storm_surge_risk_level( - 'Point Atkinson', max_ssh, m_ttide) + "Point Atkinson", max_ssh, m_ttide + ) assert risk_level == expected diff --git a/SalishSeaTools/tests/test_teos_tools.py b/SalishSeaTools/tests/test_teos_tools.py index e3d37d29..bdf59615 100644 --- a/SalishSeaTools/tests/test_teos_tools.py +++ b/SalishSeaTools/tests/test_teos_tools.py @@ -33,40 +33,53 @@ def test_TEOS_PSU_constant_value(): np.testing.assert_allclose(teos_tools.TEOS_PSU, expected) -@pytest.mark.parametrize('psu, expected', [ - (0, 0), - (35, 35.16504), - (30, 30.14146), - (70, 70.33008), -]) +@pytest.mark.parametrize( + "psu, expected", + [ + (0, 0), + (35, 35.16504), + (30, 30.14146), + (70, 70.33008), + ], +) def test_psu_teos(psu, expected): np.testing.assert_allclose(teos_tools.psu_teos(psu), expected) -@pytest.mark.parametrize('psu, expected', [ - (np.array([0, 30, 35, 70]), np.array([0, 30.14146, 35.16504, 70.33008])), - ([0, 30, 35, 70], np.array([0, 30.14146, 35.16504, 70.33008])), - ((0, 30, 35, 70), np.array([0, 30.14146, 35.16504, 70.33008])), -]) +@pytest.mark.parametrize( + "psu, expected", + [ + (np.array([0, 30, 35, 70]), np.array([0, 30.14146, 35.16504, 70.33008])), + ([0, 30, 35, 70], np.array([0, 30.14146, 35.16504, 70.33008])), + ((0, 30, 35, 70), np.array([0, 30.14146, 35.16504, 70.33008])), + ], +) def test_psu_teos_polymorphic_sequence(psu, expected): teos = teos_tools.psu_teos(psu) np.testing.assert_allclose(teos, expected) -@pytest.mark.parametrize('teos, expected', [ - (0, 0), - (35.16504, 35), - (30.14146, 30), - (70.33008, 70), -]) +@pytest.mark.parametrize( + "teos, expected", + [ + (0, 0), + (35.16504, 35), + (30.14146, 30), + (70.33008, 70), + ], +) def test_teos_psu(teos, expected): np.testing.assert_allclose(teos_tools.teos_psu(teos), expected) -@pytest.mark.parametrize('teos, expected', [ - (np.array([0, 30.14146, 35.16504, 70.33008]), np.array([0, 30, 35, 70])), - ([0, 30.14146, 35.16504, 70.33008], np.array([0, 30, 35, 70])), - ((0, 30.14146, 35.16504, 70.33008), np.array([0, 30, 35, 70])), -]) + +@pytest.mark.parametrize( + "teos, expected", + [ + (np.array([0, 30.14146, 35.16504, 70.33008]), np.array([0, 30, 35, 70])), + ([0, 30.14146, 35.16504, 70.33008], np.array([0, 30, 35, 70])), + ((0, 30.14146, 35.16504, 70.33008), np.array([0, 30, 35, 70])), + ], +) def test_teos_psu_polymorphic_sequence(teos, expected): psu = teos_tools.teos_psu(teos) np.testing.assert_allclose(psu, expected) diff --git a/SalishSeaTools/tests/test_tidetools.py b/SalishSeaTools/tests/test_tidetools.py index 400f73ca..111175f2 100644 --- a/SalishSeaTools/tests/test_tidetools.py +++ b/SalishSeaTools/tests/test_tidetools.py @@ -27,9 +27,10 @@ def test_get_run_length(tmp_path): runname = "test_run" run_dir = tmp_path / runname run_dir.mkdir() - test_namelist = 
run_dir / 'namelist' - test_namelist.write_text(textwrap.dedent( - ''' + test_namelist = run_dir / "namelist" + test_namelist.write_text( + textwrap.dedent( + """ !! Run timing control !! !! *Note*: The time step is set in the &namdom namelist in the namelist.domain @@ -55,7 +56,7 @@ def test_get_run_length(tmp_path): nn_stock = 12096 ! frequency of creation of a restart file (modulo referenced to 1) ln_clobber = .true. ! clobber (overwrite) an existing file &end - + &nam_diaharm ! Harmonic analysis of tidal constituents ('key_diaharm') !----------------------------------------------------------------------- nit000_han = 8641 ! First time step used for harmonic analysis @@ -65,8 +66,8 @@ def test_get_run_length(tmp_path): tname(1) = 'K1' tname(2) = 'M2' &end - - + + !! Domain configuration !! &namzgr ! vertical coordinates @@ -74,7 +75,7 @@ def test_get_run_length(tmp_path): ln_zco = .false. ! z-coordinate - full steps (T/F) ("key_zco" may also be defined) ln_zps = .true. ! z-coordinate - partial steps (T/F) &end - + &namdom ! space and time domain (bathymetry, mesh, timestep) !----------------------------------------------------------------------- nn_bathy = 1 ! compute (=0) or read (=1) the bathymetry file @@ -92,7 +93,8 @@ def test_get_run_length(tmp_path): rn_rdtmax = 300. ! maximum time step on tracers (used if nn_acc=1) rn_rdth = 300. ! depth variation of tracer time step (used if nn_acc=1) &end - ''' - )) - run_length = tidetools.get_run_length('test_run', test_namelist.parent.parent) + """ + ) + ) + run_length = tidetools.get_run_length("test_run", test_namelist.parent.parent) np.testing.assert_almost_equal(run_length, 2) diff --git a/SalishSeaTools/tests/test_unit_conversions.py b/SalishSeaTools/tests/test_unit_conversions.py index fd0fcef3..36f4e7a4 100644 --- a/SalishSeaTools/tests/test_unit_conversions.py +++ b/SalishSeaTools/tests/test_unit_conversions.py @@ -39,13 +39,15 @@ def test_KNOTS__M_PER_S_constant_value(): np.testing.assert_allclose(unit_conversions.KNOTS__M_PER_S, expected) -@pytest.mark.parametrize('m_per_s, expected', [ - (0, 0), - (1, 3.6), -]) +@pytest.mark.parametrize( + "m_per_s, expected", + [ + (0, 0), + (1, 3.6), + ], +) def test_mps_kph(m_per_s, expected): - np.testing.assert_allclose( - unit_conversions.mps_kph(m_per_s), expected) + np.testing.assert_allclose(unit_conversions.mps_kph(m_per_s), expected) def test_mps_kph_ndarray(): @@ -53,13 +55,17 @@ def test_mps_kph_ndarray(): np.testing.assert_allclose(kph, np.array([0, 3.6])) -@pytest.mark.parametrize('m_per_s, expected', [ - (0, 0), - (1, 1.94384), -]) +@pytest.mark.parametrize( + "m_per_s, expected", + [ + (0, 0), + (1, 1.94384), + ], +) def test_mps_knots(m_per_s, expected): np.testing.assert_allclose( - unit_conversions.mps_knots(m_per_s), expected, rtol=1e-05) + unit_conversions.mps_knots(m_per_s), expected, rtol=1e-05 + ) def test_mps_knots_ndarray(): @@ -67,13 +73,15 @@ def test_mps_knots_ndarray(): np.testing.assert_allclose(knots, np.array([0, 1.94384]), rtol=1e-05) -@pytest.mark.parametrize('knots, expected', [ - (0, 0), - (1, 0.514444), -]) +@pytest.mark.parametrize( + "knots, expected", + [ + (0, 0), + (1, 0.514444), + ], +) def test_knots_mps(knots, expected): - np.testing.assert_allclose( - unit_conversions.knots_mps(knots), expected, rtol=1e-05) + np.testing.assert_allclose(unit_conversions.knots_mps(knots), expected, rtol=1e-05) def test_knots_mps_ndarray(): @@ -81,74 +89,84 @@ def test_knots_mps_ndarray(): np.testing.assert_allclose(knots, np.array([0, 0.514444]), rtol=1e-05) 
-@pytest.mark.parametrize('wind_to, expected', [ - (0, 270), - (90, 180), - (180, 90), - (270, 0), - (359, 271), -]) +@pytest.mark.parametrize( + "wind_to, expected", + [ + (0, 270), + (90, 180), + (180, 90), + (270, 0), + (359, 271), + ], +) def test_wind_to_from(wind_to, expected): - np.testing.assert_allclose( - unit_conversions.wind_to_from(wind_to), expected) + np.testing.assert_allclose(unit_conversions.wind_to_from(wind_to), expected) def test_wind_to_from_ndarray(): - wind_from = unit_conversions.wind_to_from( - np.array([0, 90, 180, 270, 359])) + wind_from = unit_conversions.wind_to_from(np.array([0, 90, 180, 270, 359])) np.testing.assert_allclose(wind_from, np.array([270, 180, 90, 0, 271])) class TestBearingHeading(object): - """Unit tests for bearing_heading() function. - """ - @pytest.mark.parametrize('bearing, expected', [ - (0, 'N'), - (27, 'NNE'), - (359, 'N'), - ]) + """Unit tests for bearing_heading() function.""" + + @pytest.mark.parametrize( + "bearing, expected", + [ + (0, "N"), + (27, "NNE"), + (359, "N"), + ], + ) def test_default_16_points(self, bearing, expected): heading = unit_conversions.bearing_heading(bearing) assert heading == expected - @pytest.mark.parametrize('bearing, expected', [ - (0, 'N'), - (27, 'NE'), - (359, 'N'), - ]) + @pytest.mark.parametrize( + "bearing, expected", + [ + (0, "N"), + (27, "NE"), + (359, "N"), + ], + ) def test_8_points(self, bearing, expected): heading = unit_conversions.bearing_heading( - bearing, - headings=['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW', 'N']) + bearing, headings=["N", "NE", "E", "SE", "S", "SW", "W", "NW", "N"] + ) assert heading == expected class TestHumanizeTimeOfDay(object): - """Unit tests for humanize_time_of_day() function. - """ - @pytest.mark.parametrize('date_time, expected', [ - (arrow.get('2015-12-26 00:00:00'), 'overnight Saturday'), - (arrow.get('2015-12-26 02:15:42'), 'overnight Saturday'), - (arrow.get('2015-12-26 05:59:59'), 'overnight Saturday'), - (arrow.get('2015-12-26 06:00:00'), 'early Saturday morning'), - (arrow.get('2015-12-26 07:22:51'), 'early Saturday morning'), - (arrow.get('2015-12-26 08:59:59'), 'early Saturday morning'), - (arrow.get('2015-12-26 09:00:00'), 'late Saturday morning'), - (arrow.get('2015-12-26 09:52:43'), 'late Saturday morning'), - (arrow.get('2015-12-26 11:59:59'), 'late Saturday morning'), - (arrow.get('2015-12-25 12:00:00'), 'early Friday afternoon'), - (arrow.get('2015-12-25 13:36:11'), 'early Friday afternoon'), - (arrow.get('2015-12-25 14:59:59'), 'early Friday afternoon'), - (arrow.get('2015-12-25 15:00:00'), 'late Friday afternoon'), - (arrow.get('2015-12-25 16:09:21'), 'late Friday afternoon'), - (arrow.get('2015-12-25 17:59:59'), 'late Friday afternoon'), - (arrow.get('2015-12-27 18:00:00'), 'early Sunday evening'), - (arrow.get('2015-12-27 18:01:56'), 'early Sunday evening'), - (arrow.get('2015-12-27 20:59:59'), 'early Sunday evening'), - (arrow.get('2015-12-27 21:00:00'), 'late Sunday evening'), - (arrow.get('2015-12-27 23:43:43'), 'late Sunday evening'), - (arrow.get('2015-12-27 23:59:59'), 'late Sunday evening'), - ]) + """Unit tests for humanize_time_of_day() function.""" + + @pytest.mark.parametrize( + "date_time, expected", + [ + (arrow.get("2015-12-26 00:00:00"), "overnight Saturday"), + (arrow.get("2015-12-26 02:15:42"), "overnight Saturday"), + (arrow.get("2015-12-26 05:59:59"), "overnight Saturday"), + (arrow.get("2015-12-26 06:00:00"), "early Saturday morning"), + (arrow.get("2015-12-26 07:22:51"), "early Saturday morning"), + 
(arrow.get("2015-12-26 08:59:59"), "early Saturday morning"), + (arrow.get("2015-12-26 09:00:00"), "late Saturday morning"), + (arrow.get("2015-12-26 09:52:43"), "late Saturday morning"), + (arrow.get("2015-12-26 11:59:59"), "late Saturday morning"), + (arrow.get("2015-12-25 12:00:00"), "early Friday afternoon"), + (arrow.get("2015-12-25 13:36:11"), "early Friday afternoon"), + (arrow.get("2015-12-25 14:59:59"), "early Friday afternoon"), + (arrow.get("2015-12-25 15:00:00"), "late Friday afternoon"), + (arrow.get("2015-12-25 16:09:21"), "late Friday afternoon"), + (arrow.get("2015-12-25 17:59:59"), "late Friday afternoon"), + (arrow.get("2015-12-27 18:00:00"), "early Sunday evening"), + (arrow.get("2015-12-27 18:01:56"), "early Sunday evening"), + (arrow.get("2015-12-27 20:59:59"), "early Sunday evening"), + (arrow.get("2015-12-27 21:00:00"), "late Sunday evening"), + (arrow.get("2015-12-27 23:43:43"), "late Sunday evening"), + (arrow.get("2015-12-27 23:59:59"), "late Sunday evening"), + ], + ) def test_humanize_time_of_day(self, date_time, expected): result = unit_conversions.humanize_time_of_day(date_time) assert result == expected diff --git a/SalishSeaTools/tests/test_viz_tools.py b/SalishSeaTools/tests/test_viz_tools.py index 3f654c82..74d77d18 100644 --- a/SalishSeaTools/tests/test_viz_tools.py +++ b/SalishSeaTools/tests/test_viz_tools.py @@ -30,44 +30,50 @@ from salishsea_tools import viz_tools -@pytest.mark.usefixtures('nc_dataset') +@pytest.mark.usefixtures("nc_dataset") class TestCalcAbsMax(object): - @pytest.mark.parametrize('array, expected', [ - (np.arange(-5, 10, 0.1), 9.9), - (np.arange(-10, 5, 0.5), 10), - (np.array([42]), 42), - ]) + @pytest.mark.parametrize( + "array, expected", + [ + (np.arange(-5, 10, 0.1), 9.9), + (np.arange(-10, 5, 0.5), 10), + (np.array([42]), 42), + ], + ) def test_calc_abs_max_array(self, array, expected): abs_max = viz_tools.calc_abs_max(array) np.testing.assert_almost_equal(abs_max, expected) - @pytest.mark.parametrize('array, expected', [ - (np.arange(-5, 10, 0.1), 9.9), - (np.arange(-10, 5, 0.5), 10), - (np.array([42]), 42), - ]) + @pytest.mark.parametrize( + "array, expected", + [ + (np.arange(-5, 10, 0.1), 9.9), + (np.arange(-10, 5, 0.5), 10), + (np.array([42]), 42), + ], + ) def test_calc_abs_max_dataset(self, array, expected, nc_dataset): - nc_dataset.createDimension('x', len(array)) - foo = nc_dataset.createVariable('foo', float, ('x',)) + nc_dataset.createDimension("x", len(array)) + foo = nc_dataset.createVariable("foo", float, ("x",)) foo[:] = array abs_max = viz_tools.calc_abs_max(array) np.testing.assert_almost_equal(abs_max, expected) class TestPlotCoastline(object): - @patch('salishsea_tools.viz_tools.nc.Dataset') + @patch("salishsea_tools.viz_tools.nc.Dataset") def test_plot_coastline_defaults_bathy_file(self, m_dataset): axes = Mock() - viz_tools.plot_coastline(axes, 'bathyfile') + viz_tools.plot_coastline(axes, "bathyfile") - m_dataset.assert_called_once_with('bathyfile') + m_dataset.assert_called_once_with("bathyfile") m_dataset().close.assert_called_once_with() - @patch('salishsea_tools.viz_tools.nc.Dataset') + @patch("salishsea_tools.viz_tools.nc.Dataset") def test_plot_coastline_defaults_bathy_netCDF_obj(self, m_dataset): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} viz_tools.plot_coastline(axes, bathy) @@ -76,14 +82,14 @@ def test_plot_coastline_defaults_bathy_netCDF_obj(self, m_dataset): def test_plot_coastline_defaults(self): axes, bathy = Mock(), Mock() 
- bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} contour_lines = viz_tools.plot_coastline(axes, bathy) axes.contour.assert_called_once_with( - bathy.variables['Bathymetry'], + bathy.variables["Bathymetry"], [0], - colors='black', + colors="black", zorder=2, ) assert contour_lines == axes.contour() @@ -92,69 +98,67 @@ def test_plot_coastline_defaults(self): def test_plot_coastline_map_coords(self): axes, bathy = Mock(), Mock() bathy.variables = { - 'Bathymetry': Mock(), - 'nav_lat': Mock(), - 'nav_lon': Mock(), + "Bathymetry": Mock(), + "nav_lat": Mock(), + "nav_lon": Mock(), } - contour_lines = viz_tools.plot_coastline(axes, bathy, coords='map') + contour_lines = viz_tools.plot_coastline(axes, bathy, coords="map") axes.contour.assert_called_once_with( - bathy.variables['nav_lon'], - bathy.variables['nav_lat'], - bathy.variables['Bathymetry'], + bathy.variables["nav_lon"], + bathy.variables["nav_lat"], + bathy.variables["Bathymetry"], [0], - colors='black', + colors="black", zorder=2, ) assert contour_lines == axes.contour() def test_plot_coastline_isobath(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} - contour_lines = viz_tools.plot_coastline( - axes, bathy, isobath=42.42) + contour_lines = viz_tools.plot_coastline(axes, bathy, isobath=42.42) axes.contour.assert_called_once_with( - bathy.variables['Bathymetry'], + bathy.variables["Bathymetry"], [42.42], - colors='black', + colors="black", zorder=2, ) assert contour_lines == axes.contour() def test_plot_coastline_no_xslice(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} with pytest.raises(ValueError): - viz_tools.plot_coastline( - axes, bathy, yslice=np.arange(200, 320)) + viz_tools.plot_coastline(axes, bathy, yslice=np.arange(200, 320)) def test_plot_coastline_no_yslice(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} with pytest.raises(ValueError): - viz_tools.plot_coastline( - axes, bathy, xslice=np.arange(250, 370)) + viz_tools.plot_coastline(axes, bathy, xslice=np.arange(250, 370)) def test_plot_coastline_grid_coords_slice(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': MagicMock(spec=nc.Variable)} + bathy.variables = {"Bathymetry": MagicMock(spec=nc.Variable)} xslice = np.arange(250, 370) yslice = np.arange(200, 320) contour_lines = viz_tools.plot_coastline( - axes, bathy, xslice=xslice, yslice=yslice) + axes, bathy, xslice=xslice, yslice=yslice + ) axes.contour.assert_called_once_with( xslice, yslice, - bathy.variables['Bathymetry'][yslice, xslice].data, + bathy.variables["Bathymetry"][yslice, xslice].data, [0], - colors='black', + colors="black", zorder=2, ) assert contour_lines == axes.contour() @@ -162,56 +166,56 @@ def test_plot_coastline_grid_coords_slice(self): def test_plot_coastline_map_coords_slice(self): axes, bathy = Mock(), Mock() bathy.variables = { - 'Bathymetry': MagicMock(spec=nc.Variable), - 'nav_lon': MagicMock(spec=nc.Variable), - 'nav_lat': MagicMock(spec=nc.Variable), + "Bathymetry": MagicMock(spec=nc.Variable), + "nav_lon": MagicMock(spec=nc.Variable), + "nav_lat": MagicMock(spec=nc.Variable), } xslice = np.arange(250, 370) yslice = np.arange(200, 320) contour_lines = viz_tools.plot_coastline( - axes, bathy, coords='map', xslice=xslice, yslice=yslice) + axes, bathy, coords="map", xslice=xslice, yslice=yslice + ) 
axes.contour.assert_called_once_with( - bathy.variables['nav_lon'][yslice, xslice], - bathy.variables['nav_lat'][yslice, xslice], - bathy.variables['Bathymetry'][yslice, xslice].data, + bathy.variables["nav_lon"][yslice, xslice], + bathy.variables["nav_lat"][yslice, xslice], + bathy.variables["Bathymetry"][yslice, xslice].data, [0], - colors='black', + colors="black", zorder=2, ) assert contour_lines == axes.contour() def test_plot_coastline_color_arg(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} - contour_lines = viz_tools.plot_coastline( - axes, bathy, color='red') + contour_lines = viz_tools.plot_coastline(axes, bathy, color="red") axes.contour.assert_called_once_with( - bathy.variables['Bathymetry'], + bathy.variables["Bathymetry"], [0], - colors='red', + colors="red", zorder=2, ) assert contour_lines == axes.contour() class TestPlotLandMask(object): - @patch('salishsea_tools.viz_tools.nc.Dataset') + @patch("salishsea_tools.viz_tools.nc.Dataset") def test_plot_land_mask_defaults_bathy_file(self, m_dataset): axes = Mock() - viz_tools.plot_land_mask(axes, 'bathyfile') + viz_tools.plot_land_mask(axes, "bathyfile") - m_dataset.assert_called_once_with('bathyfile') + m_dataset.assert_called_once_with("bathyfile") m_dataset().close.assert_called_once_with() - @patch('salishsea_tools.viz_tools.nc.Dataset') + @patch("salishsea_tools.viz_tools.nc.Dataset") def test_plot_land_mask_defaults_bathy_netCDF_obj(self, m_dataset): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} viz_tools.plot_land_mask(axes, bathy) @@ -220,14 +224,14 @@ def test_plot_land_mask_defaults_bathy_netCDF_obj(self, m_dataset): def test_plot_land_mask_defaults(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} contour_fills = viz_tools.plot_land_mask(axes, bathy) axes.contourf.assert_called_once_with( - numpy.array(bathy.variables['Bathymetry'], dtype=object), + numpy.array(bathy.variables["Bathymetry"], dtype=object), [-0.01, 0.01], - colors='black', + colors="black", zorder=1, ) assert contour_fills == axes.contourf() @@ -235,66 +239,66 @@ def test_plot_land_mask_defaults(self): def test_plot_land_mask_map_coords(self): axes, bathy = Mock(), Mock() bathy.variables = { - 'Bathymetry': Mock(), - 'nav_lat': Mock(), - 'nav_lon': Mock(), + "Bathymetry": Mock(), + "nav_lat": Mock(), + "nav_lon": Mock(), } - contour_fills = viz_tools.plot_land_mask(axes, bathy, coords='map') + contour_fills = viz_tools.plot_land_mask(axes, bathy, coords="map") axes.contourf.assert_called_once_with( - numpy.array(bathy.variables['nav_lon'], dtype=object), - numpy.array(bathy.variables['nav_lat'], dtype=object), - numpy.array(bathy.variables['Bathymetry'], dtype=object), + numpy.array(bathy.variables["nav_lon"], dtype=object), + numpy.array(bathy.variables["nav_lat"], dtype=object), + numpy.array(bathy.variables["Bathymetry"], dtype=object), [-0.01, 0.01], - colors='black', + colors="black", zorder=1, ) assert contour_fills == axes.contourf() def test_plot_land_mask_isobath(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} - contour_fills = viz_tools.plot_land_mask( - axes, bathy, isobath=42.42) + contour_fills = viz_tools.plot_land_mask(axes, bathy, isobath=42.42) args, kwargs = axes.contourf.call_args - assert args[0] == bathy.variables['Bathymetry'] + assert args[0] 
== bathy.variables["Bathymetry"] np.testing.assert_almost_equal(args[1], [-0.01, 42.43]) - assert kwargs == {'colors': 'black', 'zorder': 1} + assert kwargs == {"colors": "black", "zorder": 1} assert contour_fills == axes.contourf() def test_plot_land_mask_no_xslice(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} with pytest.raises(ValueError): viz_tools.plot_land_mask(axes, bathy, yslice=np.arange(200, 320)) def test_plot_land_mask_no_yslice(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} with pytest.raises(ValueError): viz_tools.plot_land_mask(axes, bathy, xslice=np.arange(250, 370)) def test_plot_land_mask_grid_coords_slice(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': MagicMock(spec=nc.Variable)} + bathy.variables = {"Bathymetry": MagicMock(spec=nc.Variable)} xslice = np.arange(250, 370) yslice = np.arange(200, 320) contour_fills = viz_tools.plot_land_mask( - axes, bathy, xslice=xslice, yslice=yslice) + axes, bathy, xslice=xslice, yslice=yslice + ) axes.contourf.assert_called_once_with( xslice, yslice, - bathy.variables['Bathymetry'][yslice, xslice].data, + bathy.variables["Bathymetry"][yslice, xslice].data, [-0.01, 0.01], - colors='black', + colors="black", zorder=1, ) assert contour_fills == axes.contourf() @@ -302,37 +306,37 @@ def test_plot_land_mask_grid_coords_slice(self): def test_plot_land_mask_map_coords_slice(self): axes, bathy = Mock(), Mock() bathy.variables = { - 'Bathymetry': MagicMock(spec=nc.Variable), - 'nav_lon': MagicMock(spec=nc.Variable), - 'nav_lat': MagicMock(spec=nc.Variable), + "Bathymetry": MagicMock(spec=nc.Variable), + "nav_lon": MagicMock(spec=nc.Variable), + "nav_lat": MagicMock(spec=nc.Variable), } xslice = np.arange(250, 370) yslice = np.arange(200, 320) contour_fills = viz_tools.plot_land_mask( - axes, bathy, coords='map', xslice=xslice, yslice=yslice) + axes, bathy, coords="map", xslice=xslice, yslice=yslice + ) axes.contourf.assert_called_once_with( - bathy.variables['nav_lon'][yslice, xslice], - bathy.variables['nav_lat'][yslice, xslice], - bathy.variables['Bathymetry'][yslice, xslice].data, + bathy.variables["nav_lon"][yslice, xslice], + bathy.variables["nav_lat"][yslice, xslice], + bathy.variables["Bathymetry"][yslice, xslice].data, [-0.01, 0.01], - colors='black', + colors="black", zorder=1, ) assert contour_fills == axes.contourf() def test_plot_land_mask_color_arg(self): axes, bathy = Mock(), Mock() - bathy.variables = {'Bathymetry': Mock()} + bathy.variables = {"Bathymetry": Mock()} - contour_fills = viz_tools.plot_land_mask( - axes, bathy, color='red') + contour_fills = viz_tools.plot_land_mask(axes, bathy, color="red") axes.contourf.assert_called_once_with( - bathy.variables['Bathymetry'], + bathy.variables["Bathymetry"], [-0.01, 0.01], - colors='red', + colors="red", zorder=1, ) assert contour_fills == axes.contourf() @@ -342,29 +346,28 @@ class TestSetAspect(object): def test_set_aspect_defaults(self): axes = Mock() aspect = viz_tools.set_aspect(axes) - axes.set_aspect.assert_called_once_with(5/4.4, adjustable='box') - assert aspect == 5/4.4 + axes.set_aspect.assert_called_once_with(5 / 4.4, adjustable="box") + assert aspect == 5 / 4.4 def test_set_aspect_args(self): axes = Mock() - aspect = viz_tools.set_aspect(axes, 3/2, adjustable='foo') - axes.set_aspect.assert_called_once_with(3/2, adjustable='foo') - assert aspect == 3/2 + aspect = viz_tools.set_aspect(axes, 3 / 2, 
adjustable="foo") + axes.set_aspect.assert_called_once_with(3 / 2, adjustable="foo") + assert aspect == 3 / 2 def test_set_aspect_map_lats(self): axes = Mock() lats = np.array([42.0]) lats_aspect = 1 / np.cos(42 * np.pi / 180) - aspect = viz_tools.set_aspect(axes, coords='map', lats=lats) - axes.set_aspect.assert_called_once_with( - lats_aspect, adjustable='box') + aspect = viz_tools.set_aspect(axes, coords="map", lats=lats) + axes.set_aspect.assert_called_once_with(lats_aspect, adjustable="box") assert aspect == lats_aspect def test_set_aspect_map_explicit(self): axes = Mock() - aspect = viz_tools.set_aspect(axes, 2/3, coords='map') - axes.set_aspect.assert_called_once_with(2/3, adjustable='box') - assert aspect == 2/3 + aspect = viz_tools.set_aspect(axes, 2 / 3, coords="map") + axes.set_aspect.assert_called_once_with(2 / 3, adjustable="box") + assert aspect == 2 / 3 def test_unstagger(): diff --git a/SalishSeaTools/tests/test_wind_tools.py b/SalishSeaTools/tests/test_wind_tools.py index 9f67cb98..4ed41a9b 100644 --- a/SalishSeaTools/tests/test_wind_tools.py +++ b/SalishSeaTools/tests/test_wind_tools.py @@ -30,40 +30,44 @@ @pytest.fixture def wind_dataset(nc_dataset): - nc_dataset.createDimension('time_counter') - nc_dataset.createDimension('y', 1) - nc_dataset.createDimension('x', 1) - u_wind = nc_dataset.createVariable( - 'u_wind', float, ('time_counter', 'y', 'x')) + nc_dataset.createDimension("time_counter") + nc_dataset.createDimension("y", 1) + nc_dataset.createDimension("x", 1) + u_wind = nc_dataset.createVariable("u_wind", float, ("time_counter", "y", "x")) u_wind[:] = np.arange(5) - v_wind = nc_dataset.createVariable( - 'v_wind', float, ('time_counter', 'y', 'x')) + v_wind = nc_dataset.createVariable("v_wind", float, ("time_counter", "y", "x")) v_wind[:] = np.arange(0, -5, -1) - time_counter = nc_dataset.createVariable( - 'time_counter', float, ('time_counter',)) - time_counter.time_origin = '2016-FEB-02 00:00:00' - time_counter[:] = np.arange(5) * 60*60 + time_counter = nc_dataset.createVariable("time_counter", float, ("time_counter",)) + time_counter.time_origin = "2016-FEB-02 00:00:00" + time_counter[:] = np.arange(5) * 60 * 60 return nc_dataset class TestWindSpeedDir(object): - """Unit tests for the wind_speed_dir() function. 
- """ - @pytest.mark.parametrize('u_wind, v_wind, exp_speed, exp_dir', [ - (0, 0, 0, 0), - (1, 0, 1, 0), - (1, 1, 1.414214, 45), - (3, 4, 5, 53.130102), - (0, 1, 1, 90), - (-1, 1, 1.414214, 135), - (-1, 0, 1, 180), - (-1, -1, 1.414214, 225), - (0, -1, 1, 270), - (1, -1, 1.414214, 315), - (1, -0.001, 1, 359.942704), - ]) + """Unit tests for the wind_speed_dir() function.""" + + @pytest.mark.parametrize( + "u_wind, v_wind, exp_speed, exp_dir", + [ + (0, 0, 0, 0), + (1, 0, 1, 0), + (1, 1, 1.414214, 45), + (3, 4, 5, 53.130102), + (0, 1, 1, 90), + (-1, 1, 1.414214, 135), + (-1, 0, 1, 180), + (-1, -1, 1.414214, 225), + (0, -1, 1, 270), + (1, -1, 1.414214, 315), + (1, -0.001, 1, 359.942704), + ], + ) def test_scalar_uv_values( - self, u_wind, v_wind, exp_speed, exp_dir, + self, + u_wind, + v_wind, + exp_speed, + exp_dir, ): wind = wind_tools.wind_speed_dir(u_wind, v_wind) np.testing.assert_allclose(wind.speed, exp_speed, rtol=1e-05) @@ -73,68 +77,79 @@ def test_ndarray_uv_values(self): u_wind = np.array([0, 1, 1, 3, 0, -1, -1, -1, 0, 1, 1]) v_wind = np.array([0, 0, 1, 4, 1, 1, 0, -1, -1, -1, -0.001]) wind = wind_tools.wind_speed_dir(u_wind, v_wind) - exp_speed = np.array([ - 0, 1, 1.414214, 5, 1, 1.414214, 1, 1.414214, 1, 1.414214, 1]) + exp_speed = np.array( + [0, 1, 1.414214, 5, 1, 1.414214, 1, 1.414214, 1, 1.414214, 1] + ) np.testing.assert_allclose(wind.speed, exp_speed, rtol=1e-05) - exp_dir = np.array([ - 0, 0, 45, 53.130102, 90, 135, 180, 225, 270, 315, 359.942704]) + exp_dir = np.array( + [0, 0, 45, 53.130102, 90, 135, 180, 225, 270, 315, 359.942704] + ) np.testing.assert_allclose(wind.dir, exp_dir) class TestCalcWindAvgAtPoint(object): - """Unit tests for calc_wind_avg_at_point() function. - """ - @patch.object(wind_tools.nc_tools, 'dataset_from_path') + """Unit tests for calc_wind_avg_at_point() function.""" + + @patch.object(wind_tools.nc_tools, "dataset_from_path") def test_ops_wind(self, m_dfp, wind_dataset, tmpdir): - tmp_weather_path = tmpdir.ensure_dir('operational') + tmp_weather_path = tmpdir.ensure_dir("operational") m_dfp.side_effect = (wind_dataset,) wind_avg = wind_tools.calc_wind_avg_at_point( - arrow.get('2016-02-02 04:25'), str(tmp_weather_path), (0, 0)) + arrow.get("2016-02-02 04:25"), str(tmp_weather_path), (0, 0) + ) np.testing.assert_allclose(wind_avg.u, 2.5) np.testing.assert_allclose(wind_avg.v, -2.5) - @patch.object(wind_tools.nc_tools, 'dataset_from_path') + @patch.object(wind_tools.nc_tools, "dataset_from_path") def test_2h_avg(self, m_dfp, wind_dataset, tmpdir): - tmp_weather_path = tmpdir.ensure_dir('operational') + tmp_weather_path = tmpdir.ensure_dir("operational") m_dfp.side_effect = (wind_dataset,) wind_avg = wind_tools.calc_wind_avg_at_point( - arrow.get('2016-02-02 04:25'), str(tmp_weather_path), (0, 0), - avg_hrs=-2) + arrow.get("2016-02-02 04:25"), str(tmp_weather_path), (0, 0), avg_hrs=-2 + ) np.testing.assert_allclose(wind_avg.u, 3.5) np.testing.assert_allclose(wind_avg.v, -3.5) - @patch.object(wind_tools.nc_tools, 'dataset_from_path') + @patch.object(wind_tools.nc_tools, "dataset_from_path") def test_fcst_wind(self, m_dfp, wind_dataset, tmpdir): - tmp_weather_path = tmpdir.ensure_dir('operational') + tmp_weather_path = tmpdir.ensure_dir("operational") m_dfp.side_effect = (IOError, wind_dataset) wind_avg = wind_tools.calc_wind_avg_at_point( - arrow.get('2016-02-02 04:25'), str(tmp_weather_path), (0, 0)) + arrow.get("2016-02-02 04:25"), str(tmp_weather_path), (0, 0) + ) np.testing.assert_allclose(wind_avg.u, 2.5) np.testing.assert_allclose(wind_avg.v, 
-2.5) - @patch.object(wind_tools.nc_tools, 'dataset_from_path') + @patch.object(wind_tools.nc_tools, "dataset_from_path") def test_prepend_previous_day( - self, m_dfp, wind_dataset, tmpdir, + self, + m_dfp, + wind_dataset, + tmpdir, ): - tmp_weather_path = tmpdir.ensure_dir('operational') - wind_prev_day = nc.Dataset('wind_prev_day', 'w') - wind_prev_day.createDimension('time_counter') - wind_prev_day.createDimension('y', 1) - wind_prev_day.createDimension('x', 1) + tmp_weather_path = tmpdir.ensure_dir("operational") + wind_prev_day = nc.Dataset("wind_prev_day", "w") + wind_prev_day.createDimension("time_counter") + wind_prev_day.createDimension("y", 1) + wind_prev_day.createDimension("x", 1) u_wind = wind_prev_day.createVariable( - 'u_wind', float, ('time_counter', 'y', 'x')) + "u_wind", float, ("time_counter", "y", "x") + ) u_wind[:] = np.arange(5) v_wind = wind_prev_day.createVariable( - 'v_wind', float, ('time_counter', 'y', 'x')) + "v_wind", float, ("time_counter", "y", "x") + ) v_wind[:] = np.arange(0, -5, -1) time_counter = wind_prev_day.createVariable( - 'time_counter', float, ('time_counter',)) - time_counter.time_origin = '2016-FEB-01 00:00:00' - time_counter[:] = np.arange(19, 24) * 60*60 + "time_counter", float, ("time_counter",) + ) + time_counter.time_origin = "2016-FEB-01 00:00:00" + time_counter[:] = np.arange(19, 24) * 60 * 60 m_dfp.side_effect = (wind_dataset, wind_prev_day) wind_avg = wind_tools.calc_wind_avg_at_point( - arrow.get('2016-02-02 01:25'), str(tmp_weather_path), (0, 0)) + arrow.get("2016-02-02 01:25"), str(tmp_weather_path), (0, 0) + ) wind_prev_day.close() - os.remove('wind_prev_day') + os.remove("wind_prev_day") np.testing.assert_allclose(wind_avg.u, 2) np.testing.assert_allclose(wind_avg.v, -2) diff --git a/analysis_tools/make_readme.py b/analysis_tools/make_readme.py index 2f7c3eb3..b254b03c 100644 --- a/analysis_tools/make_readme.py +++ b/analysis_tools/make_readme.py @@ -16,6 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. """ + import datetime import glob import json @@ -23,10 +24,10 @@ import re -NBVIEWER = 'https://nbviewer.org/urls' -REPO = 'github.com/SalishSeaCast/tools/blob/main' -REPO_DIR = 'analysis_tools' -TITLE_PATTERN = re.compile('#{1,6} ?') +NBVIEWER = "https://nbviewer.org/urls" +REPO = "github.com/SalishSeaCast/tools/blob/main" +REPO_DIR = "analysis_tools" +TITLE_PATTERN = re.compile("#{1,6} ?") def main(): @@ -54,8 +55,8 @@ def main(): (if that cell contains Markdown or raw text). """ - for fn in glob.glob('*.ipynb'): - readme += '* ##[{fn}]({url}/{fn}) \n \n'.format(fn=fn, url=url) + for fn in glob.glob("*.ipynb"): + readme += "* ##[{fn}]({url}/{fn}) \n \n".format(fn=fn, url=url) readme += notebook_description(fn) license = """ ##License @@ -67,39 +68,40 @@ def main(): They are licensed under the Apache License, Version 2.0. https://www.apache.org/licenses/LICENSE-2.0 Please see the LICENSE file for details of the license. 
-""".format(this_year=datetime.date.today().year) - with open('README.md', 'wt') as f: +""".format( + this_year=datetime.date.today().year + ) + with open("README.md", "wt") as f: f.writelines(readme) f.writelines(license) def notebook_description(fn): - description = '' - with open(fn, 'rt') as notebook: + description = "" + with open(fn, "rt") as notebook: contents = json.load(notebook) try: - first_cell = contents['worksheets'][0]['cells'][0] + first_cell = contents["worksheets"][0]["cells"][0] except KeyError: - first_cell = contents['cells'][0] - first_cell_type = first_cell['cell_type'] - if first_cell_type not in 'markdown raw'.split(): + first_cell = contents["cells"][0] + first_cell_type = first_cell["cell_type"] + if first_cell_type not in "markdown raw".split(): return description - desc_lines = first_cell['source'] + desc_lines = first_cell["source"] for line in desc_lines: - suffix = '' + suffix = "" if TITLE_PATTERN.match(line): - line = TITLE_PATTERN.sub('**', line) - suffix = '**' - if line.endswith('\n'): - description += ( - ' {line}{suffix} \n' - .format(line=line[:-1], suffix=suffix)) + line = TITLE_PATTERN.sub("**", line) + suffix = "**" + if line.endswith("\n"): + description += " {line}{suffix} \n".format( + line=line[:-1], suffix=suffix + ) else: - description += ( - ' {line}{suffix} '.format(line=line, suffix=suffix)) - description += '\n' * 2 + description += " {line}{suffix} ".format(line=line, suffix=suffix) + description += "\n" * 2 return description -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/analysis_tools/old_notebooks/GYRE_openNC_plot.ipynb b/analysis_tools/old_notebooks/GYRE_openNC_plot.ipynb index 22b4cdd1..6466297d 100644 --- a/analysis_tools/old_notebooks/GYRE_openNC_plot.ipynb +++ b/analysis_tools/old_notebooks/GYRE_openNC_plot.ipynb @@ -169,4 +169,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/analysis_tools/old_notebooks/NancysCurrents.ipynb b/analysis_tools/old_notebooks/NancysCurrents.ipynb index 6f37cf4b..4063f7a2 100644 --- a/analysis_tools/old_notebooks/NancysCurrents.ipynb +++ b/analysis_tools/old_notebooks/NancysCurrents.ipynb @@ -531,4 +531,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/analysis_tools/old_notebooks/SusansViewerWQuiver.ipynb b/analysis_tools/old_notebooks/SusansViewerWQuiver.ipynb index 538114cd..d5bb36fb 100644 --- a/analysis_tools/old_notebooks/SusansViewerWQuiver.ipynb +++ b/analysis_tools/old_notebooks/SusansViewerWQuiver.ipynb @@ -359,4 +359,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/analysis_tools/old_notebooks/Tidal Movie.ipynb b/analysis_tools/old_notebooks/Tidal Movie.ipynb index e2c491db..b413a006 100644 --- a/analysis_tools/old_notebooks/Tidal Movie.ipynb +++ b/analysis_tools/old_notebooks/Tidal Movie.ipynb @@ -174,4 +174,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/analysis_tools/old_notebooks/Vertical Tracer Cross-sections.ipynb b/analysis_tools/old_notebooks/Vertical Tracer Cross-sections.ipynb index 1e0db91f..ed1432a9 100644 --- a/analysis_tools/old_notebooks/Vertical Tracer Cross-sections.ipynb +++ b/analysis_tools/old_notebooks/Vertical Tracer Cross-sections.ipynb @@ -213,4 +213,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/analysis_tools/old_notebooks/WCSD_openNC_plot.ipynb b/analysis_tools/old_notebooks/WCSD_openNC_plot.ipynb index 9261a997..4acde15c 100644 --- a/analysis_tools/old_notebooks/WCSD_openNC_plot.ipynb +++ 
b/analysis_tools/old_notebooks/WCSD_openNC_plot.ipynb @@ -609,4 +609,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/analysis_tools/old_notebooks/make_readme.py b/analysis_tools/old_notebooks/make_readme.py index aab79192..3475c61a 100644 --- a/analysis_tools/old_notebooks/make_readme.py +++ b/analysis_tools/old_notebooks/make_readme.py @@ -16,17 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. """ + import datetime import json import os import re -nbviewer = 'https://nbviewer.org/urls' -repo = 'github.com/SalishSeaCast/tools/blob/main' -repo_dir = 'analysis_tools/old_notebooks' +nbviewer = "https://nbviewer.org/urls" +repo = "github.com/SalishSeaCast/tools/blob/main" +repo_dir = "analysis_tools/old_notebooks" url = os.path.join(nbviewer, repo, repo_dir) -title_pattern = re.compile('#{1,6} ?') +title_pattern = re.compile("#{1,6} ?") readme = """The Jupyter Notebooks in this directory are notebooks from initial experiments around visualization of NEMO results. The best practices from these notebooks have been collected, @@ -39,27 +40,24 @@ (if that cell contains Markdown or raw text). """ -notebooks = (fn for fn in os.listdir('./') if fn.endswith('ipynb')) +notebooks = (fn for fn in os.listdir("./") if fn.endswith("ipynb")) for fn in notebooks: - readme += '* ##[{fn}]({url}/{fn}) \n \n'.format(fn=fn, url=url) - with open(fn, 'rt') as notebook: + readme += "* ##[{fn}]({url}/{fn}) \n \n".format(fn=fn, url=url) + with open(fn, "rt") as notebook: contents = json.load(notebook) - first_cell_type = contents['worksheets'][0]['cells'][0]['cell_type'] - if first_cell_type in 'markdown raw'.split(): - desc_lines = contents['worksheets'][0]['cells'][0]['source'] + first_cell_type = contents["worksheets"][0]["cells"][0]["cell_type"] + if first_cell_type in "markdown raw".split(): + desc_lines = contents["worksheets"][0]["cells"][0]["source"] for line in desc_lines: - suffix = '' + suffix = "" if title_pattern.match(line): - line = title_pattern.sub('**', line) - suffix = '**' - if line.endswith('\n'): - readme += ( - ' {line}{suffix} \n' - .format(line=line[:-1], suffix=suffix)) + line = title_pattern.sub("**", line) + suffix = "**" + if line.endswith("\n"): + readme += " {line}{suffix} \n".format(line=line[:-1], suffix=suffix) else: - readme += ( - ' {line}{suffix} '.format(line=line, suffix=suffix)) - readme += '\n' * 2 + readme += " {line}{suffix} ".format(line=line, suffix=suffix) + readme += "\n" * 2 license = """ ##License @@ -70,7 +68,9 @@ They are licensed under the Apache License, Version 2.0. https://www.apache.org/licenses/LICENSE-2.0 Please see the LICENSE file for details of the license. 
-""".format(this_year=datetime.date.today().year) -with open('README.md', 'wt') as f: +""".format( + this_year=datetime.date.today().year +) +with open("README.md", "wt") as f: f.writelines(readme) f.writelines(license) diff --git a/bathymetry/BathyZeroTobaetc.ipynb b/bathymetry/BathyZeroTobaetc.ipynb index 07a26be5..d5221cd5 100644 --- a/bathymetry/BathyZeroTobaetc.ipynb +++ b/bathymetry/BathyZeroTobaetc.ipynb @@ -301,4 +301,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/bathymetry/More Smoothing.ipynb b/bathymetry/More Smoothing.ipynb index 87418b9a..2545b4c6 100644 --- a/bathymetry/More Smoothing.ipynb +++ b/bathymetry/More Smoothing.ipynb @@ -247,4 +247,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/bathymetry/README.md b/bathymetry/README.md index bcea7208..29124a83 100644 --- a/bathymetry/README.md +++ b/bathymetry/README.md @@ -6,29 +6,29 @@ The links below are to static renderings of the notebooks via Descriptions below the links are from the first cell of the notebooks (if that cell contains Markdown or raw text). -* ## [SalishSeaSubdomainBathy.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/SalishSeaSubdomainBathy.ipynb) - +* ## [SalishSeaSubdomainBathy.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/SalishSeaSubdomainBathy.ipynb) + **Salish Sea NEMO Sub-domain Bathymetry** - - This notebook documents the bathymetry used for the + + This notebook documents the bathymetry used for the initial Salish Sea NEMO runs on a sub-set of the whole region domain. This sub-domain was used for the runs known as `JPP` and `WCSD_RUN_tide_M2_OW_ON_file_DAMP_ANALY`. - + The first part of the notebook explores and plots the bathymetry. The second part records the manual smoothing that was done to get the JPP M2 tidal forcing case to run. -* ## [Bathymetry inside NEMO.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Bathymetry inside NEMO.ipynb) - +* ## [Bathymetry inside NEMO.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Bathymetry inside NEMO.ipynb) + Notebook to look at the Bathymetry that NEMO actually uses after it does its processing -* ## [Deepen by Grid Thickness.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Deepen by Grid Thickness.ipynb) - -* ## [LookAt201803Bathymetry.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/LookAt201803Bathymetry.ipynb) - -* ## [ProcessNewRiverBathymetry.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/ProcessNewRiverBathymetry.ipynb) - +* ## [Deepen by Grid Thickness.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Deepen by Grid Thickness.ipynb) + +* ## [LookAt201803Bathymetry.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/LookAt201803Bathymetry.ipynb) + +* ## [ProcessNewRiverBathymetry.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/ProcessNewRiverBathymetry.ipynb) + **Process New River Bathymetry **** Take the bathymetry produced by Michael including the better resolved river and process it. We need to do the following steps: @@ -44,16 +44,16 @@ Descriptions below the links are from the first cell of the notebooks 8. Write out bathy file and jetty extra friction files Note: original 201702 processing did Check continuity and add mixed islands and fix Puget after smoothing. 
-* ## [blast a river.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/blast a river.ipynb) - -* ## [SmoothMouthJdF-DownOneGrid.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/SmoothMouthJdF-DownOneGrid.ipynb) - +* ## [blast a river.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/blast a river.ipynb) + +* ## [SmoothMouthJdF-DownOneGrid.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/SmoothMouthJdF-DownOneGrid.ipynb) + This notebook takes our downonegrid Salish Sea bathymetry and produces a bathymetry with the mouth of Juan de Fuca and Johnstone Strait identical for the first 6 grid points. -* ## [NEMO-GridBathy.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/NEMO-GridBathy.ipynb) - +* ## [NEMO-GridBathy.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/NEMO-GridBathy.ipynb) + **NEMO Grid Bathymetry** - + This notebook describes the creation of the `NEMO-forcing/grid/grid_bathy.nc` file containing the calculated grid level depths at each grid point @@ -62,13 +62,13 @@ Descriptions below the links are from the first cell of the notebooks and boundary conditions temperature and salinity values that do not induce spurious transient flows at the topographic edges of the domain. -* ## [Deepen Haro Boundary Region.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Deepen Haro Boundary Region.ipynb) - -* ## [Process201803Bathymetry.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Process201803Bathymetry.ipynb) - +* ## [Deepen Haro Boundary Region.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Deepen Haro Boundary Region.ipynb) + +* ## [Process201803Bathymetry.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Process201803Bathymetry.ipynb) + **Process 2018 03 Bathymetry **** New Fraser River, taking 2 m up to 0 to narrow the banks - + Take the bathymetry produced by Michael including the better resolved river and process it. We need to do the following steps: 1. Straighten North Open Boundary @@ -83,72 +83,72 @@ Descriptions below the links are from the first cell of the notebooks 8. Write out bathy file and jetty extra friction files Note: original 201702 processing did Check continuity and add mixed islands and fix Puget after smoothing. -* ## [bathy_for_jie.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/bathy_for_jie.ipynb) - -* ## [mesh_mask202108_metadata.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/mesh_mask202108_metadata.ipynb) - +* ## [bathy_for_jie.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/bathy_for_jie.ipynb) + +* ## [mesh_mask202108_metadata.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/mesh_mask202108_metadata.ipynb) + **`mesh_mask202108.nc` Metadata** - + Add metadata to the NEMO-generated mesh mask file for the 202108 bathymetry so that well-defined ERDDAP datasets can be produced from it. -* ## [FindTSforSmoothedMouths.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/FindTSforSmoothedMouths.ipynb) - +* ## [FindTSforSmoothedMouths.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/FindTSforSmoothedMouths.ipynb) + For smoothed mouths we need to fill in any new grid points. 
-* ## [Find TS for new Bathymetry.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Find TS for new Bathymetry.ipynb) - +* ## [Find TS for new Bathymetry.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Find TS for new Bathymetry.ipynb) + If topography is deepened, we need to extend Temp and Salinity Downward. - + Also includes a cell to convert to Reference Salinity -* ## [mesh_mask_downbyone2_metadata.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/mesh_mask_downbyone2_metadata.ipynb) - +* ## [mesh_mask_downbyone2_metadata.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/mesh_mask_downbyone2_metadata.ipynb) + **`mesh_mask_downbyone2.nc` Metadata** - + Add metadata to the NEMO-generated mesh mask file for the downbyone2 bathymetry so that a well-defined ERDDAP dataset can be produced from it. -* ## [mesh_mask_SalishSea2_metadata.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/mesh_mask_SalishSea2_metadata.ipynb) - +* ## [mesh_mask_SalishSea2_metadata.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/mesh_mask_SalishSea2_metadata.ipynb) + **`mesh_mask_SalishSea2.nc` Metadata** - + Add metadata to the NEMO-generated mesh mask file for the SalishSea2 bathymetry so that a well-defined ERDDAP dataset can be produced from it. -* ## [Bathymetry in Boundary Pass.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Bathymetry in Boundary Pass.ipynb) - +* ## [Bathymetry in Boundary Pass.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Bathymetry in Boundary Pass.ipynb) + Comparison between original bathy and smoothed bathy -* ## [mesh_mask201702_metadata.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/mesh_mask201702_metadata.ipynb) - +* ## [mesh_mask201702_metadata.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/mesh_mask201702_metadata.ipynb) + **`mesh_mask201702.nc` Metadata** - + Add metadata to the NEMO-generated mesh mask file for the 201702 bathymetry so that a well-defined ERDDAP dataset can be produced from it. -* ## [ExploringBagFiles.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/ExploringBagFiles.ipynb) - +* ## [ExploringBagFiles.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/ExploringBagFiles.ipynb) + **Exploring `.bag` Bathymetry Data Files** - + An exploration of data and metadata in Bathymetric Attributed Grid (BAG) files. 
-* ## [LongRiver.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/LongRiver.ipynb) - +* ## [LongRiver.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/LongRiver.ipynb) + **Look at Bathymetry 6 **** and decide on river input points, and annotate the River with local landmarks -* ## [NEMOBathymetryfromMeshMask.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/NEMOBathymetryfromMeshMask.ipynb) - - Notebook to create a Nemo Bathymetry file for the ERDDAP server - Based on: +* ## [NEMOBathymetryfromMeshMask.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/NEMOBathymetryfromMeshMask.ipynb) + + Notebook to create a Nemo Bathymetry file for the ERDDAP server + Based on: Nancy/NEMO depths vs bathymetry file.ipynb -* ## [Process202108Bathymetry.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Process202108Bathymetry.ipynb) - +* ## [Process202108Bathymetry.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Process202108Bathymetry.ipynb) + **Process 2021 08 Bathymetry: Based on Process2201803 Bathymetry **** New Fraser River, taking 2 m up to 0 to narrow the banks - + Take the bathymetry produced by Michael including the better resolved river and process it. We need to do the following steps: 1. Straighten North Open Boundary @@ -162,65 +162,65 @@ Descriptions below the links are from the first cell of the notebooks 7. Plot up our Final Bathymetry 8. Write out bathy file and jetty extra friction files Note: original 201702 processing did Check continuity and add mixed islands and fix Puget after smoothing. - - 9.Check continuity and islands led to - 9.1 connect Roche Harbour - 9.2 remove extra little island - 9.3 don't close north of Read Island - 9.4 Disconnect Stuart Island - and + + 9.Check continuity and islands led to + 9.1 connect Roche Harbour + 9.2 remove extra little island + 9.3 don't close north of Read Island + 9.4 Disconnect Stuart Island + and 9.5 Deepen South Puget connection -* ## [Thalweg Smoothing.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Thalweg Smoothing.ipynb) - +* ## [Thalweg Smoothing.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Thalweg Smoothing.ipynb) + Smooth the Thalweg -* ## [netCDF4bathy.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/netCDF4bathy.ipynb) - +* ## [netCDF4bathy.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/netCDF4bathy.ipynb) + **Explore Changing Bathymetry Data Format** **netCDF4 Instead of netCDF3_CLASSIC** -* ## [TowardSmoothing.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/TowardSmoothing.ipynb) - -* ## [SmoothMouthJdF.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/SmoothMouthJdF.ipynb) - +* ## [TowardSmoothing.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/TowardSmoothing.ipynb) + +* ## [SmoothMouthJdF.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/SmoothMouthJdF.ipynb) + This notebook takes our original smoothed Salish Sea bathymetry and produces a bathymetry with the mouth of Juan de Fuca identical for the first 6 grid points. 
-* ## [JettyBathymetryTracers.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/JettyBathymetryTracers.ipynb) - +* ## [JettyBathymetryTracers.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/JettyBathymetryTracers.ipynb) + Look at Jetty Bathymetry from Mesh Mask and create a TS file -* ## [bathyImage.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/bathyImage.ipynb) - +* ## [bathyImage.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/bathyImage.ipynb) + This notebook creates an image of the bathymetry and coastlines. Includes: - + 1. Bathymetry 2. Location of Rivers 3. Storm Surge points - + Other important points? -* ## [Thalweg Work.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Thalweg Work.ipynb) - +* ## [Thalweg Work.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Thalweg Work.ipynb) + Determine the Thalweg in more Detail and Channelize it -* ## [Smooth, preserving thalweg.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Smooth, preserving thalweg.ipynb) - +* ## [Smooth, preserving thalweg.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/Smooth, preserving thalweg.ipynb) + Smooth around the Thalweg -* ## [More Smoothing.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/More Smoothing.ipynb) - +* ## [More Smoothing.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/More Smoothing.ipynb) + Notebook to take our SalishSea2 bathymetry which was smoothed to dh/hbar = 0.8 and smooth it more to 0.33. We show below that this makes the Thalweg more rugged as it pulls shallow areas from the sides across the channel. -* ## [BathyZeroTobaetc.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/BathyZeroTobaetc.ipynb) - -* ## [SalishSeaBathy.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/SalishSeaBathy.ipynb) - +* ## [BathyZeroTobaetc.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/BathyZeroTobaetc.ipynb) + +* ## [SalishSeaBathy.ipynb](https://nbviewer.org/github/SalishSeaCast/tools/blob/main/bathymetry/SalishSeaBathy.ipynb) + **Salish Sea NEMO Bathymetry** - + This notebook documents the bathymetry used for the Salish Sea NEMO runs. - + The first part of the notebook explores and plots the bathymetry. diff --git a/bathymetry/SalishSeaSubdomainBathy.ipynb b/bathymetry/SalishSeaSubdomainBathy.ipynb index 550af078..02337e77 100644 --- a/bathymetry/SalishSeaSubdomainBathy.ipynb +++ b/bathymetry/SalishSeaSubdomainBathy.ipynb @@ -750,4 +750,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/bathymetry/TowardSmoothing.ipynb b/bathymetry/TowardSmoothing.ipynb index 8824db35..bcf58b13 100644 --- a/bathymetry/TowardSmoothing.ipynb +++ b/bathymetry/TowardSmoothing.ipynb @@ -185,4 +185,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/bathymetry/agrif/fix_bathy.py b/bathymetry/agrif/fix_bathy.py index 40866576..4065bd51 100644 --- a/bathymetry/agrif/fix_bathy.py +++ b/bathymetry/agrif/fix_bathy.py @@ -28,23 +28,23 @@ def fix_bathy(infile, mindep): Run this on the file produced by agrif_create_bathy.exe. 
""" - with nc.Dataset(infile, 'r+') as f: + with nc.Dataset(infile, "r+") as f: # Enforce minimum bathymetry - bm = f.variables['Bathymetry'][:] + bm = f.variables["Bathymetry"][:] idx = (bm > 0) & (bm < mindep) if np.any(idx): md = np.min(bm[idx]) print("Min depth {:3f} m, resetting to {:3f} m".format(md, mindep)) bm[idx] = mindep - f.variables['Bathymetry'][:] = bm + f.variables["Bathymetry"][:] = bm # Enforce nav_lon to be in [-180,180] and not [0,360] - lon = f.variables['nav_lon'][:] + lon = f.variables["nav_lon"][:] if np.any(lon > 180): lon[lon > 180] -= 360 - f.variables['nav_lon'][:] = lon - f.variables['nav_lon'].valid_min = np.min(lon) - f.variables['nav_lon'].valid_max = np.max(lon) + f.variables["nav_lon"][:] = lon + f.variables["nav_lon"].valid_min = np.min(lon) + f.variables["nav_lon"].valid_max = np.max(lon) if __name__ == "__main__": diff --git a/bathymetry/bathyImage.ipynb b/bathymetry/bathyImage.ipynb index af722719..9abb82bd 100644 --- a/bathymetry/bathyImage.ipynb +++ b/bathymetry/bathyImage.ipynb @@ -467,4 +467,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/bathymetry/bathy_for_jie.ipynb b/bathymetry/bathy_for_jie.ipynb index abedbf30..236897db 100644 --- a/bathymetry/bathy_for_jie.ipynb +++ b/bathymetry/bathy_for_jie.ipynb @@ -186,4 +186,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/bathymetry/make_readme.py b/bathymetry/make_readme.py index 74fa150d..90c613fb 100644 --- a/bathymetry/make_readme.py +++ b/bathymetry/make_readme.py @@ -23,6 +23,7 @@ and commit and push the updated `README.md` to GitHub. """ + import json from pathlib import Path import re @@ -37,7 +38,7 @@ def main(): cwd_parts = Path.cwd().parts - repo_path = Path(*cwd_parts[cwd_parts.index(REPO_NAME)+1:]) + repo_path = Path(*cwd_parts[cwd_parts.index(REPO_NAME) + 1 :]) url = f"{NBVIEWER}/{GITHUB_ORG}/{REPO_NAME}/blob/{DEFAULT_BRANCH_NAME}/{repo_path}" readme = f"""\ diff --git a/bathymetry/netCDF4bathy.ipynb b/bathymetry/netCDF4bathy.ipynb index d9d7cc59..36723b60 100644 --- a/bathymetry/netCDF4bathy.ipynb +++ b/bathymetry/netCDF4bathy.ipynb @@ -366,4 +366,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/bathymetry/thalweg_working.txt b/bathymetry/thalweg_working.txt index 73aa9558..188d1265 100644 --- a/bathymetry/thalweg_working.txt +++ b/bathymetry/thalweg_working.txt @@ -1531,5 +1531,3 @@ 895 49 896 49 897 49 - - diff --git a/docs/breaking_changes.rst b/docs/breaking_changes.rst index 21e94a62..e047c731 100644 --- a/docs/breaking_changes.rst +++ b/docs/breaking_changes.rst @@ -52,4 +52,3 @@ are incompatible with earlier versions: releases. .. _CalVer: https://calver.org/ - diff --git a/docs/conf.py b/docs/conf.py index 094c3fd2..bc0690e4 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -19,8 +19,8 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../SalishSeaNowcast')) -sys.path.insert(0, os.path.abspath('../SalishSeaTools')) +sys.path.insert(0, os.path.abspath("../SalishSeaNowcast")) +sys.path.insert(0, os.path.abspath("../SalishSeaTools")) # -- General configuration ---------------------------------------------------- @@ -30,93 +30,86 @@ # (named 'sphinx.ext.*') # or your custom ones. 
extensions = [ - 'nbsphinx', - 'IPython.sphinxext.ipython_console_highlighting', - 'sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', - 'sphinx.ext.mathjax', - 'sphinx.ext.todo', - 'sphinx.ext.viewcode', + "nbsphinx", + "IPython.sphinxext.ipython_console_highlighting", + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinx.ext.mathjax", + "sphinx.ext.todo", + "sphinx.ext.viewcode", ] intersphinx_mapping = { - 'docs': - ('https://salishsea-meopar-docs.readthedocs.io/en/latest/', None), - 'salishseacmd': - ('https://salishseacmd.readthedocs.io/en/latest/', None), - 'salishseanowcast': - ('https://salishsea-nowcast.readthedocs.io/en/latest/', None), - 'numpy': - ('https://docs.scipy.org/doc/numpy/', None), - 'pandas': - ('https://pandas.pydata.org/pandas-docs/stable/', None), - 'scipy': - ('https://docs.scipy.org/doc/scipy/reference/', None), + "docs": ("https://salishsea-meopar-docs.readthedocs.io/en/latest/", None), + "salishseacmd": ("https://salishseacmd.readthedocs.io/en/latest/", None), + "salishseanowcast": ("https://salishsea-nowcast.readthedocs.io/en/latest/", None), + "numpy": ("https://docs.scipy.org/doc/numpy/", None), + "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), + "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), } todo_include_todos = True autodoc_mock_imports = [ - 'angles', - 'arrow', - 'driftwood', - 'driftwood.formatters', - 'f90nml', - 'gsw', - 'netCDF4', - 'nowcast', - 'nowcast.figures', - 'pandas', - 'paramiko', - 'retrying', - 'scipy', - 'scipy.interpolate', - 'scipy.io', - 'scipy.optimize', - 'scipy.sparse', - 'tqdm', - 'xarray', - 'yaml', - 'zmq', - 'zmq.eventloop', - 'zmq.eventloop.ioloop', - 'zmq.eventloop.zmqstream', - 'zmq.utils', + "angles", + "arrow", + "driftwood", + "driftwood.formatters", + "f90nml", + "gsw", + "netCDF4", + "nowcast", + "nowcast.figures", + "pandas", + "paramiko", + "retrying", + "scipy", + "scipy.interpolate", + "scipy.io", + "scipy.optimize", + "scipy.sparse", + "tqdm", + "xarray", + "yaml", + "zmq", + "zmq.eventloop", + "zmq.eventloop.ioloop", + "zmq.eventloop.zmqstream", + "zmq.utils", ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'Salish Sea MEOPAR Tools' +project = "Salish Sea MEOPAR Tools" copyright = ( - '2013-{:%Y}, ' - 'Salish Sea MEOPAR Project Contributors ' - 'and The University of British Columbia' - .format(datetime.date.today()) + "2013-{:%Y}, " + "Salish Sea MEOPAR Project Contributors " + "and The University of British Columbia".format(datetime.date.today()) ) # For autoclass -autoclass_content = 'init' +autoclass_content = "init" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '' +version = "" # The full version, including alpha/beta/rc tags. -release = '' +release = "" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build', '**.ipynb_checkpoints'] +exclude_patterns = ["_build", "**.ipynb_checkpoints"] # The name of the Pygments (syntax highlighting) style to use. 
-pygments_style = 'sphinx' +pygments_style = "sphinx" # -- Options for HTML output -------------------------------------------------- @@ -128,16 +121,16 @@ # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -html_favicon = '_static/MEOPAR_favicon.ico' +html_favicon = "_static/MEOPAR_favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -html_last_updated_fmt = '%b %d, %Y' +html_last_updated_fmt = "%b %d, %Y" # If false, no module index is generated. html_domain_indices = False @@ -146,7 +139,7 @@ html_use_index = False # Output file base name for HTML help builder. -htmlhelp_basename = 'SalishSea-MEOPAR-toolsdoc' +htmlhelp_basename = "SalishSea-MEOPAR-toolsdoc" # -- Options for LaTeX output ------------------------------------------------- @@ -155,10 +148,12 @@ # List of tuples # (source start file, target name, title, # author, documentclass [howto/manual]). -latex_documents = [( - 'index', - 'SalishSea-MEOPAR-toolsdoc.tex', - 'Salish Sea MEOPAR Tools Documentation', - 'Salish Sea MEOPAR Project Contributors', - 'manual', -)] +latex_documents = [ + ( + "index", + "SalishSea-MEOPAR-toolsdoc.tex", + "Salish Sea MEOPAR Tools Documentation", + "Salish Sea MEOPAR Project Contributors", + "manual", + ) +] diff --git a/nocscombine/Makefile.aix b/nocscombine/Makefile.aix index a0016966..6d3eceb3 100755 --- a/nocscombine/Makefile.aix +++ b/nocscombine/Makefile.aix @@ -6,22 +6,22 @@ LIBS = -L$(NCHOME)/lib -I$(NCHOME)/include -lnetcdf # #.SUFFIXES : OBJFILES = nocscombine.o make_global_file.o ncread_and_collate.o ncfixcoord.o rtime.o handle_err.o -.o.F90 : +.o.F90 : $(FC) $< $(LIBS) # nocscombine : $(OBJFILES) $(LDR) $(OBJFILES) $(LIBS) nocscombine.o : nocscombine.F90 $(FC) $< $(LIBS) -make_global_file.o : make_global_file.F90 +make_global_file.o : make_global_file.F90 $(FC) $< $(LIBS) ncread_and_collate.o : ncread_and_collate.F90 make_global_file.o $(FC) $< $(LIBS) -ncfixcoord.o : ncfixcoord.F90 +ncfixcoord.o : ncfixcoord.F90 $(FC) $< $(LIBS) -handle_err.o : handle_err.F90 +handle_err.o : handle_err.F90 $(FC) $< $(LIBS) -rtime.o : rtime.F90 +rtime.o : rtime.F90 $(FC) $< $(LIBS) -clean : +clean : rm $(OBJFILES) diff --git a/nocscombine/Makefile.nautilus b/nocscombine/Makefile.nautilus index 575deb97..736772c8 100755 --- a/nocscombine/Makefile.nautilus +++ b/nocscombine/Makefile.nautilus @@ -1,27 +1,27 @@ # -FC = ifort -fixed -80 -O3 -DLARGE_FILE -c +FC = ifort -fixed -80 -O3 -DLARGE_FILE -c LDR = ifort -O3 -fixed -o /fibre/acc/UTILS/bin/nocscombine NCHOME = /sw/packages/netcdf/3.6.2/x86_64 LIBS = -L$(NCHOME)/lib -I$(NCHOME)/include -lnetcdf # #.SUFFIXES : OBJFILES = nocscombine.o make_global_file.o ncread_and_collate.o ncfixcoord.o rtime.o handle_err.o -.o.F90 : +.o.F90 : $(FC) $< $(LIBS) # nocscombine : $(OBJFILES) $(LDR) $(OBJFILES) $(LIBS) nocscombine.o : nocscombine.F90 $(FC) $< $(LIBS) -make_global_file.o : make_global_file.F90 +make_global_file.o : make_global_file.F90 $(FC) $< $(LIBS) ncread_and_collate.o : ncread_and_collate.F90 make_global_file.o $(FC) $< $(LIBS) -ncfixcoord.o : ncfixcoord.F90 +ncfixcoord.o : 
ncfixcoord.F90 $(FC) $< $(LIBS) -handle_err.o : handle_err.F90 +handle_err.o : handle_err.F90 $(FC) $< $(LIBS) -rtime.o : rtime.F90 +rtime.o : rtime.F90 $(FC) $< $(LIBS) -clean : +clean : rm $(OBJFILES) diff --git a/nocscombine/Makefile.novel b/nocscombine/Makefile.novel index a3bf1bf7..15636547 100755 --- a/nocscombine/Makefile.novel +++ b/nocscombine/Makefile.novel @@ -1,27 +1,27 @@ # -FC = ifort -fixed -80 -O3 -DLARGE_FILE -c +FC = ifort -fixed -80 -O3 -DLARGE_FILE -c LDR = ifort -O3 -fixed -o nocscombine NCHOME = /nerc/packages/netcdfifort/v3.6.0-pl1 LIBS = -L$(NCHOME)/lib -I$(NCHOME)/include -lnetcdf # #.SUFFIXES : OBJFILES = nocscombine.o make_global_file.o ncread_and_collate.o ncfixcoord.o rtime.o handle_err.o -.o.F90 : +.o.F90 : $(FC) $< $(LIBS) # nocscombine : $(OBJFILES) $(LDR) $(OBJFILES) $(LIBS) nocscombine.o : nocscombine.F90 $(FC) $< $(LIBS) -make_global_file.o : make_global_file.F90 +make_global_file.o : make_global_file.F90 $(FC) $< $(LIBS) ncread_and_collate.o : ncread_and_collate.F90 make_global_file.o $(FC) $< $(LIBS) -ncfixcoord.o : ncfixcoord.F90 +ncfixcoord.o : ncfixcoord.F90 $(FC) $< $(LIBS) -handle_err.o : handle_err.F90 +handle_err.o : handle_err.F90 $(FC) $< $(LIBS) -rtime.o : rtime.F90 +rtime.o : rtime.F90 $(FC) $< $(LIBS) -clean : +clean : rm $(OBJFILES) diff --git a/nocscombine/README.nocscombine b/nocscombine/README.nocscombine index 15db176f..6cc0ff9a 100644 --- a/nocscombine/README.nocscombine +++ b/nocscombine/README.nocscombine @@ -95,4 +95,3 @@ nocscombine -f ORCA025-N10_1m_19670913_19680410_grid_T_0220.nc \ N10_nov1967_s.nc [ 0] [ 1] [ 2] .......... Completed in 147.961 seconds - diff --git a/nocscombine/handle_err.F90 b/nocscombine/handle_err.F90 index 6d9d17fe..0d2f73df 100644 --- a/nocscombine/handle_err.F90 +++ b/nocscombine/handle_err.F90 @@ -1,7 +1,7 @@ subroutine handle_err(status) USE NETCDF integer, intent ( in) :: status - + if(status /= nf90_noerr) then write(*,*) trim(nf90_strerror(status)) stop "Stopped" diff --git a/nocscombine/make_global_file.F90 b/nocscombine/make_global_file.F90 index 7b996024..a0fbb2cc 100644 --- a/nocscombine/make_global_file.F90 +++ b/nocscombine/make_global_file.F90 @@ -2,19 +2,19 @@ cc cc FORTRAN subroutine that defines the global NetCDF archive containing cc diagnostics on the T-grid. 
The archive is given the default name of -cc "global_Tgrid.nc" -cc +cc "global_Tgrid.nc" +cc cc INPUT :: cc my_time -> array containing write times in seconds cc my_size -> array containing sizes of global archive dimensions cc num_times -> number of time records in archive cc - subroutine make_global_file( infile, ofile, my_time, my_size3, + subroutine make_global_file( infile, ofile, my_time, my_size3, & num_times, verbose, dovar ) USE netcdf implicit none - + integer, intent(IN) :: num_times logical, intent(IN) :: verbose integer, dimension(3), intent(IN) :: my_size3 @@ -28,7 +28,7 @@ subroutine make_global_file( infile, ofile, my_time, my_size3, real,dimension(:), allocatable :: depth_array character*(*) infile, ofile character*256 nname - integer i, m, k, nDim, nVar, nAtt, nlen, nunlim, + integer i, m, k, nDim, nVar, nAtt, nlen, nunlim, & n, ntyp, nvatt, ndims, oldfill cc cc Create global archive and set dimensions @@ -36,8 +36,8 @@ subroutine make_global_file( infile, ofile, my_time, my_size3, my_size(1:3) = my_size3 my_size(4) = num_times #ifdef LARGE_FILE - status = nf90_create( trim(ofile), - & cmode=or(nf90_clobber,nf90_64bit_offset), + status = nf90_create( trim(ofile), + & cmode=or(nf90_clobber,nf90_64bit_offset), & ncid=ncid ) #else status = nf90_create( trim(ofile), NF90_CLOBBER, ncid ) @@ -78,7 +78,7 @@ subroutine make_global_file( infile, ofile, my_time, my_size3, IF(.not.dovar(n)) THEN c skip unwanted variables ELSE - if(verbose) + if(verbose) & write(*,*) n,ntyp,ndims,(dims(i),i=1,ndims),nvatt,' ',trim(nname) if(ndims.eq.0) then status = nf90_def_var( ncid, trim(nname), ntyp, varID ) @@ -91,11 +91,11 @@ subroutine make_global_file( infile, ofile, my_time, my_size3, if(verbose) write(*,*) 'dimids ',dimID(dims(1)),dimID(dims(2)) elseif(ndims.eq.3) then status = nf90_def_var( ncid, trim(nname), ntyp, - & (/ dimID(dims(1)), dimID(dims(2)), + & (/ dimID(dims(1)), dimID(dims(2)), & dimID(dims(3)) /), varID ) elseif(ndims.eq.4) then status = nf90_def_var( ncid, trim(nname), ntyp, - & (/ dimID(dims(1)), dimID(dims(2)), + & (/ dimID(dims(1)), dimID(dims(2)), & dimID(dims(3)), dimID(dims(4)) /), varID ) else write(*,*) 'Unknown ndims: ',ndims @@ -115,9 +115,9 @@ subroutine make_global_file( infile, ofile, my_time, my_size3, deallocate(dimID) deallocate(dims) deallocate(sdims) -c +c c Global Attributes: -c +c status = nf90_redef(ncid) call handle_err(status) do n = 1,nAtt @@ -125,7 +125,7 @@ subroutine make_global_file( infile, ofile, my_time, my_size3, if(verbose) write(*,*) 'Global Attribute: ',n,' ',trim(nname) if(.not. (index(nname,'DOMAIN').gt.0 .or. & index(nname,'associate').gt.0 .or. - & index(nname,'file_name').gt.0 )) + & index(nname,'file_name').gt.0 )) & status = nf90_copy_att(ncid2, & NF90_GLOBAL,trim(nname),ncid,NF90_GLOBAL) call handle_err(status) @@ -137,4 +137,4 @@ subroutine make_global_file( infile, ofile, my_time, my_size3, status = nf90_close(ncid) call handle_err(status) if(verbose) write(*,*) 'B.Finished make_global_file' - end + end diff --git a/nocscombine/ncfixcoord.F90 b/nocscombine/ncfixcoord.F90 index cb353c98..0aed74ba 100644 --- a/nocscombine/ncfixcoord.F90 +++ b/nocscombine/ncfixcoord.F90 @@ -8,7 +8,7 @@ cc was constructed from a parallel run which had ignored land-only cc regions (i.e. 
jpnij < jpni*jpnj ) cc -cc Any attempt will be abandoned if the input fields do not match the +cc Any attempt will be abandoned if the input fields do not match the cc fields they are to replace in either rank, size or datatype cc cc INPUT :: @@ -39,7 +39,7 @@ subroutine ncfixcoord( oname, coordname, verbose) integer, allocatable, dimension(:) :: dims, gsize, lsize cc -cc Open the coordinate datafile. Read the global domain dimensions +cc Open the coordinate datafile. Read the global domain dimensions cc status = nf90_open( trim(coordname), NF90_NOWRITE, Gncid ) if(status /= nf90_NoErr) call handle_err(status) @@ -54,7 +54,7 @@ subroutine ncfixcoord( oname, coordname, verbose) allocate(gsize(nDim)) allocate(lsize(nDim)) cc - status = nf90_Inquire_Variable(Gncid, Gvarid(1), nname, + status = nf90_Inquire_Variable(Gncid, Gvarid(1), nname, & ntyp, ndims, dims, nvatt) cc if(verbose) write(*,*)'Fixing coordinates, name: ',trim(nname) @@ -70,7 +70,7 @@ subroutine ncfixcoord( oname, coordname, verbose) if(status /= nf90_NoErr) call handle_err(status) status = nf90_inq_varid(ncid, "nav_lat", varid(2)) if(status /= nf90_NoErr) call handle_err(status) - status = nf90_Inquire_Variable(ncid, varid(1), nname, + status = nf90_Inquire_Variable(ncid, varid(1), nname, & ntyp2, ndims2, dims, nvatt) if(status /= nf90_NoErr) call handle_err(status) if(verbose) write(*,*)'Fixing coordinates, ndims ',ndims @@ -167,4 +167,4 @@ subroutine ncfixcoord( oname, coordname, verbose) deallocate(gsize) deallocate(lsize) - end subroutine + end subroutine diff --git a/nocscombine/ncread_and_collate.F90 b/nocscombine/ncread_and_collate.F90 index 744b03fe..aca3ed36 100644 --- a/nocscombine/ncread_and_collate.F90 +++ b/nocscombine/ncread_and_collate.F90 @@ -1,6 +1,6 @@ cc ncread_and_collate cc -cc FORTRAN subroutine that reads in data from the individual local domains and +cc FORTRAN subroutine that reads in data from the individual local domains and cc combines it into a global datafield which is written into a NetCDF archive. cc cc INPUT :: @@ -80,7 +80,7 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, cc number of input files to accessed. cc status = nf90_open( trim(fname), NF90_NOWRITE, ncid ) - status = nf90_enddef( ncid ) + status = nf90_enddef( ncid ) status = nf90_Inquire(ncid, nDim, nVar, nAtt, nunlim) allocate(dims(nDim)) allocate(global_start(nDim)) @@ -88,7 +88,7 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, allocate(lstart(nDim)) allocate(lsize(nDim)) - status = nf90_get_att( ncid, nf90_global, "DOMAIN_size_global", + status = nf90_get_att( ncid, nf90_global, "DOMAIN_size_global", & local_size ) global_size(1) = local_size(1) global_size(2) = local_size(2) @@ -96,7 +96,7 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, status = nf90_inquire_dimension( ncid, 4, len = num_instances ) allocate(times(num_instances)) - status = nf90_get_att( ncid, nf90_global, "DOMAIN_number_total", + status = nf90_get_att( ncid, nf90_global, "DOMAIN_number_total", & numfiles ) cc cc Read in the number instances contained in the archive @@ -116,7 +116,7 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, cc cc Create the global NetCDF archive. 
cc - CALL make_global_file( fname, oname, times, global_size, + CALL make_global_file( fname, oname, times, global_size, & num_instances, verbose, dovar ) deallocate(times) @@ -171,7 +171,7 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, c land only regions due to faulty logic in mpp_init_ioipsl c which will have set the wrong halo sizes c - if(n.eq.0) + if(n.eq.0) & write(*,*) 'nocscombine test version: fixing domain attributes' open(unit=11, file='layout.dat') read(11,*) @@ -185,10 +185,10 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, ihe(1) = nlci-nlei ihe(2) = nlcj-nlej #endif - global_start(1) = global_start(1) + ihs(1) - global_start(2) = global_start(2) + ihs(2) - global_end(1) = global_end(1) - ihe(1) - global_end(2) = global_end(2) - ihe(2) + global_start(1) = global_start(1) + ihs(1) + global_start(2) = global_start(2) + ihs(2) + global_end(1) = global_end(1) - ihe(1) + global_end(2) = global_end(2) - ihe(2) local_size(1) = local_size(1) - (ihs(1) + ihe(1)) local_size(2) = local_size(2) - (ihs(2) + ihe(2)) endif @@ -209,7 +209,7 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, end do ig = 0 do i = 1,nVar - status = nf90_Inquire_Variable(ncid, i, nname, + status = nf90_Inquire_Variable(ncid, i, nname, & ntyp, ndims, dims, nvatt) c call handle_err(status) @@ -243,7 +243,7 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, elseif(ndims.eq.1) then allocate( dtimes(lsize(dims(1))) ) status = nf90_get_var( ncid, i, dtimes, start = lstart ) - status = nf90_put_var( Gncid, ig, dtimes, + status = nf90_put_var( Gncid, ig, dtimes, & start=(/ 1 /), & count=(/ lsize(dims(1)) /) ) deallocate(dtimes) @@ -262,9 +262,9 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, if(verbose) write(*,*) '3d Allocate status: ',status status = nf90_get_var( ncid, i, datad_3d, start=lstart ) status = nf90_put_var( Gncid, ig, datad_3d, - & start=(/ global_start(dims(1)), global_start(dims(2)), + & start=(/ global_start(dims(1)), global_start(dims(2)), & global_start(dims(3)) /), - & count=(/ lsize(dims(1)), lsize(dims(2)), + & count=(/ lsize(dims(1)), lsize(dims(2)), & lsize(dims(3)) /) ) deallocate(datad_3d) elseif(ndims.eq.4) then @@ -339,13 +339,13 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, status = nf90_put_var( Gncid, ig, itmpv ) elseif(ndims.eq.1) then allocate( itimes(lsize(dims(1))) ) - if(verbose) write(*,*) '1d Allocate status: ',status, + if(verbose) write(*,*) '1d Allocate status: ',status, & lsize(dims(1)) status = nf90_get_var( ncid, i, itimes, start = lstart ) status = nf90_put_var( Gncid, ig, itimes, & start=(/ 1 /), & count=(/ lsize(dims(1)) /) ) - if(verbose) write(*,*) '1d put status: ',status + if(verbose) write(*,*) '1d put status: ',status deallocate(itimes) elseif(ndims.eq.2) then allocate( datai_2d(lsize(dims(1)), lsize(dims(2))), @@ -390,13 +390,13 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, status = nf90_put_var( Gncid, ig, istmpv ) elseif(ndims.eq.1) then allocate( istimes(lsize(dims(1))) ) - if(verbose) write(*,*) '1d Allocate status: ',status, + if(verbose) write(*,*) '1d Allocate status: ',status, & lsize(dims(1)) status = nf90_get_var( ncid, i, istimes, start = lstart ) status = nf90_put_var( Gncid, ig, istimes, & start=(/ 1 /), & count=(/ lsize(dims(1)) /) ) - if(verbose) write(*,*) '1d put status: ',status + if(verbose) write(*,*) '1d put status: ',status deallocate(istimes) 
elseif(ndims.eq.2) then allocate( datas_2d(lsize(dims(1)), lsize(dims(2))), @@ -441,13 +441,13 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, status = nf90_put_var( Gncid, ig, ibtmpv ) elseif(ndims.eq.1) then allocate( itimes(lsize(dims(1))) ) - if(verbose) write(*,*) '1d Allocate status: ',status, + if(verbose) write(*,*) '1d Allocate status: ',status, & lsize(dims(1)) status = nf90_get_var( ncid, i, ibtimes, start = lstart ) status = nf90_put_var( Gncid, ig, ibtimes, & start=(/ 1 /), & count=(/ lsize(dims(1)) /) ) - if(verbose) write(*,*) '1d put status: ',status + if(verbose) write(*,*) '1d put status: ',status deallocate(ibtimes) elseif(ndims.eq.2) then allocate( datab_2d(lsize(dims(1)), lsize(dims(2))), @@ -494,7 +494,7 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, if(verbose) then write(*,*) "INPUT FILENAME :: ", trim(fname2) write(*,*) 'MODEL FILE LOCAL DIMENSIONS :: ', local_size - write(*,*) 'MODEL FILE GLOBAL START :: ', global_start + write(*,*) 'MODEL FILE GLOBAL START :: ', global_start write(*,*) 'MODEL FILE GLOBAL END :: ', global_end write(*,*) ' ' endif @@ -508,4 +508,4 @@ subroutine ncread_and_collate( fname, oname, silent, verbose, showhaloes, deallocate(lstart) status = nf90_close( Gncid ) - end subroutine + end subroutine diff --git a/nocscombine/nocscombine.F90 b/nocscombine/nocscombine.F90 index 2d894998..27a53599 100644 --- a/nocscombine/nocscombine.F90 +++ b/nocscombine/nocscombine.F90 @@ -1,21 +1,21 @@ cc nocscombine cc -cc FORTRAN program that assembles a global NetCDF archive from a series of +cc FORTRAN program that assembles a global NetCDF archive from a series of cc NEMO output files. cc cc USAGE :: nocscombine -f inputfile [-o outfile] [-v] [-s] cc [-d list of variables] [-ts tstart] [-te tend] cc -cc -f inputfile -cc Mandatory argument. inputfile is the filename of -cc one of the NEMO restart or diagnostic files. +cc -f inputfile +cc Mandatory argument. inputfile is the filename of +cc one of the NEMO restart or diagnostic files. cc This should be the whole filename of one of the individual -cc processor files (i.e. include the +cc processor files (i.e. include the cc node number, eg. ORCA1_00010101_restart_0012.nc). cc This program works on NetCDF NEMO output files. The program will -cc create a NetCDF archive covering the entire global domain. +cc create a NetCDF archive covering the entire global domain. cc -cc [-o outputfile ] +cc [-o outputfile ] cc Optional argument. outputfilename is the required name cc of the outputfile. By default this name will be constructed from cc the inputfilename by stripping off the processor number. I.e.: @@ -23,12 +23,12 @@ cc outputfile: ORCA1_00010101_restart.nc cc The -o option permits the user to override this behaviour. cc -cc [-c coordinatefile ] +cc [-c coordinatefile ] cc Optional argument. coordinatefile is the name cc of the global coordinate file relevant to the NEMO model which cc produced the individual processor files. This file is required to cc ensure complete nav_lon and nav_lat fields in cases where wholly -cc land areas have been omitted from a parallel run (i.e. +cc land areas have been omitted from a parallel run (i.e. cc jpnij .ne. jpni*jpnj). The corrections are applied after cc the collation phase. 
If this argument is supplied and the output cc file exists then the collation phase is not performed but the @@ -41,20 +41,20 @@ cc is useful to prevent multiple simulataneous accesses when running cc nocscombine in a task-farming environment cc -cc [-v] +cc [-v] cc Optional argument. Switches on verbose reporting for debugging cc -cc [-s] +cc [-s] cc Optional argument. Normal behaviour is to show a limited amount cc of progress information. This option runs the utility without any cc output to stdout. cc cc [-showhaloes] -cc Optional argument. Early versions of nocscombine erroneously -cc copied across the halo rows and columns too. Potentially producing -cc lines of unset values if the halo points were not set in the -cc individual restart files (as could happen for some purely -cc diagnostic fields). Use -showhaloes to reproduce this feature +cc Optional argument. Early versions of nocscombine erroneously +cc copied across the halo rows and columns too. Potentially producing +cc lines of unset values if the halo points were not set in the +cc individual restart files (as could happen for some purely +cc diagnostic fields). Use -showhaloes to reproduce this feature cc if required. cc cc [-d] comma-separated list of variables @@ -113,7 +113,7 @@ program combine if ( pos(1).gt.0 .or. fixcoord ) then if(.not.setoname) then oname = fname(1:pos(1)-6)//'.nc' - if(.not.silent) + if(.not.silent) & write(*,'(a)') 'Creating outputfile: '//trim(oname) endif inquire(file=trim(oname),exist = around) @@ -121,7 +121,7 @@ program combine if(.not.silent) runtime = rtime(0.0) CALL flagvars - if(docollation) CALL ncread_and_collate( fname, oname, silent, + if(docollation) CALL ncread_and_collate( fname, oname, silent, & verbose, showhaloes, dovar, tslice, & time_start, time_end, npoff ) cc @@ -229,8 +229,8 @@ end subroutine ParseCmdLine subroutine flagvars c USE netcdf - integer :: n, varid, status, ncid, numfiles, - & i, num_instances + integer :: n, varid, status, ncid, numfiles, + & i, num_instances character (LEN=256) :: nname character (LEN=256), allocatable :: dnames(:) integer m, k, nDim, nVar, nAtt, nlen, nunlim, diff --git a/update_copyright.py b/update_copyright.py index d60be992..cd9dd71d 100644 --- a/update_copyright.py +++ b/update_copyright.py @@ -30,32 +30,31 @@ def main(): this_yr = datetime.date.today().year last_yr = this_yr - 1 - p_last_yr = re.compile('(Copyright) (.*){}'.format(last_yr), re.IGNORECASE) - p_this_yr = re.compile('Copyright (.*){}'.format(this_yr), re.IGNORECASE) + p_last_yr = re.compile("(Copyright) (.*){}".format(last_yr), re.IGNORECASE) + p_this_yr = re.compile("Copyright (.*){}".format(this_yr), re.IGNORECASE) - for root, dirs, files in os.walk('.'): + for root, dirs, files in os.walk("."): for name in files: - if os.path.splitext(name)[1] not in '.py .rst .txt'.split(): + if os.path.splitext(name)[1] not in ".py .rst .txt".split(): continue - if any(('.hg' in root, '_build/html' in root, 'egg-info' in root)): + if any((".hg" in root, "_build/html" in root, "egg-info" in root)): continue - with open(os.path.join(root, name), 'rt') as f: + with open(os.path.join(root, name), "rt") as f: contents = f.read() m = p_last_yr.search(contents) if not m: if not p_this_yr.search(contents): - print('{}: no copyright'.format(os.path.join(root, name))) + print("{}: no copyright".format(os.path.join(root, name))) continue if not m.groups()[1]: # Single year copyright - new_copyright = ( - '{0[0]} {1}-{2}'.format(m.groups(), last_yr, this_yr)) + new_copyright = "{0[0]} 
{1}-{2}".format(m.groups(), last_yr, this_yr) else: - new_copyright = '{0[0]} {0[1]}{1}'.format(m.groups(), this_yr) + new_copyright = "{0[0]} {0[1]}{1}".format(m.groups(), this_yr) updated = p_last_yr.sub(new_copyright, contents) - with open(os.path.join(root, name), 'wt') as f: + with open(os.path.join(root, name), "wt") as f: f.write(updated) -if __name__ == '__main__': +if __name__ == "__main__": main() From 391d736df8df6c6067dc4d90a33fdbc4e169cf9e Mon Sep 17 00:00:00 2001 From: Doug Latornell Date: Sat, 4 Jan 2025 12:28:26 -0800 Subject: [PATCH 3/3] Fix mixture of tabs and spaces for indentation WARNING: This module has many other broken code issues in it. Much debugging and modernization will be required if you want to use it. --- I_ForcingFiles/Atmos/weather.py | 476 +++++++++++++++++++------------- 1 file changed, 287 insertions(+), 189 deletions(-) diff --git a/I_ForcingFiles/Atmos/weather.py b/I_ForcingFiles/Atmos/weather.py index 6d4c4f2f..f918e52b 100644 --- a/I_ForcingFiles/Atmos/weather.py +++ b/I_ForcingFiles/Atmos/weather.py @@ -33,6 +33,7 @@ import os from compiler.ast import flatten + def get_EC_observations(station, start_day, end_day): """ Gather Environment Canada weather observations for the station and dates indicated. The dates should span one month because of how EC data is collected. @@ -50,241 +51,338 @@ def get_EC_observations(station, start_day, end_day): """ station_ids = { - 'PamRocks': 6817, - 'SistersIsland': 6813, - 'EntranceIsland': 29411, - 'Sandheads': 6831, - 'YVR': 51442, #note, I think YVR station name changed in 2013. Older data use 889 - 'PointAtkinson': 844, - 'Victoria': 10944, - 'CampbellRiver': 145, - 'PatriciaBay': 11007, # not exactly at Patricia Bay - 'Esquimalt': 52 + "PamRocks": 6817, + "SistersIsland": 6813, + "EntranceIsland": 29411, + "Sandheads": 6831, + "YVR": 51442, # note, I think YVR station name changed in 2013. 
Older data use 889 + "PointAtkinson": 844, + "Victoria": 10944, + "CampbellRiver": 145, + "PatriciaBay": 11007, # not exactly at Patricia Bay + "Esquimalt": 52, } - st_ar=arrow.Arrow.strptime(start_day, '%d-%b-%Y') - end_ar=arrow.Arrow.strptime(end_day, '%d-%b-%Y') + st_ar = arrow.Arrow.strptime(start_day, "%d-%b-%Y") + end_ar = arrow.Arrow.strptime(end_day, "%d-%b-%Y") - wind_spd= []; wind_dir=[]; temp=[]; - url = 'https://climate.weather.gc.ca/climateData/bulkdata_e.html' + wind_spd = [] + wind_dir = [] + temp = [] + url = "https://climate.weather.gc.ca/climateData/bulkdata_e.html" query = { - 'timeframe': 1, - 'stationID': station_ids[station], - 'format': 'xml', - 'Year': st_ar.year, - 'Month': st_ar.month, - 'Day': 1, + "timeframe": 1, + "stationID": station_ids[station], + "format": "xml", + "Year": st_ar.year, + "Month": st_ar.month, + "Day": 1, } response = requests.get(url, params=query) tree = ElementTree.parse(cStringIO.StringIO(response.content)) root = tree.getroot() - #read lat and lon - for raw_info in root.findall('stationinformation'): - lat =float(raw_info.find('latitude').text) - lon =float(raw_info.find('longitude').text) - #read data - raw_data = root.findall('stationdata') + # read lat and lon + for raw_info in root.findall("stationinformation"): + lat = float(raw_info.find("latitude").text) + lon = float(raw_info.find("longitude").text) + # read data + raw_data = root.findall("stationdata") times = [] for record in raw_data: - day = int(record.get('day')) - hour = int(record.get('hour')) - year = int(record.get('year')) - month = int(record.get('month')) - t = arrow.Arrow(year, month,day,hour,tzinfo=tz('US/Pacific')) - selectors = ( - (day >= st_ar.day and day <= end_ar.day) - ) + day = int(record.get("day")) + hour = int(record.get("hour")) + year = int(record.get("year")) + month = int(record.get("month")) + t = arrow.Arrow(year, month, day, hour, tzinfo=tz("US/Pacific")) + selectors = day >= st_ar.day and day <= end_ar.day if selectors: try: - wind_spd.append(float(record.find('windspd').text)) + wind_spd.append(float(record.find("windspd").text)) times.append(t.datetime) except TypeError: wind_spd.append(0) times.append(t.datetime) - try: - wind_dir.append(float(record.find('winddir').text) * 10) + try: + wind_dir.append(float(record.find("winddir").text) * 10) except: - wind_dir.append(float('NaN')) - try: - temp.append(float(record.find('temp').text) +273) + wind_dir.append(float("NaN")) + try: + temp.append(float(record.find("temp").text) + 273) except: - temp.append(float('NaN')) - wind_spd= np.array(wind_spd) * 1000 / 3600 - wind_dir=-np.array(wind_dir)+270 - wind_dir=wind_dir + 360 * (wind_dir<0) - temp=np.array(temp) + temp.append(float("NaN")) + wind_spd = np.array(wind_spd) * 1000 / 3600 + wind_dir = -np.array(wind_dir) + 270 + wind_dir = wind_dir + 360 * (wind_dir < 0) + temp = np.array(temp) return wind_spd, wind_dir, temp, times, lat, lon + def list_files(startdate, numdays, model): - """ create a list of files for a given model beginning on startdate and going back numdays. - model can be a string 'Operational_old' (reformatted GRIB2, older files), - 'Operational' (reformatted GRIB2 on subset), or 'GEM' (netcdf from EC). 
-    returns the list of files and desired dates"""
-    path=''
-    if model =='GEM':
-        path='/ocean/dlatorne/MEOPAR/GEM2.5/NEMO-atmos/res_'
-    elif model =='Operational_old':
-        path ='/ocean/sallen/allen/research/Meopar/Operational/oper_allvar_y'
-    elif model == 'Operational':
-        path ='/ocean/sallen/allen/research/Meopar/Operational/ops_y'
-
-    dates = [ startdate - datetime.timedelta(days=num) for num in range(0,numdays)]
-    dates.sort();
-    sstr =path+dates[0].strftime('%Y')+'m'+dates[0].strftime('%m')+'d'+dates[0].strftime('%d')+'.nc'
-    estr =path+dates[-1].strftime('%Y')+'m'+dates[-1].strftime('%m')+'d'+dates[-1].strftime('%d')+'.nc'
-
-    for filename in files:
-        if filename < sstr:
-            files.remove(filename)
-        if filename > estr:
-            files.remove(filename)
-
-    files.sort(key=os.path.basename)
-
-    return files
-
-def find_model_point(lon,lat,X,Y):
-    """returns the closest model grid point to the given lat/lon.
-    Model grid is X,Y.
-    returns j,i
-    """
-    # Tolerance for searching for grid points
-    # (approx. distances between adjacent grid points)
-    tol1 = 0.015 # lon
-    tol2 = 0.015# lat
-
-    # Search for a grid point with lon/lat within tolerance of
-    # measured location
-    x1, y1 = np.where(
-        np.logical_and(
-            (np.logical_and(X > lon-tol1, X < lon+tol1)),
-            (np.logical_and(Y > lat-tol2, Y < lat+tol2))))
-    return x1[0], y1[0]
-
-def compile_model_output(i,j,files,model):
-    """ compiles the model variables over severl files into a single array at a j,i grid point.
-    Model can be "Operational", "Operational_old", "GEM".
-    returns wind speed, wind direction, time,pressure, temperature, solar radiation, thermal radiation and humidity. """
-    wind=[]; direc=[]; t=[]; pr=[]; sol=[]; the=[]; pre=[]; tem=[]; qr=[];
+    """create a list of files for a given model beginning on startdate and going back numdays.
+    model can be a string 'Operational_old' (reformatted GRIB2, older files),
+    'Operational' (reformatted GRIB2 on subset), or 'GEM' (netcdf from EC).
+    returns the list of files and desired dates"""
+    path = ""
+    if model == "GEM":
+        path = "/ocean/dlatorne/MEOPAR/GEM2.5/NEMO-atmos/res_"
+    elif model == "Operational_old":
+        path = "/ocean/sallen/allen/research/Meopar/Operational/oper_allvar_y"
+    elif model == "Operational":
+        path = "/ocean/sallen/allen/research/Meopar/Operational/ops_y"
+
+    dates = [startdate - datetime.timedelta(days=num) for num in range(0, numdays)]
+    dates.sort()
+    sstr = (
+        path
+        + dates[0].strftime("%Y")
+        + "m"
+        + dates[0].strftime("%m")
+        + "d"
+        + dates[0].strftime("%d")
+        + ".nc"
+    )
+    estr = (
+        path
+        + dates[-1].strftime("%Y")
+        + "m"
+        + dates[-1].strftime("%m")
+        + "d"
+        + dates[-1].strftime("%d")
+        + ".nc"
+    )
+
+    for filename in files:
+        if filename < sstr:
+            files.remove(filename)
+        if filename > estr:
+            files.remove(filename)
+
+    files.sort(key=os.path.basename)
+
+    return files
+
+
+def find_model_point(lon, lat, X, Y):
+    """returns the closest model grid point to the given lat/lon.
+    Model grid is X,Y.
+    returns j,i
+    """
+    # Tolerance for searching for grid points
+    # (approx. distances between adjacent grid points)
+    tol1 = 0.015  # lon
+    tol2 = 0.015  # lat
+
+    # Search for a grid point with lon/lat within tolerance of
+    # measured location
+    x1, y1 = np.where(
+        np.logical_and(
+            (np.logical_and(X > lon - tol1, X < lon + tol1)),
+            (np.logical_and(Y > lat - tol2, Y < lat + tol2)),
+        )
+    )
+    return x1[0], y1[0]
+
+
+def compile_model_output(i, j, files, model):
+    """compiles the model variables over severl files into a single array at a j,i grid point.
+    Model can be "Operational", "Operational_old", "GEM".
+    returns wind speed, wind direction, time,pressure, temperature, solar radiation, thermal radiation and humidity.
+    """
+    wind = []
+    direc = []
+    t = []
+    pr = []
+    sol = []
+    the = []
+    pre = []
+    tem = []
+    qr = []
     for f in files:
         G = nc.Dataset(f)
-        u = G.variables['u_wind'][:,j,i]; v=G.variables['v_wind'][:,j,i];
-        pr.append(G.variables['atmpres'][:,j,i]); sol.append(G.variables['solar'][:,j,i]);
-        qr.append(G.variables['qair'][:,j,i]); the.append(G.variables['therm_rad'][:,j,i]);
-        pre.append(G.variables['precip'][:,j,i]);
-        tem.append(G.variables['tair'][:,j,i])
+        u = G.variables["u_wind"][:, j, i]
+        v = G.variables["v_wind"][:, j, i]
+        pr.append(G.variables["atmpres"][:, j, i])
+        sol.append(G.variables["solar"][:, j, i])
+        qr.append(G.variables["qair"][:, j, i])
+        the.append(G.variables["therm_rad"][:, j, i])
+        pre.append(G.variables["precip"][:, j, i])
+        tem.append(G.variables["tair"][:, j, i])
         speed = np.sqrt(u**2 + v**2)
         wind.append(speed)
         d = np.arctan2(v, u)
-        d = np.rad2deg(d + (d<0)*2*np.pi);
+        d = np.rad2deg(d + (d < 0) * 2 * np.pi)
         direc.append(d)
-        ts=G.variables['time_counter']
-        if model =='GEM':
-            torig = nc_tools.time_origin(G)
-        elif model =='Operational' or model=='Operational_old':
-            torig = datetime.datetime(1970,1,1) #there is no time_origin attriubte in OP files, so I hard coded this
+        ts = G.variables["time_counter"]
+        if model == "GEM":
+            torig = nc_tools.time_origin(G)
+        elif model == "Operational" or model == "Operational_old":
+            torig = datetime.datetime(
+                1970, 1, 1
+            )  # there is no time_origin attriubte in OP files, so I hard coded this
         for ind in np.arange(ts.shape[0]):
             t.append((torig + datetime.timedelta(seconds=ts[ind])).datetime)
-    wind = np.array(wind).reshape(len(filesGEM)*24,)
-    direc = np.array(direc,'double').reshape(len(filesGEM)*24,)
-    t = np.array(t).reshape(len(filesGEM)*24,)
-    pr = np.array(pr).reshape(len(filesGEM)*24,)
-    tem = np.array(tem).reshape(len(filesGEM)*24,)
-    sol = np.array(sol).reshape(len(filesGEM)*24,)
-    the = np.array(the).reshape(len(filesGEM)*24,)
-    qr = np.array(qr).reshape(len(filesGEM)*24,)
-    pre = np.array(pre).reshape(len(filesGEM)*24,)
+    wind = np.array(wind).reshape(
+        len(filesGEM) * 24,
+    )
+    direc = np.array(direc, "double").reshape(
+        len(filesGEM) * 24,
+    )
+    t = np.array(t).reshape(
+        len(filesGEM) * 24,
+    )
+    pr = np.array(pr).reshape(
+        len(filesGEM) * 24,
+    )
+    tem = np.array(tem).reshape(
+        len(filesGEM) * 24,
+    )
+    sol = np.array(sol).reshape(
+        len(filesGEM) * 24,
+    )
+    the = np.array(the).reshape(
+        len(filesGEM) * 24,
+    )
+    qr = np.array(qr).reshape(
+        len(filesGEM) * 24,
+    )
+    pre = np.array(pre).reshape(
+        len(filesGEM) * 24,
+    )
     return wind, direc, t, pr, tem, sol, the, qr, pre

-def compile_model_output_MF(i,j,files,model):
-    """ Uses the MFDataset method to combine all the datasets
-    Model can be "Operational", "Operational_old", "GEM".
-    returns wind speed, wind direction, time,pressure, temperature, solar radiation, thermal radiation and humidity.
- """ - G = nc.MFDataset(files) - u = G.variables['u_wind'][:,j,i]; v=G.variables['v_wind'][:,j,i]; - pr = G.variables['atmpres'][:,j,i]; sol = G.variables['solar'][:,j,i]; - qr = G.variables['qair'][:,j,i]; the = G.variables['therm_rad'][:,j,i]; - pre = G.variables['precip'][:,j,i]; tem = G.variables['tair'][:,j,i]; - - wind = np.sqrt(u**2 + v**2) - direc = np.arctan2(v, u) - direc = np.rad2deg(direc + (direc<0)*2*np.pi); +def compile_model_output_MF(i, j, files, model): + """Uses the MFDataset method to combine all the datasets + Model can be "Operational", "Operational_old", "GEM". + returns wind speed, wind direction, time,pressure, temperature, solar radiation, thermal radiation and humidity. + """ + G = nc.MFDataset(files) + + u = G.variables["u_wind"][:, j, i] + v = G.variables["v_wind"][:, j, i] + pr = G.variables["atmpres"][:, j, i] + sol = G.variables["solar"][:, j, i] + qr = G.variables["qair"][:, j, i] + the = G.variables["therm_rad"][:, j, i] + pre = G.variables["precip"][:, j, i] + tem = G.variables["tair"][:, j, i] + + wind = np.sqrt(u**2 + v**2) + direc = np.arctan2(v, u) + direc = np.rad2deg(direc + (direc < 0) * 2 * np.pi) + + ts = G.variables["time_counter"] + if model == "GEM": + torig = nc_tools.time_origin(G) + elif model == "Operational" or model == "Operational_old": + torig = datetime.datetime( + 1970, 1, 1 + ) # there is no time_origin attriubte in OP files, so I hard coded this + for ind in np.arange(ts.shape[0]): + t.append((torig + datetime.timedelta(seconds=ts[ind])).datetime) - ts=G.variables['time_counter'] - if model =='GEM': - torig = nc_tools.time_origin(G) - elif model =='Operational' or model=='Operational_old': - torig = datetime.datetime(1970,1,1) #there is no time_origin attriubte in OP files, so I hard coded this - for ind in np.arange(ts.shape[0]): - t.append((torig + datetime.timedelta(seconds=ts[ind])).datetime) + return wind, direc, t, pr, tem, sol, the, qr, pre - return wind, direc, t, pr, tem, sol, the, qr, pre def gather_observed_winds(location, startdate, numdays): """Uses the get_EC_observations function to return the winds and temperature at a location over a certain timefame, beginning on startdate and running backward from numdays. - returns wind speed, wind direction, temperature and times, latitude and longitude. """ - dates = [ startdate - datetime.timedelta(days=num) for num in range(-1,numdays+1)] - dates.sort(); - strd = dates[0].astimezone(pytz.timezone('US/Pacific')); start = strd.strftime('%d-%b-%Y'); - #if only one month of data - if strd.month == dates[-1].astimezone(pytz.timezone('US/Pacific')).month: - endd = strd.replace(month=strd.month+1,day=1); - m = endd.month; - endd=endd -datetime.timedelta(days=1); - end = endd.strftime('%d-%b-%Y'); - [winds,dirs,temps,times, lat, lon] = get_EC_observations(location,start,end) - times=np.array(times); - #if spans more than one month, we have to be more careful + returns wind speed, wind direction, temperature and times, latitude and longitude. 
+ """ + dates = [startdate - datetime.timedelta(days=num) for num in range(-1, numdays + 1)] + dates.sort() + strd = dates[0].astimezone(pytz.timezone("US/Pacific")) + start = strd.strftime("%d-%b-%Y") + # if only one month of data + if strd.month == dates[-1].astimezone(pytz.timezone("US/Pacific")).month: + endd = strd.replace(month=strd.month + 1, day=1) + m = endd.month + endd = endd - datetime.timedelta(days=1) + end = endd.strftime("%d-%b-%Y") + [winds, dirs, temps, times, lat, lon] = get_EC_observations( + location, start, end + ) + times = np.array(times) + # if spans more than one month, we have to be more careful else: - count=1 - m=strd.month - winds =[]; dirs=[]; times=[]; temps=[] - while m != dates[-1].astimezone(pytz.timezone('US/Pacific')).month: - d=dates[count].astimezone(pytz.timezone('US/Pacific')); - if d.month != m: - endd=d-datetime.timedelta(days=1); end = endd.strftime('%d-%b-%Y'); - [wind_speed,wind_dir,temp,time, lat, lon] = get_EC_observations(location,start,end) - winds.append(wind_speed); - dirs.append(wind_dir); - times.append(time); - temps.append(temp) - m=d.month; start=d.strftime('%d-%b-%Y') - count=count+1 - #after the loop - endd=dates[-1].astimezone(pytz.timezone('US/Pacific')); end = endd.strftime('%d-%b-%Y'); - [wind_speed,wind_dir,temp,time, lat, lon] = get_EC_observations(location,start,end) - winds.append(wind_speed); winds=_flatten_list(winds); winds=np.array(winds) - dirs.append(wind_dir); dirs=_flatten_list(dirs); dirs=np.array(dirs) - times.append(time); times=_flatten_list(times); times=np.array(times) - temps.append(temp); temps=_flatten_list(temps); temps=np.array(temps) - #convert times back to utc + count = 1 + m = strd.month + winds = [] + dirs = [] + times = [] + temps = [] + while m != dates[-1].astimezone(pytz.timezone("US/Pacific")).month: + d = dates[count].astimezone(pytz.timezone("US/Pacific")) + if d.month != m: + endd = d - datetime.timedelta(days=1) + end = endd.strftime("%d-%b-%Y") + [wind_speed, wind_dir, temp, time, lat, lon] = get_EC_observations( + location, start, end + ) + winds.append(wind_speed) + dirs.append(wind_dir) + times.append(time) + temps.append(temp) + m = d.month + start = d.strftime("%d-%b-%Y") + count = count + 1 + # after the loop + endd = dates[-1].astimezone(pytz.timezone("US/Pacific")) + end = endd.strftime("%d-%b-%Y") + [wind_speed, wind_dir, temp, time, lat, lon] = get_EC_observations( + location, start, end + ) + winds.append(wind_speed) + winds = _flatten_list(winds) + winds = np.array(winds) + dirs.append(wind_dir) + dirs = _flatten_list(dirs) + dirs = np.array(dirs) + times.append(time) + times = _flatten_list(times) + times = np.array(times) + temps.append(temp) + temps = _flatten_list(temps) + temps = np.array(temps) + # convert times back to utc for i in np.arange(times.shape[0]): - times[i] = times[i].astimezone(pytz.timezone('utc')) - #isolates only requested days - winds = winds[np.where((times<(startdate +datetime.timedelta(days=1))) - & (times>=(startdate -datetime.timedelta(days=numdays-1))))] - dirs = dirs[np.where((times<(startdate +datetime.timedelta(days=1))) - & (times>=(startdate -datetime.timedelta(days=numdays-1))))] - temps = temps[np.where((times<(startdate +datetime.timedelta(days=1))) - & (times>=(startdate -datetime.timedelta(days=numdays-1))))] - times = times[np.where((times<(startdate +datetime.timedelta(days=1))) - & (times>=(startdate -datetime.timedelta(days=numdays-1))))] - - return winds,dirs,temps,times, lat, lon + times[i] = times[i].astimezone(pytz.timezone("utc")) 
+    # isolates only requested days
+    winds = winds[
+        np.where(
+            (times < (startdate + datetime.timedelta(days=1)))
+            & (times >= (startdate - datetime.timedelta(days=numdays - 1)))
+        )
+    ]
+    dirs = dirs[
+        np.where(
+            (times < (startdate + datetime.timedelta(days=1)))
+            & (times >= (startdate - datetime.timedelta(days=numdays - 1)))
+        )
+    ]
+    temps = temps[
+        np.where(
+            (times < (startdate + datetime.timedelta(days=1)))
+            & (times >= (startdate - datetime.timedelta(days=numdays - 1)))
+        )
+    ]
+    times = times[
+        np.where(
+            (times < (startdate + datetime.timedelta(days=1)))
+            & (times >= (startdate - datetime.timedelta(days=numdays - 1)))
+        )
+    ]

-def _flatten_list(mlist):
-    """flattens a list of lists
-    returns the flattened list"""
-    flat =[val for sublist in mlist for val in sublist]
-    return flat
+    return winds, dirs, temps, times, lat, lon
+
+
+def _flatten_list(mlist):
+    """flattens a list of lists
+    returns the flattened list"""
+    flat = [val for sublist in mlist for val in sublist]
+    return flat
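
For reference, a hedged usage sketch (editor's illustration, not part of the patch)
of how the reformatted functions might be called once the module's remaining issues
are addressed; the import path, station name, dates, and day count below are
made-up examples:

    import datetime
    import pytz

    # assumes the script is run next to I_ForcingFiles/Atmos/weather.py
    from weather import get_EC_observations, gather_observed_winds

    # hourly EC observations for one station over (at most) one month;
    # dates use the %d-%b-%Y format the module expects
    wind_spd, wind_dir, temp, times, lat, lon = get_EC_observations(
        "Sandheads", "01-Jan-2014", "31-Jan-2014"
    )

    # observations for the 5 days ending on a timezone-aware date
    startdate = datetime.datetime(2014, 1, 31, tzinfo=pytz.timezone("utc"))
    winds, dirs, temps, times, lat, lon = gather_observed_winds(
        "Sandheads", startdate, 5
    )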