diff --git a/.coveralls.yml b/.coveralls.yml
deleted file mode 100644
index 5b2a1b55..00000000
--- a/.coveralls.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-repo_token: hxJrvjqiH2xBI7eit7BAb7FidH0LeYpGq
-service_name: AppVeyor
diff --git a/.flake8 b/.flake8
index 91600df0..486c99f2 100644
--- a/.flake8
+++ b/.flake8
@@ -1,5 +1,5 @@
 [flake8]
-ignore = E203, E266, E501, W503, C901, E722, E731, W605, E712, F841, D202
+ignore = E203, E266, E501, W503, C901, E722, E731, W605, E712, F841, D202, D203
 max-line-length = 88
 max-complexity = 18
 select = B,C,E,F,W,T4
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index ff39d6f1..09896647 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -55,6 +55,6 @@ jobs:
       - name: Generate coverage report
         run: |
           python -m pytest -vvv --cov=Hapi --cov-report=xml
-
+
       - name: Upload coverage reports to Codecov with GitHub Action
         uses: codecov/codecov-action@v3
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4bd6ce2f..e8180501 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,85 +1,114 @@
+fail_fast: true
 repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v2.3.0
-  hooks:
-  - id: end-of-file-fixer
-    name: "[py - check] validate yaml"
-  - id: trailing-whitespace
-    name: "[file - format] trim trailing whitespace"
-    args: [ --markdown-linebreak-ext=md ]
-  - id: check-added-large-files
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.1.0
+    hooks:
+      - id: end-of-file-fixer
+        name: "[py - check] validate yaml"
+      - id: trailing-whitespace
+        name: "[file - format] trim trailing whitespace"
+        args: [ --markdown-linebreak-ext=md ]
+      - id: check-added-large-files
         name: "[file - check] large file"
         args: [ --maxkb=5000 ]
-  - id: check-docstring-first
+      - id: check-docstring-first
        name: "[py - check] docstring first"
        files: /examples
        types : [file, python ]
-  - id: check-json
+      - id: check-json
        name: "[json - check] validate json"
-  - id: check-merge-conflict
+      - id: check-merge-conflict
        name: "[git - check] merge conflict"
-  - id: debug-statements
+      - id: debug-statements
        name: "[py - check] debug statements"
-  - id: detect-private-key
+      - id: detect-private-key
        name: "[cred - check] private keys"
-  - id: fix-encoding-pragma
+      - id: fix-encoding-pragma
        name: "[file - format] coding pragma"
        args: [ --remove ]
-  - id: mixed-line-ending
+      - id: mixed-line-ending
        name: "[file - format] mixed line ending"
        args: [ --fix=auto ]
-  - id: pretty-format-json
+      - id: pretty-format-json
        name: "[json - format] pretty json"
        args: [ --autofix, --indent=4, --no-sort-keys ]
-  - id: requirements-txt-fixer
+      - id: requirements-txt-fixer
        name: "[reqs - format] fix requirements.txt"
-  - id: check-yaml
+      - id: check-yaml
        name: "[yaml - check] validate yaml"
-- repo: https://github.com/PyCQA/docformatter
-  rev: v1.4
-  hooks:
-  - id: docformatter
-    name: "[py - format] docformatter"
-    args: [ -i, --wrap-summaries, "0" ]
+# - repo: https://github.com/pre-commit/mirrors-isort
+#   rev: v5.10.1
+#   hooks:
+#     - id: isort
+#       name: "[py - format] isort"
+  - repo: https://github.com/PyCQA/docformatter
+    rev: v1.4
+    hooks:
+      - id: docformatter
+        name: "[py - format] docformatter"
+        args: [ -i, --wrap-summaries, "0" ]

-- repo: https://github.com/PyCQA/pydocstyle
-  rev: 6.1.1
-  hooks:
-  - id: pydocstyle
-    name: "[py - check] pydocstyle"
-    files: ^Hapi/
+  - repo: https://github.com/PyCQA/pydocstyle
+    rev: 6.1.1
+    hooks:
+      - id: pydocstyle
+        name: "[py - check] pydocstyle"
+        files: ^Hapi/
-- repo: https://gitlab.com/pycqa/flake8
-  rev: 3.8.4
-  hooks:
-  - id: flake8
-    name: "[py - check] flake8"
-    language_version: python3.9
-    exclude: ^(examples/|tests/)
+  - repo: https://gitlab.com/pycqa/flake8
+    rev: 4.0.1
+    hooks:
+      - id: flake8
+        name: "[py - check] flake8"
+        language_version: python3.9
+        exclude: ^(examples/|tests/)

-- repo: https://github.com/psf/black
-  rev: 22.8.0
-  hooks:
-  - id: black
-- repo: https://github.com/pre-commit/mirrors-isort
-  rev: v5.7.0
-  hooks:
-  - id: isort
-    name: "[py - format] isort"
-- repo: https://github.com/ambv/black
-  rev: 22.8.0
-  hooks:
-  - id: black
-    name: "[py - format] black"
-    language_version: python3.9
+  #- repo: https://github.com/psf/black
+  #  rev: 22.8.0
+  #  hooks:
+  #    - id: black
+  - repo: https://github.com/ambv/black
+    rev: 22.8.0
+    hooks:
+      - id: black
+        name: "[py - format] black"
+        language_version: python3.9
+  - repo: https://github.com/lovesegfault/beautysh
+    rev: v6.2.1
+    hooks:
+      - id: beautysh
+        name: "[bash - format] beautysh"

-- repo: local
-  hooks:
-  - id: pytest-check
-    name: pytest-check
-    entry: pytest -vvv --cov=Hapi
-    language: system
-    pass_filenames: false
-    always_run: true
+  # pre-commit-shell: Checks shell scripts against shellcheck.
+  - repo: https://github.com/detailyang/pre-commit-shell
+    rev: v1.0.6
+    hooks:
+      - id: shell-lint
+        name: "[bash - lint] shell-lint"
+
+  - repo: https://github.com/rlindsgaard/pre-commit-commit-msg-hooks
+    rev: 0.1.0
+    hooks:
+      - id: check-description-max-length
+        name: "[bash - format] check-description-max-length"
+      - id: check-second-line-empty
+        name: "[bash - format] check-second-line-empty"
+      - id: check-summary-capitalized
+        name: "[bash - format] check-summary-capitalized"
+      - id: check-summary-imperative
+        name: "[bash - format] check-summary-imperative"
+      - id: check-summary-max-length
+        name: "[bash - format] check-summary-max-length"
+      - id: check-summary-punctuation
+        name: "[bash - format] check-summary-punctuation"
+
+  - repo: local
+    hooks:
+      - id: pytest-check
+        name: pytest-check
+        entry: pytest -vvv --cov=Hapi
+        language: system
+        pass_filenames: false
+        always_run: true
diff --git a/.pydocstyle b/.pydocstyle
index b73556f3..885b3c6b 100644
--- a/.pydocstyle
+++ b/.pydocstyle
@@ -1,2 +1,2 @@
 [pydocstyle]
-ignore = D202, D413, D417, D107, D213
+ignore = D202, D413, D417, D107, D213, D203
diff --git a/HISTORY.rst b/HISTORY.rst
index cfbc011c..088dbce9 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -40,4 +40,12 @@ History
 ------------------
 * hydraulic model can read chunked big zip file
 * fix CI
-* fix missing module (saint venant script and module)
\ No newline at end of file
+* fix missing module (saint venant script and module)
+
+1.6.0 (2023-02-03)
+------------------
+* all attributes follow the snake_case naming convention
+* refactor all modules with pre-commit
+* add smoothDikeLevel, getReach and updateReach
+* bump up dependency versions
+* move unnecessary functions to serapeum-utils
diff --git a/Hapi/__init__.py b/Hapi/__init__.py
index cc16e569..319242d1 100644
--- a/Hapi/__init__.py
+++ b/Hapi/__init__.py
@@ -3,9 +3,11 @@
 @author: Mostafa
 """
 try:
-    from importlib.metadata import PackageNotFoundError, version  # type: ignore
+    from importlib.metadata import PackageNotFoundError  # type: ignore
+    from importlib.metadata import version
 except ImportError:  # pragma: no cover
-    from importlib_metadata import PackageNotFoundError, version  # type: ignore
+    from importlib_metadata import PackageNotFoundError  # type: ignore
+    from importlib_metadata import version
 try:
@@ -37,6 +39,7 @@

 def configuration(parent_package="", top_path=None):
+    """Configure."""
     from numpy.distutils.misc_util import Configuration

     config = Configuration(None, parent_package, top_path)
diff --git a/Hapi/catchment.py b/Hapi/catchment.py
index c8784ac2..4953015b 100644
--- a/Hapi/catchment.py
+++ b/Hapi/catchment.py
@@ -27,9 +27,14 @@

 class Catchment:
-    """Catchment. The class include methods to read the meteorological and Spatial inputs of the distributed hydrological model. Catchment class also reads the data of the gauges, it is a super class that has the run subclass, so you need to build the catchment object and hand it as an inpit to the Run class to run the model.
+    """Catchment.
+
+    The class includes methods to read the meteorological and spatial inputs of the distributed hydrological model.
+    The Catchment class also reads the data of the gauges; it is a superclass that has the Run subclass,
+    so you need to build the catchment object and hand it as an input to the Run class to run the model.

-    methods:
+    Methods
+    -------
     1-readRainfall
     2-readTemperature
     3-readET
@@ -375,7 +380,9 @@
         logger.debug("Flow Accmulation input is read successfully")

     def readFlowDir(self, Path: str):
-        """readFlowDir method reads the flow direction raster.
+        """Read Flow Direction.
+
+        Reads the flow direction raster.

         Parameters
         ----------
@@ -428,7 +435,9 @@
         logger.debug("Flow Direction input is read successfully")

     def ReadFlowPathLength(self, Path: str):
-        """ReadFlowPathLength method reads the flow path length.
+        """Read Flow Path Length method.
+
+        Reads the flow path length.

         Parameters
         ----------
@@ -981,7 +990,9 @@
         label: str = "",
         fmt: str = "%Y-%m-%d",
     ):
-        """plotHydrograph. plot the simulated and gauge hydrograph.
+        r"""Plot Hydrograph.
+
+        Plot the simulated and gauge hydrographs.

         Parameters
         ----------
@@ -1143,7 +1154,7 @@
             size of the numbers plotted intop of each cells. The default is 8.
         Title : [str], optional
             title of the plot. The default is 'Total Discharge'.
-        titlesize : [integer], optional
+        title_size : [integer], optional
             title size. The default is 15.
         Backgroundcolorthreshold : [float/integer], optional
             threshold value if the value of the cell is greater, the plotted
@@ -1427,9 +1438,15 @@

 class Lake:
-    """Lake. Lake class reads the meteorological inputs, and the module to simulate a lake as a lumped model, using a rating curve, the lake and the upstream sub-catchments are going to be considered as one lumped model than result in a discharge input to the lake, the discharge input is going to change the volume of the water in the lake, and from the volume-outflow curve the outflow can be obtained.
+    """Lake.
+
+    The Lake class reads the meteorological inputs and simulates a lake as a lumped model using a
+    rating curve; the lake and the upstream sub-catchments are considered as one lumped model that
+    results in a discharge input to the lake. The discharge input changes the volume of the water in
+    the lake, and from the volume-outflow curve the outflow can be obtained.
-    methods:
+    Methods
+    -------
     1- readMeteoData
     2- readParameters
     3- readLumpedModel
diff --git a/Hapi/cli.py b/Hapi/cli.py
deleted file mode 100644
index 15387897..00000000
--- a/Hapi/cli.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""Console script for HapiSM."""
-import argparse
-import sys
-
-
-def main():
-    """Console script for HapiSM."""
-    parser = argparse.ArgumentParser()
-    parser.add_argument("_", nargs="*")
-    args = parser.parse_args()
-
-    print("Arguments: " + str(args._))
-    print("Replace this message by putting your code into " "Hapi.cli.main")
-    return 0
-
-
-if __name__ == "__main__":
-    sys.exit(main())  # pragma: no cover
diff --git a/Hapi/hapi_warnings.py b/Hapi/hapi_warnings.py
index 8cefb2c6..ac001eca 100644
--- a/Hapi/hapi_warnings.py
+++ b/Hapi/hapi_warnings.py
@@ -1,7 +1,6 @@
+"""Custom warnings and helpers to silence third-party warnings."""
 import warnings

-import numpy as np

 class InstabilityWarning(UserWarning):
     """Issued when results may be unstable."""
@@ -13,9 +12,11 @@
 warnings.simplefilter("always", UserWarning)


-def SilenceNumpyWarning():
-    np.warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
+def SilencePandasWarning():
+    """Silence pandas FutureWarning."""
+    warnings.simplefilter(action="ignore", category=FutureWarning)


 def SilenceShapelyWarning():
+    """Silence Shapely deprecation warning."""
     warnings.filterwarnings("ignore", category=DeprecationWarning)
diff --git a/Hapi/hm/__init__.py b/Hapi/hm/__init__.py
index e69de29b..5c5e0854 100644
--- a/Hapi/hm/__init__.py
+++ b/Hapi/hm/__init__.py
@@ -0,0 +1 @@
+"""Hydraulic model."""
diff --git a/Hapi/hm/calibration.py b/Hapi/hm/calibration.py
index f653d6e3..1af1fc99 100644
--- a/Hapi/hm/calibration.py
+++ b/Hapi/hm/calibration.py
@@ -10,18 +10,16 @@
 from geopandas import GeoDataFrame
 from loguru import logger
 from matplotlib.figure import Figure
-
-# from pandas import DataFrame
 from pandas._libs.tslibs.timestamps import Timestamp
 from pandas.core.frame import DataFrame
+from pandas.core.series import Series
+from serapeum_utils.utils import class_attr_initialize

-from Hapi.hapi_warnings import SilenceNumpyWarning, SilenceShapelyWarning
+from Hapi.hapi_warnings import SilenceShapelyWarning
 from Hapi.hm.river import River
-from Hapi.utils import class_attr_initialize

 datafn = lambda x: dt.datetime.strptime(x, "%Y-%m-%d")

-SilenceNumpyWarning()
 SilenceShapelyWarning()
@@ -36,27 +34,27 @@
     calibration_attributes = dict(
         q_hm=None,
-        WLHM=None,
+        wl_hm=None,
         q_rrm=None,
-        QRRM2=None,
+        q_rrm2=None,
         rrm_gauges=None,
         hm_gauges=None,
         q_gauges=None,
-        WLGauges=None,
-        CalibrationQ=None,
-        CalibrationWL=None,
+        wl_gauges=None,
+        calibration_q=None,
+        calibration_wl=None,
         annual_max_obs_q=None,
         annual_max_obs_wl=None,
         annual_max_rrm=None,
         annual_max_hm_q=None,
         annual_max_hm_wl=None,
-        AnnualMaxDates=None,
-        MetricsHMvsRRM=None,
-        MetricsRRMvsObs=None,
-        MetricsHMWLvsObs=None,
-        MetricsHMQvsObs=None,
-        WLgaugesList=None,
-        QgaugesList=None,
+        annual_max_dates=None,
+        metrics_hm_vs_rrm=None,
+        metrics_rrm_vs_obs=None,
+        metrics_hm_wl_vs_obs=None,
+        metrics_hm_q_vs_obs=None,
+        wl_gauges_list=None,
+        q_gauges_list=None,
     )

     @class_attr_initialize(calibration_attributes)
     def __init__(
         self,
         start: Union[str, dt.datetime] = "1950-1-1",
         days: int = 36890,
         fmt: str = "%Y-%m-%d",
-        rrmstart: str = None,
-        rrmdays: int = 36890,
+        rrm_start: str = None,
+        rrm_days: int = 36890,
         novalue: int = -9,
         gauge_id_col: Any = "oid",
     ):
@@ -90,10 +88,10 @@
             (default number of days are equivalent to 100 years)
         fmt : [str]
             format of the given dates. The default is "%Y-%m-%d"
-        rrmstart : [str], optional
+        rrm_start : [str], optional
             the start date of the rainfall-runoff data. The default is
             "1950-1-1".
-        rrmdays : [integer], optional
+        rrm_days : [integer], optional
             the length of the data of the rainfall-runoff data in days.
             The default is 36890.
         gauge_id_col: [Any]
@@ -114,25 +112,25 @@
         self.gauge_id_col = gauge_id_col

         Ref_ind = pd.date_range(self.start, self.end, freq="D")
-        self.ReferenceIndex = pd.DataFrame(index=list(range(1, days + 1)))
-        self.ReferenceIndex["date"] = Ref_ind[:-1]
+        self.reference_index = pd.DataFrame(index=list(range(1, days + 1)))
+        self.reference_index["date"] = Ref_ind[:-1]

-        if rrmstart is None:
-            self.rrmstart = self.start
+        if rrm_start is None:
+            self.rrm_start = self.start
         else:
             try:
-                self.rrmstart = dt.datetime.strptime(rrmstart, fmt)
+                self.rrm_start = dt.datetime.strptime(rrm_start, fmt)
             except ValueError:
                 logger.debug(
                     f"plese check the fmt ({fmt}) you entered as it is different from the"
-                    f" rrmstart data ({rrmstart})"
+                    f" rrm_start data ({rrm_start})"
                 )
                 return

-        self.rrmend = self.rrmstart + dt.timedelta(days=rrmdays)
-        ref_ind = pd.date_range(self.rrmstart, self.rrmend, freq="D")
-        self.rrmreferenceindex = pd.DataFrame(index=list(range(1, rrmdays + 1)))
-        self.rrmreferenceindex["date"] = ref_ind[:-1]
+        self.rrm_end = self.rrm_start + dt.timedelta(days=rrm_days)
+        ref_ind = pd.date_range(self.rrm_start, self.rrm_end, freq="D")
+        self.rrm_reference_index = pd.DataFrame(index=list(range(1, rrm_days + 1)))
+        self.rrm_reference_index["date"] = ref_ind[:-1]

     def readGaugesTable(self, path: str):
         """ReadGaugesTable.

         Parameters
         ----------
         path : [String]
             the path to the text file of the gauges table. the file can be
             geojson or a csv file.
>>> "gauges.geojson" - { - "type": "FeatureCollection", "crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::3035" } }, - "features": [ - { "type": "Feature", "properties": { "gid": 149, "name": "station 1", "oid": 23800100, "river": "Nile", - "id": 1, "xsid": 16100, "datum(m)": 252.36, "discharge": 1, "waterlevel": 1 }, "geometry": { "type": "Point", "coordinates": [ 4278240.4259, 2843958.863 ] } }, - { "type": "Feature", "properties": { "gid": 106, "name": "station 2", "oid": 23800500, "river": "Nile", - "id": 2, "xsid": 16269, "datum(m)": 159.37, "discharge": 1, "waterlevel": 1 }, "geometry": { "type": "Point", "coordinates": [ 4259614.333, 2884750.556 ] } }, - { "type": "Feature", "properties": { "gid": 158, "name": "station 3", "oid": 23800690, "river": "Nile", - "id": 4, "xsid": 16581, "datum(m)": 119.71, "discharge": 1, "waterlevel": 1}, "geometry": { "type": "Point", "coordinates": [ 4248756.490, 2924872.503 ] } }, + >>> { + >>> "type": "FeatureCollection", "crs": + >>> { "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::3035" } }, + >>> "features": [ + >>> { "type": "Feature", "properties": { "gid": 149, "name": "station 1", "oid": 23800100, "river": "Nile", + >>> "id": 1, "xsid": 16100, "datum(m)": 252.36, "discharge": 1, "waterlevel": 1 }, + >>> "geometry": { "type": "Point", "coordinates": [ 4278240.4259, 2843958.863 ] } }, + >>> { "type": "Feature", "properties": { "gid": 106, "name": "station 2", "oid": 23800500, "river": "Nile", + >>> "id": 2, "xsid": 16269, "datum(m)": 159.37, "discharge": 1, "waterlevel": 1 }, + >>> "geometry": { "type": "Point", "coordinates": [ 4259614.333, 2884750.556 ] } }, + >>> { "type": "Feature", "properties": { "gid": 158, "name": "station 3", "oid": 23800690, "river": "Nile", + >>> "id": 4, "xsid": 16581, "datum(m)": 119.71, "discharge": 1, "waterlevel": 1}, + >>> "geometry": { "type": "Point", "coordinates": [ 4248756.490, 2924872.503 ] } }, + >>> ] + >>> } Returns ------- @@ -165,9 +169,9 @@ def readGaugesTable(self, path: str): >>> Calib = RC.Calibration("Hydraulic model", gauge_id_col="id") >>> Calib.readGaugesTable("path/to/gauges.geojson") >>> Calib.hm_gauges - gid ... geometry - 0 149 ... POINT (4278240.426 2843958.864) - 1 106 ... POINT (4259614.334 2884750.556) + >>> gid ... geometry + >>> 0 149 ... POINT (4278240.426 2843958.864) + >>> 1 106 ... POINT (4259614.334 2884750.556) """ try: self.hm_gauges = gpd.read_file(path, driver="GeoJSON") @@ -177,31 +181,33 @@ def readGaugesTable(self, path: str): ) self.hm_gauges = pd.read_csv(path) - # sort the gauges table based on the segment + # sort the gauges table based on the reach self.hm_gauges.sort_values(by="id", inplace=True, ignore_index=True) - def getGauges(self, subid: int, gaugei: int = 0) -> DataFrame: - """Get_Gauge_ID get the id of the station for a given river segment. + def getGauges(self, reach_id: int, gaugei: int = 0) -> DataFrame: + """Get gauges. + + get the id of the station for a given river reach. - parameters: + Parameters ---------- - subid: [int] - the river segment id + reach_id: [int] + the river reach id - return: + Returns ------- id: [list/int] - if the river segment contains more than one gauges the function + if the river reach contains more than one gauges the function returns a list of ids, otherwise it returns the id. 
         gauge name: [str]
             name of the gauge
         gauge xs: [int]
             the nearest cross section to the gauge
         """
-        gauges = self.hm_gauges.loc[self.hm_gauges["id"] == subid, :].reset_index()
+        gauges = self.hm_gauges.loc[self.hm_gauges["id"] == reach_id, :].reset_index()
         if len(gauges) == 0:
             raise KeyError(
-                "The given river segment does not have gauges in the gauge table"
+                "The given river reach does not have gauges in the gauge table"
             )
         elif len(gauges) > 1:
             f = gauges.loc[gaugei, :].to_frame()
@@ -213,7 +219,7 @@
         # stationname = gauges.loc[:, column].values.tolist()
         # gaugename = str(gauges.loc[gaugei, 'name'])
         # gaugexs = gauges.loc[gaugei, 'xsid']
-        # segment_xs = str(subid) + "_" + str(gaugexs)
+        # reach_xs = str(reach_id) + "_" + str(gaugexs)

         # stationname, gaugename, gaugexs
@@ -251,7 +257,7 @@

         Returns
         -------
-        WLGauges: [dataframe attiribute].
+        wl_gauges: [dataframe attribute].
             dataframe containing the data of the water level gauges and
             the index as the time series from the StartDate till the end
             and the gaps filled with the NoValue
@@ -307,17 +313,17 @@
         Gauges.replace(to_replace=np.nan, value=novalue, inplace=True)
         Gauges.index = ind
         del Gauges[0]
-        self.WLGauges = Gauges
+        self.wl_gauges = Gauges

         self.hm_gauges["WLstart"] = 0
         self.hm_gauges["WLend"] = 0
         for i in range(len(columns)):
             if self.hm_gauges.loc[i, "waterlevel"] == 1:
-                st1 = self.WLGauges[columns[i]][
-                    self.WLGauges[columns[i]] != novalue
+                st1 = self.wl_gauges[columns[i]][
+                    self.wl_gauges[columns[i]] != novalue
                 ].index[0]
-                end1 = self.WLGauges[columns[i]][
-                    self.WLGauges[columns[i]] != novalue
+                end1 = self.wl_gauges[columns[i]][
+                    self.wl_gauges[columns[i]] != novalue
                 ].index[-1]
                 self.hm_gauges.loc[i, "WLstart"] = st1
                 self.hm_gauges.loc[i, "WLend"] = end1
@@ -524,7 +530,7 @@
             try:
                 self.q_rrm[station_id] = self._readRRMResults(
                     self.version,
-                    self.rrmreferenceindex,
+                    self.rrm_reference_index,
                     path,
                     station_id,
                     fromday,
@@ -543,7 +549,7 @@
             try:
                 self.q_rrm[station_id] = self._readRRMResults(
                     self.version,
-                    self.rrmreferenceindex,
+                    self.rrm_reference_index,
                     path,
                     station_id,
                     fromday,
@@ -552,7 +558,7 @@
                 )[station_id].tolist()
                 self.QRRM2[station_id] = self._readRRMResults(
                     self.version,
-                    self.rrmreferenceindex,
+                    self.rrm_reference_index,
                     path2,
                     station_id,
                     fromday,
@@ -572,8 +578,8 @@
         if today == "":
             today = len(self.q_rrm[self.q_rrm.columns[0]])

-        start = self.ReferenceIndex.loc[fromday, "date"]
-        end = self.ReferenceIndex.loc[today, "date"]
+        start = self.reference_index.loc[fromday, "date"]
+        end = self.reference_index.loc[today, "date"]

         if location == 1:
             self.q_rrm.index = pd.date_range(start, end, freq="D")
@@ -584,8 +590,8 @@
     def readHMQ(
         self,
         path: str,
-        fromday: Union[str, int] = "",
-        today: Union[str, int] = "",
+        from_day: Union[str, int] = "",
+        to_day: Union[str, int] = "",
         novalue: Union[int, float] = -9,
         addHQ2: bool = False,
         shift: bool = False,
@@ -600,9 +606,9 @@
         ----------
         path : [String]
             path to the folder where files for the gauges exist.
-        fromday : [datetime object/str]
+        from_day : [datetime object/str]
             starting date of the time series.
-        today : [integer]
+        to_day : [integer]
             length of the simulation (how many days after the start date).
         novalue : [numeric value]
             the value used to fill the gaps in the time series or to fill the
@@ -623,7 +629,7 @@
         -------
         q_hm : [dataframe attribute]
             dataframe containing the simulated hydrograph for each river
-            segment in the catchment.
+            reach in the catchment.
         """
         if addHQ2 and self.version == 1:
             msg = "please read the traceall file using the RiverNetwork method"
@@ -649,11 +655,11 @@
             nodeid = gauges[i]
             self.q_hm[nodeid] = self._readRRMResults(
                 self.version,
-                self.ReferenceIndex,
+                self.reference_index,
                 path,
                 nodeid,
-                fromday="",
-                today="",
+                from_day=None,
+                to_day=None,
                 date_format=fmt,
             )[nodeid].tolist()
             logger.debug(f"{i} - {path}{nodeid}.txt is read")
@@ -686,21 +692,21 @@
         #     f2[shiftsteps:-1] = f2[0 : -(shiftsteps + 1)]
         # q_hm.loc[ind[f1[0] - 1] : ind[f1[-1] - 1], q_hm.columns[i]] = f2

-        if fromday == "":
-            fromday = 1
-        if today == "":
-            today = len(self.q_hm[self.q_hm.columns[0]])
+        if from_day == "":
+            from_day = 1
+        if to_day == "":
+            to_day = len(self.q_hm[self.q_hm.columns[0]])

-        start = self.ReferenceIndex.loc[fromday, "date"]
-        end = self.ReferenceIndex.loc[today, "date"]
+        start = self.reference_index.loc[from_day, "date"]
+        end = self.reference_index.loc[to_day, "date"]

         self.q_hm.index = pd.date_range(start, end, freq="D")

     def readHMWL(
         self,
         path: str,
-        fromday: Union[str, int] = "",
-        today: Union[str, int] = "",
+        from_day: Union[str, int] = "",
+        to_day: Union[str, int] = "",
         novalue: Union[int, float] = -9,
         shift=False,
         shiftsteps=0,
@@ -734,30 +740,30 @@

         Returns
         -------
-        WLHM : [dataframe attribute]
+        wl_hm : [dataframe attribute]
             dataframe containing the simulated water level hydrograph for
-            each river segment in the catchment.
+            each river reach in the catchment.
         """
         gauges = self.hm_gauges.loc[
             self.hm_gauges["waterlevel"] == 1, self.gauge_id_col
         ].tolist()
-        self.WLgaugesList = gauges
+        self.wl_gauges_list = gauges

-        self.WLHM = pd.DataFrame()
+        self.wl_hm = pd.DataFrame()
         for i in range(len(gauges)):
             nodeid = gauges[i]
-            self.WLHM[nodeid] = self._readRRMResults(
+            self.wl_hm[nodeid] = self._readRRMResults(
                 self.version,
-                self.ReferenceIndex,
+                self.reference_index,
                 path,
                 nodeid,
-                fromday="",
-                today="",
+                from_day="",
+                to_day="",
                 date_format=fmt,
             )[nodeid].tolist()
             logger.debug(f"{i} - {path}{nodeid}.txt is read")
-        # for i in range(len(WLHM.columns)):
-        #     f = np.loadtxt(path + str(int(WLHM.columns[i])) + ".txt", delimiter=",")
+        # for i in range(len(wl_hm.columns)):
+        #     f = np.loadtxt(path + str(int(wl_hm.columns[i])) + ".txt", delimiter=",")
        #
        #     f1 = list(range(int(f[0, 0]), int(f[-1, 0]) + 1))
        #     f2 = list()
@@ -773,18 +779,18 @@
        #     if shift:
        #         f2[shiftsteps:-1] = f2[0 : -(shiftsteps + 1)]

-        #     WLHM.loc[ind[f1[0] - 1] : ind[f1[-1] - 1], WLHM.columns[i]] = f2
-        if fromday == "":
-            fromday = 1
-        if today == "":
-            today = len(self.WLHM[self.WLHM.columns[0]])
+        #     wl_hm.loc[ind[f1[0] - 1] : ind[f1[-1] - 1], wl_hm.columns[i]] = f2
+        if from_day == "":
+            from_day = 1
+        if to_day == "":
+            to_day = len(self.wl_hm[self.wl_hm.columns[0]])

-        start = self.ReferenceIndex.loc[fromday, "date"]
-        end = self.ReferenceIndex.loc[today, "date"]
+        start = self.reference_index.loc[from_day, "date"]
+        end = self.reference_index.loc[to_day, "date"]

-        self.WLHM.index = pd.date_range(start, end, freq="D")
+        self.wl_hm.index = pd.date_range(start, end, freq="D")

-    def readCalirationResult(self, subid: int, path: str = ""):
+    def readCalirationResult(self, reach_id: int, path: str = ""):
         """ReadCalirationResult.
         ReadCalirationResult method reads the 1D results and fill the missing
@@ -792,37 +798,39 @@

         Parameters
         ----------
-        subid : [integer]
+        reach_id : [integer]
             ID of the sub-basin you want to read its data.
         path : [String], optional
             Path to read the results from. The default is ''.

         Returns
         -------
-        CalibrationQ : [dataframe]
+        calibration_q : [dataframe]
             the discharge time series of the calibrated gauges
-        CalibrationWL : [dataframe]
+        calibration_wl : [dataframe]
             the water level time series of the calibrated gauges
         """
         hasattr(self, "q_gauges"), "Please read the discharge gauges first"
         hasattr(self, "WlGauges"), "Please read the water level gauges first"

-        if not hasattr(self, "CalibrationQ"):
+        if not hasattr(self, "calibration_q"):
             indD = pd.date_range(self.start, self.end, freq="D")[:-1]
-            self.CalibrationQ = pd.DataFrame(index=indD)
-        if not hasattr(self, "CalibrationWL"):
+            self.calibration_q = pd.DataFrame(index=indD)
+        if not hasattr(self, "calibration_wl"):
             indD = pd.date_range(self.start, self.end, freq="D")[:-1]
-            self.CalibrationWL = pd.DataFrame(index=indD)
+            self.calibration_wl = pd.DataFrame(index=indD)

         ind = pd.date_range(self.start, self.end, freq="H")[:-1]
-        q = pd.read_csv(path + str(subid) + "_q.txt", header=None, delimiter=r"\s+")
-        wl = pd.read_csv(path + str(subid) + "_wl.txt", header=None, delimiter=r"\s+")
+        q = pd.read_csv(path + str(reach_id) + "_q.txt", header=None, delimiter=r"\s+")
+        wl = pd.read_csv(
+            path + str(reach_id) + "_wl.txt", header=None, delimiter=r"\s+"
+        )
         q.index = ind
         wl.index = ind

-        self.CalibrationQ[subid] = q[1].resample("D").mean()
-        self.CalibrationWL[subid] = wl[1].resample("D").mean()
+        self.calibration_q[reach_id] = q[1].resample("D").mean()
+        self.calibration_wl[reach_id] = wl[1].resample("D").mean()

     def getAnnualMax(
         self, option=1, CorespondingTo=dict(MaxObserved=" ", TimeWindow=0)
@@ -870,7 +878,7 @@
             when using option = 4
         annual_max_hm_wl: [dataframe attribute]
             when using option = 5
-        AnnualMaxDates : [dataframe attribute]
+        annual_max_dates : [dataframe attribute]
         """
         if option == 1:
             if not isinstance(self.q_gauges, DataFrame):
@@ -880,12 +888,12 @@
                 )
             columns = self.q_gauges.columns.tolist()
         elif option == 2:
-            if not isinstance(self.WLGauges, DataFrame):
+            if not isinstance(self.wl_gauges, DataFrame):
                 raise ValueError(
                     "please read the observed Water level data first with the "
                     "ReadObservedWL method"
                 )
-            columns = self.WLGauges.columns.tolist()
+            columns = self.wl_gauges.columns.tolist()
         elif option == 3:
             if not isinstance(self.q_rrm, DataFrame):
                 raise ValueError(
@@ -900,38 +908,38 @@
                 )
             columns = self.q_hm.columns.tolist()
         else:
-            if not isinstance(self.WLHM, DataFrame):
+            if not isinstance(self.wl_hm, DataFrame):
                 raise ValueError(
                     "please read the RIM results first with the ReadRIMWL method"
                 )
-            columns = self.WLHM.columns.tolist()
+            columns = self.wl_hm.columns.tolist()

         if CorespondingTo["MaxObserved"] == "WL":
-            if not isinstance(self.WLGauges, DataFrame):
+            if not isinstance(self.wl_gauges, DataFrame):
                 raise ValueError(
                     "please read the observed Water level data first with the "
                     "ReadObservedWL method"
                 )

-            startdate = self.WLGauges.index[0]
+            startdate = self.wl_gauges.index[0]
             AnnualMax = (
-                self.WLGauges.loc[:, self.WLGauges.columns[0]].resample("A-OCT").max()
+                self.wl_gauges.loc[:, self.wl_gauges.columns[0]].resample("A-OCT").max()
             )
             self.AnnualMaxDates = pd.DataFrame(
-                index=AnnualMax.index, columns=self.WLGauges.columns
+                index=AnnualMax.index, columns=self.wl_gauges.columns
             )

             # get the dates when the max value happen every year
-            for i in range(len(self.WLGauges.columns)):
-                sub = self.WLGauges.columns[i]
+            for i in range(len(self.wl_gauges.columns)):
+                sub = self.wl_gauges.columns[i]
                 for j in range(len(AnnualMax)):
                     if j == 0:
-                        f = self.WLGauges.loc[startdate : AnnualMax.index[j], sub]
+                        f = self.wl_gauges.loc[startdate : AnnualMax.index[j], sub]
                         self.AnnualMaxDates.loc[AnnualMax.index[j], sub] = f.index[
                             f.argmax()
                         ]
                     else:
-                        f = self.WLGauges.loc[
+                        f = self.wl_gauges.loc[
                             AnnualMax.index[j - 1] : AnnualMax.index[j], sub
                         ]
                         self.AnnualMaxDates.loc[AnnualMax.index[j], sub] = f.index[
@@ -959,7 +967,7 @@
                         date = self.AnnualMaxDates.loc[ind, Sub]
                         start = date - dt.timedelta(days=1)
                         end = date + dt.timedelta(days=1)
-                        QTS.append(self.WLGauges.loc[start:end, Sub].max())
+                        QTS.append(self.wl_gauges.loc[start:end, Sub].max())
                 elif option == 3:
                     for j in range(len(self.AnnualMaxDates.loc[:, Sub])):
                         ind = self.AnnualMaxDates.index[j]
@@ -980,7 +988,7 @@
                         date = self.AnnualMaxDates.loc[ind, Sub]
                         start = date - dt.timedelta(days=CorespondingTo["TimeWindow"])
                        end = date + dt.timedelta(days=CorespondingTo["TimeWindow"])
-                        QTS.append(self.WLHM.loc[start:end, Sub].max())
+                        QTS.append(self.wl_hm.loc[start:end, Sub].max())

                 AnnualMax.loc[:, Sub] = QTS
@@ -1036,7 +1044,7 @@
                         date = self.AnnualMaxDates.loc[ind, Sub]
                         start = date - dt.timedelta(days=CorespondingTo["TimeWindow"])
                         end = date + dt.timedelta(days=CorespondingTo["TimeWindow"])
-                        QTS.append(self.WLGauges.loc[start:end, Sub].max())
+                        QTS.append(self.wl_gauges.loc[start:end, Sub].max())

                 elif option == 3:
                     for j in range(len(self.AnnualMaxDates.loc[:, Sub])):
@@ -1059,7 +1067,7 @@
                         date = self.AnnualMaxDates.loc[ind, Sub]
                         start = date - dt.timedelta(days=CorespondingTo["TimeWindow"])
                         end = date + dt.timedelta(days=CorespondingTo["TimeWindow"])
-                        QTS.append(self.WLHM.loc[start:end, Sub].max())
+                        QTS.append(self.wl_hm.loc[start:end, Sub].max())

                 # resample to annual time step
                 AnnualMax.loc[:, Sub] = QTS
@@ -1071,13 +1079,13 @@
                 if option == 1:
                     QTS = self.q_gauges.loc[:, Sub]
                 elif option == 2:
-                    QTS = self.WLGauges.loc[:, Sub]
+                    QTS = self.wl_gauges.loc[:, Sub]
                 elif option == 3:
                     QTS = self.q_rrm.loc[:, Sub]
                 elif option == 4:
                     QTS = self.q_hm.loc[:, Sub]
                 else:
-                    QTS = self.WLHM.loc[:, Sub]
+                    QTS = self.wl_hm.loc[:, Sub]

                 # resample to annual time step
                 AnnualMax.loc[:, Sub] = QTS.resample("A-OCT").max().values
@@ -1095,299 +1103,326 @@
             self.annual_max_hm_wl = AnnualMax

     def calculateProfile(
-        self, Segmenti: int, BedlevelDS: float, Manning: float, BC_slope: float
+        self, reachi: int, BedlevelDS: float, Manning: float, BC_slope: float
     ):
         """CalculateProfile.

-        CalculateProfile method takes the river segment ID and the calibration
+        CalculateProfile method takes the river reach ID and the calibration
         parameters (last downstream cross-section bed level and the manning
         coefficient) and calculates the new profiles.

         Parameters
         ----------
-        1-Segmenti : [Integer]
-            cross-sections segment ID .
-        2-BedlevelDS : [Float]
-            the bed level of the last cross section in the segment.
-        3-Manning : [float]
+        reachi : [Integer]
+            cross-sections reach ID.
+        BedlevelDS : [Float]
+            the bed level of the last cross section in the reach.
+        Manning : [float]
             manning coefficient.
-        4-BC_slope : [float]
+        BC_slope : [float]
             slope of the BC.
         Returns
         -------
-        1-crosssection:[dataframe attribute]
+        cross_sections: [dataframe attribute]
             crosssection attribute will be updated with the newly calculated
-            profile for the given segment
-        2-slope:[dataframe attribute]
+            profile for the given reach
+        slope: [dataframe attribute]
             slope attribute will be updated with the newly calculated average
-            slope for the given segment
+            slope for the given reach
         """
         levels = pd.DataFrame(columns=["id", "bedlevelUS", "bedlevelDS"])

         # change cross-section
-        bedlevel = self.crosssections.loc[
-            self.crosssections["id"] == Segmenti, "gl"
-        ].values
+        bedlevel = self.cross_sections.loc[
+            self.cross_sections["id"] == reachi, "gl"
+        ].values
-        # get the bedlevel of the last cross section in the segment
+        # get the bedlevel of the last cross section in the reach
         # as a calibration parameter
-        levels.loc[Segmenti, "bedlevelDS"] = BedlevelDS
-        levels.loc[Segmenti, "bedlevelUS"] = bedlevel[0]
+        levels.loc[reachi, "bedlevelDS"] = BedlevelDS
+        levels.loc[reachi, "bedlevelUS"] = bedlevel[0]

         NoDistances = len(bedlevel) - 1
-        # AvgSlope = ((levels.loc[Segmenti,'bedlevelUS'] -
-        # levels.loc[Segmenti,'bedlevelDS'] )/ (500 * NoDistances)) *-500
+        # AvgSlope = ((levels.loc[reachi,'bedlevelUS'] -
+        # levels.loc[reachi,'bedlevelDS'] )/ (500 * NoDistances)) *-500
         # change in the bed level of the last XS
-        AverageDelta = (levels.loc[Segmenti, "bedlevelDS"] - bedlevel[-1]) / NoDistances
+        AverageDelta = (levels.loc[reachi, "bedlevelDS"] - bedlevel[-1]) / NoDistances

         # calculate the new bed levels
         bedlevelNew = np.zeros(len(bedlevel))
-        bedlevelNew[len(bedlevel) - 1] = levels.loc[Segmenti, "bedlevelDS"]
-        bedlevelNew[0] = levels.loc[Segmenti, "bedlevelUS"]
+        bedlevelNew[len(bedlevel) - 1] = levels.loc[reachi, "bedlevelDS"]
+        bedlevelNew[0] = levels.loc[reachi, "bedlevelUS"]

         for i in range(len(bedlevel) - 1):
-            # bedlevelNew[i] = levels.loc[Segmenti,'bedlevelDS'] + (len(bedlevel) - i -1) * abs(AvgSlope)
+            # bedlevelNew[i] = levels.loc[reachi,'bedlevelDS'] + (len(bedlevel) - i -1) * abs(AvgSlope)
             bedlevelNew[i] = bedlevel[i] + i * AverageDelta

-        self.crosssections.loc[self.crosssections["id"] == Segmenti, "gl"] = bedlevelNew
+        self.cross_sections.loc[self.cross_sections["id"] == reachi, "gl"] = bedlevelNew
         # change manning
-        self.crosssections.loc[self.crosssections["id"] == Segmenti, "m"] = Manning
+        self.cross_sections.loc[self.cross_sections["id"] == reachi, "m"] = Manning

         ## change slope
         try:
-            # self.slope.loc[self.slope['id']==Segmenti, 'slope'] = AvgSlope
-            self.slope.loc[self.slope["id"] == Segmenti, "slope"] = BC_slope
+            # self.slope.loc[self.slope['id']==reachi, 'slope'] = AvgSlope
+            self.slope.loc[self.slope["id"] == reachi, "slope"] = BC_slope
         except AttributeError:
-            logger.debug(f"The Given river segment- {Segmenti} does not have a slope")
+            logger.debug(f"The given river reach - {reachi} does not have a slope")

-    def smoothBedLevel(self, segmenti):
-        """SmoothXS.
+    def getReach(self, reach_id: int) -> DataFrame:
+        """Get Reach cross section data.

-        SmoothBedLevel method smoothes the bed level of a given segment ID by
-        calculating the moving average of three cross sections
+        Parameters
+        ----------
+        reach_id: [int]
+            reach id
+
+        Returns
+        -------
+        DataFrame
+        """
+        return (
+            self.cross_sections.loc[self.cross_sections["id"] == reach_id, :]
+            .copy()
+            .reset_index()
+        )
+
+    def updateReach(self, reach: DataFrame):
+        """Update the cross section of a given reach in the cross_sections attribute.

         Parameters
         ----------
+        reach: [DataFrame]
+            DataFrame of the reach cross sections

         Returns
         -------
-        1-crosssections: [dataframe attribute]
-            the "gl" column in the crosssections attribute will be smoothed
+        Updates the cross_sections DataFrame attribute.
         """
-        msg = "please read the cross section first"
-        assert hasattr(self, "crosssections"), "{0}".format(msg)
-        g = self.crosssections.loc[self.crosssections["id"] == segmenti, :].index[0]
+        # get the reach id
+        reach_id: np.ndarray = reach.loc[:, "id"].unique()
+        if len(reach_id) > 1:
+            raise ValueError(
+                f"The given DataFrame contains more than one river reach ({len(reach_id)}); the function "
+                "can update one reach at a time."
+            )
+        reach_id = reach_id[0]
+        g = self.cross_sections.loc[self.cross_sections["id"] == reach_id, :].index[0]
+        # reset the index to the original index order
+        reach.index = range(g, g + len(reach))
+        # copy back the reach to the whole XS df
+        self.cross_sections.loc[self.cross_sections["id"] == reach_id, :] = reach

-        segment = self.crosssections.loc[self.crosssections["id"] == segmenti, :].copy()
+    @staticmethod
+    def _smooth(series: Series, window: int = 3):
+        """Smooth the data in the given Series using a centered rolling mean.

-        segment.index = range(len(segment))
-        segment.loc[:, "glnew"] = 0
-        # the bed level at the beginning and end of the egment
-        segment.loc[0, "glnew"] = segment.loc[0, "gl"]
-        segment.loc[len(segment) - 1, "glnew"] = segment.loc[len(segment) - 1, "gl"]
+        Parameters
+        ----------
+        series: [series]
+            Pandas Series.
+        window: [int]
+            window length (length of averaged values)
+
+        Returns
+        -------
+        Pandas Series
+        """
         # calculate the average of three XS bed level
-        for j in range(1, len(segment) - 1):
-            segment.loc[j, "glnew"] = (
-                segment.loc[j - 1, "gl"]
-                + segment.loc[j, "gl"]
-                + segment.loc[j + 1, "gl"]
-            ) / 3
+        # TODO: use the rolling method in all other smoothing methods
+        # https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rolling.html
+        smoothed = series.rolling(window=window, center=True).mean()
+        # keep the original values at the beginning and end of the reach
+        smoothed[0] = series[0]
+        smoothed[smoothed.index[-1]] = series[series.index[-1]]
+
+        return smoothed
+
+    def smoothBedLevel(self, reach_id: int, window: int = 3):
+        """smoothBedLevel.
+
+        SmoothBedLevel method smooths the bed level of a given reach ID by
+        calculating the moving average of three cross sections
+
+        Parameters
+        ----------
+        reach_id : [Integer]
+            reach ID.
+        window: [int]
+            window length (length of averaged values)
+
+        Returns
+        -------
+        cross_sections: [dataframe attribute]
+            the "gl" column in the cross_sections attribute will be smoothed
+        """
+        if not hasattr(self, "cross_sections"):
+            raise ValueError("Please read the cross section first")
+
+        reach = self.getReach(reach_id)
+        reach["glnew"] = self._smooth(reach["gl"], window=window)

         # calculate the difference in the bed level and take it from
         # the bankful depth
-        segment.loc[:, "diff"] = segment.loc[:, "glnew"] - segment.loc[:, "gl"]
-        segment.loc[:, "dbf"] = segment.loc[:, "dbf"] - segment.loc[:, "diff"]
-        segment.loc[:, "gl"] = segment.loc[:, "glnew"]
-        del segment["glnew"], segment["diff"]
+        reach.loc[:, "diff"] = reach.loc[:, "glnew"] - reach.loc[:, "gl"]
+        reach.loc[:, "dbf"] = reach.loc[:, "dbf"] - reach.loc[:, "diff"]
+        reach.loc[:, "gl"] = reach.loc[:, "glnew"]
+        reach.drop(labels=["glnew", "diff"], axis=1, inplace=True)

-        segment.index = range(g, g + len(segment))
-        # copy back the segment to the whole XS df
-        self.crosssections.loc[self.crosssections["id"] == segmenti, :] = segment
+        self.updateReach(reach)

-    def smoothBankLevel(self, segmenti):
-        """SmoothBankLevel.
+    def smoothDikeLevel(self, reach_id: int, window: int = 3):
+        """smoothDikeLevel.

-        SmoothBankLevel method smoothes the bankfull depth for a given segment
+        SmoothDikeLevel method smooths the dike levels ("zl" and "zr") of a given reach ID by
+        calculating the moving average of three cross sections

         Parameters
         ----------
-        1-segmenti : [Integer]
-            segment ID.
+        reach_id : [Integer]
+            reach ID.
+        window: [int]
+            window length (length of averaged values)

         Returns
         -------
-        1-crosssections: [dataframe attribute]
-            the "dbf" column in the crosssections attribute will be smoothed
+        cross_sections: [dataframe attribute]
+            the "zl" and "zr" columns in the cross_sections attribute will be smoothed
         """
-        self.crosssections.loc[:, "banklevel"] = (
-            self.crosssections.loc[:, "dbf"] + self.crosssections.loc[:, "gl"]
-        )
+        if not hasattr(self, "cross_sections"):
+            raise ValueError("Please read the cross section first")

-        g = self.crosssections.loc[self.crosssections["id"] == segmenti, :].index[0]
+        reach = self.getReach(reach_id)
+        # TODO: use the rolling method in all other smoothing methods
+        reach["zl"] = self._smooth(reach["zl"], window=window)
+        reach["zr"] = self._smooth(reach["zr"], window=window)
+        self.updateReach(reach)

-        segment = self.crosssections.loc[self.crosssections["id"] == segmenti, :].copy()
-        segment.index = range(len(segment))
-        segment.loc[:, "banklevelnew"] = 0
-        segment.loc[0, "banklevelnew"] = segment.loc[0, "banklevel"]
-        segment.loc[len(segment) - 1, "banklevelnew"] = segment.loc[
-            len(segment) - 1, "banklevel"
-        ]
+    def smoothBankLevel(self, reach_id: int, window: int = 3):
+        """SmoothBankLevel.

-        for j in range(1, len(segment) - 1):
-            segment.loc[j, "banklevelnew"] = (
-                segment.loc[j - 1, "banklevel"]
-                + segment.loc[j, "banklevel"]
-                + segment.loc[j + 1, "banklevel"]
-            ) / 3
+        SmoothBankLevel method smooths the bankfull depth for a given reach

-        segment.loc[:, "diff"] = (
-            segment.loc[:, "banklevelnew"] - segment.loc[:, "banklevel"]
+        Parameters
+        ----------
+        reach_id : [Integer]
+            Reach ID.
+        window: [int]
+            window length (length of averaged values)
+
+        Returns
+        -------
+        cross_sections: [dataframe attribute]
+            the "dbf" column in the cross_sections attribute will be smoothed
+        """
+        self.cross_sections.loc[:, "banklevel"] = (
+            self.cross_sections.loc[:, "dbf"] + self.cross_sections.loc[:, "gl"]
         )
-        segment.loc[:, "dbf"] = segment.loc[:, "dbf"] + segment.loc[:, "diff"]

-        del self.crosssections["banklevel"]
-        segment.index = range(g, g + len(segment))
+        reach = self.getReach(reach_id)
+        reach["banklevelnew"] = self._smooth(reach["banklevel"], window=window)
+
+        reach.loc[:, "diff"] = reach.loc[:, "banklevelnew"] - reach.loc[:, "banklevel"]
+        # add the difference to the bankful depth
+        reach.loc[:, "dbf"] = reach.loc[:, "dbf"] + reach.loc[:, "diff"]

-        # copy back the segment to the whole XS df
-        self.crosssections.loc[self.crosssections["id"] == segmenti, :] = segment
+        reach.drop(labels=["banklevel"], axis=1, inplace=True)
+        self.updateReach(reach)

-    def smoothFloodplainHeight(self, segmenti):
+    def smoothFloodplainHeight(self, reach_id: int, window: int = 3):
         """SmoothFloodplainHeight.

         SmoothFloodplainHeight method smoothes the Floodplain Height the
-        point 5 and 6 in the cross section for a given segment
+        point 5 and 6 in the cross section for a given reach

         Parameters
         ----------
-        1-segmenti : [Integer]
-            segment ID.
+        reach_id : [Integer]
+            reach ID.
+        window: [int]
+            window length (length of averaged values)

         Returns
         -------
-        1-crosssections: [dataframe attribute]
-            the "hl" and "hr" column in the crosssections attribute will be
+        cross_sections: [dataframe attribute]
+            the "hl" and "hr" column in the cross_sections attribute will be
             smoothed.
         """
-        self.crosssections.loc[:, "banklevel"] = (
-            self.crosssections.loc[:, "dbf"] + self.crosssections.loc[:, "gl"]
+        self.cross_sections.loc[:, "banklevel"] = (
+            self.cross_sections.loc[:, "dbf"] + self.cross_sections.loc[:, "gl"]
         )

-        self.crosssections.loc[:, "fpl"] = (
-            self.crosssections.loc[:, "hl"] + self.crosssections.loc[:, "banklevel"]
+        self.cross_sections.loc[:, "fpl"] = (
+            self.cross_sections.loc[:, "hl"] + self.cross_sections.loc[:, "banklevel"]
         )
-        self.crosssections.loc[:, "fpr"] = (
-            self.crosssections.loc[:, "hr"] + self.crosssections.loc[:, "banklevel"]
+        self.cross_sections.loc[:, "fpr"] = (
+            self.cross_sections.loc[:, "hr"] + self.cross_sections.loc[:, "banklevel"]
         )

-        g = self.crosssections.loc[self.crosssections["id"] == segmenti, :].index[0]
-
-        segment = self.crosssections.loc[self.crosssections["id"] == segmenti, :].copy()
-        segment.index = range(len(segment))
-
-        segment.loc[:, "fplnew"] = 0
-        segment.loc[:, "fprnew"] = 0
-        segment.loc[0, "fplnew"] = segment.loc[0, "fpl"]
-        segment.loc[len(segment) - 1, "fplnew"] = segment.loc[len(segment) - 1, "fpl"]
-
-        segment.loc[0, "fprnew"] = segment.loc[0, "fpr"]
-        segment.loc[len(segment) - 1, "fprnew"] = segment.loc[len(segment) - 1, "fpr"]
-
-        for j in range(1, len(segment) - 1):
-            segment.loc[j, "fplnew"] = (
-                segment.loc[j - 1, "fpl"]
-                + segment.loc[j, "fpl"]
-                + segment.loc[j + 1, "fpl"]
-            ) / 3
-            segment.loc[j, "fprnew"] = (
-                segment.loc[j - 1, "fpr"]
-                + segment.loc[j, "fpr"]
-                + segment.loc[j + 1, "fpr"]
-            ) / 3
-
-        segment.loc[:, "diff0"] = segment.loc[:, "fplnew"] - segment.loc[:, "fpl"]
-        segment.loc[:, "diff1"] = segment.loc[:, "fprnew"] - segment.loc[:, "fpr"]
-
-        segment.loc[:, "hl"] = segment.loc[:, "hl"] + segment.loc[:, "diff0"]
-        segment.loc[:, "hr"] = segment.loc[:, "hr"] + segment.loc[:, "diff1"]
-        segment.index = range(g, g + len(segment))
-        # copy back the segment to the whole XS df
-        self.crosssections.loc[self.crosssections["id"] == segmenti, :] = segment
-
-        del (
-            self.crosssections["banklevel"],
-            self.crosssections["fpr"],
-            self.crosssections["fpl"],
+        reach = self.getReach(reach_id)
+
+        reach["fplnew"] = self._smooth(reach["fpl"], window=window)
+        reach["fprnew"] = self._smooth(reach["fpr"], window=window)
+
+        reach.loc[:, "diff0"] = reach.loc[:, "fplnew"] - reach.loc[:, "fpl"]
+        reach.loc[:, "diff1"] = reach.loc[:, "fprnew"] - reach.loc[:, "fpr"]
+
+        reach.loc[:, "hl"] = reach.loc[:, "hl"] + reach.loc[:, "diff0"]
+        reach.loc[:, "hr"] = reach.loc[:, "hr"] + reach.loc[:, "diff1"]
+
+        self.updateReach(reach)
+        self.cross_sections.drop(
+            labels=["banklevel", "fpr", "fpl"], axis=1, inplace=True
         )

-    def smoothBedWidth(self, segmenti):
+    def smoothBedWidth(self, reach_id: int, window: int = 3):
         """SmoothBedWidth.

         SmoothBedWidth method smoothes the Bed Width the in the cross section
-        for a given segment
+        for a given reach

         Parameters
         ----------
-        1-segmenti : [Integer]
-            segment ID.
+        reach_id : [Integer]
+            reach ID.
+        window: [int]
+            window length (length of averaged values)

         Returns
         -------
-        1-crosssections: [dataframe attribute]
-            the "b" column in the crosssections attribute will be smoothed
+        cross_sections: [dataframe attribute]
+            the "b" column in the cross_sections attribute will be smoothed
         """
-        g = self.crosssections.loc[self.crosssections["id"] == segmenti, :].index[0]
-        segment = self.crosssections.loc[self.crosssections["id"] == segmenti, :].copy()
-        segment.index = range(len(segment))
-        segment.loc[:, "bnew"] = 0
-        segment.loc[0, "bnew"] = segment.loc[0, "b"]
-        segment.loc[len(segment) - 1, "bnew"] = segment.loc[len(segment) - 1, "b"]
-
-        for j in range(1, len(segment) - 1):
-            segment.loc[j, "bnew"] = (
-                segment.loc[j - 1, "b"] + segment.loc[j, "b"] + segment.loc[j + 1, "b"]
-            ) / 3
-
-        segment.loc[:, "b"] = segment.loc[:, "bnew"]
-        segment.index = range(g, g + len(segment))
-        # copy back the segment to the whole XS df
-        self.crosssections.loc[self.crosssections["id"] == segmenti, :] = segment
+        reach = self.getReach(reach_id)
+        reach["b"] = self._smooth(reach["b"], window=window)
+        self.updateReach(reach)

-    def downWardBedLevel(self, segmenti: int, height: Union[int, float]):
-        """SmoothBedWidth.
+    def downWardBedLevel(self, reach_id: int, height: Union[int, float]):
+        """downWardBedLevel.

-        SmoothBedWidth method smoothes the Bed Width the in the cross section
-        for a given segment
+        Lowering the bed level by a certain height (e.g. 5 cm).

         Parameters
         ----------
-        segmenti : [Integer]
-            segment ID.
+        reach_id : [Integer]
+            reach ID.
         height : []
+            the height to lower the bed level by.

         Returns
         -------
-        crosssections: [dataframe attribute]
-            the "b" column in the crosssections attribute will be smoothed
+        cross_sections: [dataframe attribute]
+            the "gl" column in the cross_sections attribute will be lowered
         """
-        g = self.crosssections.loc[self.crosssections["id"] == segmenti, :].index[0]
-
-        segment = self.crosssections.loc[self.crosssections["id"] == segmenti, :].copy()
-        segment.index = range(len(segment))
+        reach = self.getReach(reach_id)

-        for j in range(1, len(segment)):
-            if segment.loc[j - 1, "gl"] - segment.loc[j, "gl"] < height:
-                segment.loc[j, "gl"] = segment.loc[j - 1, "gl"] - height
+        for j in range(1, len(reach)):
+            if reach.loc[j - 1, "gl"] - reach.loc[j, "gl"] < height:
+                reach.loc[j, "gl"] = reach.loc[j - 1, "gl"] - height

-        segment.index = range(g, g + len(segment))
-        # copy back the segment to the whole XS df
-        self.crosssections.loc[self.crosssections["id"] == segmenti, :] = segment
+        self.updateReach(reach)

-    def smoothMaxSlope(self, segmenti, SlopePercentThreshold=1.5):
+    def smoothMaxSlope(self, reach_id: int, SlopePercentThreshold=1.5):
         """SmoothMaxSlope.

         SmoothMaxSlope method smoothes the bed level the in the cross section
-        for a given segment
+        for a given reach

         As now the slope is not very smoothed as it was when using the average
         slope everywhere, when the the difference between two consecutive
@@ -1412,25 +1447,22 @@

         Parameters
         ----------
-        1-segmenti : [Integer]
-            segment ID.
-        2-SlopePercentThreshold : [Float]
+        reach_id : [Integer]
+            reach ID.
+        SlopePercentThreshold : [Float]
             the percent of change in slope between three successive cross
             sections. The default is 1.5.

         Returns
         -------
-        1-crosssections: [dataframe attribute]
-            the "gl" column in the crosssections attribute will be smoothed
+        cross_sections: [dataframe attribute]
+            the "gl" column in the cross_sections attribute will be smoothed
         """
-        g = self.crosssections.loc[self.crosssections["id"] == segmenti, :].index[0]
-
-        segment = self.crosssections.loc[self.crosssections["id"] == segmenti, :].copy()
-        segment.index = range(len(segment))
+        reach = self.getReach(reach_id)

         # slope must be positive due to the smoothing
         slopes = [
-            (segment.loc[k, "gl"] - segment.loc[k + 1, "gl"]) / 500
-            for k in range(len(segment) - 1)
+            (reach.loc[k, "gl"] - reach.loc[k + 1, "gl"]) / 500
+            for k in range(len(reach) - 1)
         ]
         # if percent is -ve means second slope is steeper
         precent = [
             (slopes[k] - slopes[k + 1]) / slopes[k] for k in range(len(slopes) - 1)
         ]

         # at row 1 in precent list is difference between row 1 and row 2
         # in slopes list and slope in row 2 is the steep slope,
         # slope at row 2 is the difference
-        # between gl in row 2 and row 3 in the segment dataframe, and gl row
+        # between gl in row 2 and row 3 in the reach dataframe, and gl row
         # 3 is very and we want to elevate it to reduce the slope
         for j in range(len(precent)):
             if precent[j] < 0 and abs(precent[j]) >= SlopePercentThreshold:
                 logger.debug(j)
                 # get the calculated slope based on the slope percent threshold
                 slopes[j + 1] = slopes[j] - (-SlopePercentThreshold * slopes[j])
-                segment.loc[j + 2, "gl"] = (
-                    segment.loc[j + 1, "gl"] - slopes[j + 1] * 500
-                )
+                reach.loc[j + 2, "gl"] = reach.loc[j + 1, "gl"] - slopes[j + 1] * 500
                 # recalculate all the slopes again
                 slopes = [
-                    (segment.loc[k, "gl"] - segment.loc[k + 1, "gl"]) / 500
-                    for k in range(len(segment) - 1)
+                    (reach.loc[k, "gl"] - reach.loc[k + 1, "gl"]) / 500
+                    for k in range(len(reach) - 1)
                 ]
                 precent = [
                     (slopes[k] - slopes[k + 1]) / slopes[k]
                     for k in range(len(slopes) - 1)
                 ]

-        segment.index = range(g, g + len(segment))
-        # copy back the segment to the whole XS df
-        self.crosssections.loc[self.crosssections["id"] == segmenti, :] = segment
+        self.updateReach(reach)

     def checkFloodplain(self):
         """CheckFloodplain.
@@ -1474,30 +1502,31 @@

         Returns
         -------
         crosssection : [dataframe attribute]
-            the "zl" and "zr" column in the "crosssections" attribute will be
+            the "zl" and "zr" column in the "cross_sections" attribute will be
             updated
         """
         msg = """please read the cross section first or copy it to the
         Calibration object"""
-        assert hasattr(self, "crosssections"), "{0}".format(msg)
-        for i in range(len(self.crosssections)):
+        assert hasattr(self, "cross_sections"), "{0}".format(msg)
+        for i in range(len(self.cross_sections)):
             BankLevel = (
-                self.crosssections.loc[i, "gl"] + self.crosssections.loc[i, "dbf"]
+                self.cross_sections.loc[i, "gl"] + self.cross_sections.loc[i, "dbf"]
             )

             if (
-                BankLevel + self.crosssections.loc[i, "hl"]
-                > self.crosssections.loc[i, "zl"]
+                BankLevel + self.cross_sections.loc[i, "hl"]
+                > self.cross_sections.loc[i, "zl"]
             ):
-                self.crosssections.loc[i, "zl"] = (
-                    BankLevel + self.crosssections.loc[i, "hl"] + 0.5
+                self.cross_sections.loc[i, "zl"] = (
+                    BankLevel + self.cross_sections.loc[i, "hl"] + 0.5
                 )
+
             if (
-                BankLevel + self.crosssections.loc[i, "hr"]
-                > self.crosssections.loc[i, "zr"]
+                BankLevel + self.cross_sections.loc[i, "hr"]
+                > self.cross_sections.loc[i, "zr"]
             ):
-                self.crosssections.loc[i, "zr"] = (
-                    BankLevel + self.crosssections.loc[i, "hr"] + 0.5
+                self.cross_sections.loc[i, "zr"] = (
+                    BankLevel + self.cross_sections.loc[i, "hr"] + 0.5
                 )

     @staticmethod
     def Metrics(
@@ -1511,7 +1540,7 @@
         shift: int = 0,
         fmt: str = "%Y-%m-%d",
     ) -> DataFrame:
-        """
+        """Calculate performance metrics.
         Parameters
         ----------
@@ -1627,21 +1656,21 @@
         if not isinstance(self.q_hm, DataFrame):
             raise ValueError("please read the HM results with the 'ReadHMQ' method")
         ### HM vs RRM
-        self.MetricsHMvsRRM = self.Metrics(
+        self.metrics_hm_vs_rrm = self.Metrics(
             self.q_rrm, self.q_hm, self.rrm_gauges, self.novalue, start, end, shift, fmt
         )
         # get the point geometry from the hm_gauges
-        self.MetricsHMvsRRM = self.hm_gauges.merge(
-            self.MetricsHMvsRRM,
+        self.metrics_hm_vs_rrm = self.hm_gauges.merge(
+            self.metrics_hm_vs_rrm,
             left_on=self.gauge_id_col,
             right_index=True,
             how="left",
             sort=False,
         )
-        self.MetricsHMvsRRM.index = self.MetricsHMvsRRM[self.gauge_id_col]
-        self.MetricsHMvsRRM.index.name = "index"
+        self.metrics_hm_vs_rrm.index = self.metrics_hm_vs_rrm[self.gauge_id_col]
+        self.metrics_hm_vs_rrm.index.name = "index"
         if isinstance(self.hm_gauges, GeoDataFrame):
-            self.MetricsHMvsRRM.crs = self.hm_gauges.crs
+            self.metrics_hm_vs_rrm.crs = self.hm_gauges.crs

     def RRMvsObserved(
         self, start: str = "", end: str = "", fmt: str = "%Y-%m-%d", shift: int = 0
     ):
@@ -1685,7 +1714,7 @@
             )

         ### RRM vs observed
-        self.MetricsRRMvsObs = self.Metrics(
+        self.metrics_rrm_vs_obs = self.Metrics(
             self.q_gauges,
             self.q_rrm,
             self.rrm_gauges,
             self.novalue,
             start,
             end,
             shift,
             fmt,
         )

-        self.MetricsRRMvsObs = self.hm_gauges.merge(
-            self.MetricsRRMvsObs,
+        self.metrics_rrm_vs_obs = self.hm_gauges.merge(
+            self.metrics_rrm_vs_obs,
             left_on=self.gauge_id_col,
             right_index=True,
             how="left",
             sort=False,
         )
-        self.MetricsRRMvsObs.index = self.MetricsRRMvsObs[self.gauge_id_col]
-        self.MetricsRRMvsObs.index.name = "index"
+        self.metrics_rrm_vs_obs.index = self.metrics_rrm_vs_obs[self.gauge_id_col]
+        self.metrics_rrm_vs_obs.index.name = "index"

         if isinstance(self.hm_gauges, GeoDataFrame):
-            self.MetricsRRMvsObs.crs = self.hm_gauges.crs
+            self.metrics_rrm_vs_obs.crs = self.hm_gauges.crs

     def HMQvsObserved(
         self,
@@ -1736,7 +1765,7 @@

         Returns
         -------
-        MetricsHMQvsObs: [dataframe]
+        metrics_hm_q_vs_obs: [dataframe]
             dataframe with the gauges id as rows and ['start', 'end', 'rmse',
             'KGE', 'WB', 'NSE', 'NSEModefied'], as columns.
         """
@@ -1753,7 +1782,7 @@
             )

         ### HM Q vs observed
-        self.MetricsHMQvsObs = self.Metrics(
+        self.metrics_hm_q_vs_obs = self.Metrics(
             self.q_gauges,
             self.q_hm,
             self.QgaugesList,
             self.novalue,
             start,
             end,
             shift,
             fmt,
         )

-        self.MetricsHMQvsObs = self.hm_gauges.merge(
-            self.MetricsHMQvsObs,
+        self.metrics_hm_q_vs_obs = self.hm_gauges.merge(
+            self.metrics_hm_q_vs_obs,
             left_on=self.gauge_id_col,
             right_index=True,
             how="left",
             sort=False,
         )
-        self.MetricsHMQvsObs.index = self.MetricsHMQvsObs[self.gauge_id_col]
-        self.MetricsHMQvsObs.index.name = "index"
+        self.metrics_hm_q_vs_obs.index = self.metrics_hm_q_vs_obs[self.gauge_id_col]
+        self.metrics_hm_q_vs_obs.index.name = "index"

         if isinstance(self.hm_gauges, GeoDataFrame):
-            self.MetricsHMQvsObs.crs = self.hm_gauges.crs
+            self.metrics_hm_q_vs_obs.crs = self.hm_gauges.crs

     def HMWLvsObserved(
         self,
@@ -1804,27 +1833,27 @@

         Returns
         -------
-        MetricsHMWLvsObs: [dataframe]
+        metrics_hm_wl_vs_obs: [dataframe]
             dataframe with the gauges id as rows and ['start', 'end', 'rmse',
             'KGE', 'WB', 'NSE', 'NSEModefied'], as columns.
""" - if not isinstance(self.WLGauges, DataFrame): + if not isinstance(self.wl_gauges, DataFrame): raise ValueError( - "WLGauges variable does not exist please read the water level gauges " - "with 'ReadObservedWL' method" + "wl_gauges variable does not exist please read the water level gauges " + "with 'readObservedWL' method" ) - if not isinstance(self.WLHM, DataFrame): + if not isinstance(self.wl_hm, DataFrame): raise ValueError( - "WLHM variable does not exist please read the water level simulated by the HM " - "with 'ReadHMWL' method" + "wl_hm variable does not exist please read the water level simulated by the HM " + "with 'readHMWL' method" ) ### HM WL vs observed - self.MetricsHMWLvsObs = self.Metrics( - self.WLGauges, - self.WLHM, - self.WLgaugesList, + self.metrics_hm_wl_vs_obs = self.Metrics( + self.wl_gauges, + self.wl_hm, + self.wl_gauges_list, self.novalue, start, end, @@ -1832,22 +1861,22 @@ def HMWLvsObserved( fmt, ) - self.MetricsHMWLvsObs = self.hm_gauges.merge( - self.MetricsHMWLvsObs, + self.metrics_hm_wl_vs_obs = self.hm_gauges.merge( + self.metrics_hm_wl_vs_obs, left_on=self.gauge_id_col, right_index=True, how="left", sort=False, ) - self.MetricsHMWLvsObs.index = self.MetricsHMWLvsObs[self.gauge_id_col] - self.MetricsHMWLvsObs.index.name = "index" + self.metrics_hm_wl_vs_obs.index = self.metrics_hm_wl_vs_obs[self.gauge_id_col] + self.metrics_hm_wl_vs_obs.index.name = "index" if isinstance(self.hm_gauges, GeoDataFrame): - self.MetricsHMWLvsObs.crs = self.hm_gauges.crs + self.metrics_hm_wl_vs_obs.crs = self.hm_gauges.crs def InspectGauge( self, - subid: int, + reach_id: int, gaugei: int = 0, start: str = "", end: str = "", @@ -1860,43 +1889,44 @@ def InspectGauge( InspectGauge returns the metrices of the gauge simulated discharge and water level and plot it - parameters + Parameters ---------- - subid: [int] - river segment id + reach_id: [int] + river reach id gaugei: [int] - if the river segment has more than one gauge, gaugei is the gauge order + if the river reach has more than one gauge, gaugei is the gauge order start: [str] start date, if not given it will be taken from the already calculated Metrics table end: [str] end date, if not given it will be taken from the already calculated Metrics table fmt : [str] format of the given dates. 
The default is "%Y-%m-%d" + Returns ------- summary: [DataFrame] performance metrix """ - if not isinstance(self.MetricsHMvsRRM, DataFrame) and not isinstance( - self.MetricsHMvsRRM, GeoDataFrame + if not isinstance(self.metrics_hm_vs_rrm, DataFrame) and not isinstance( + self.metrics_hm_vs_rrm, GeoDataFrame ): raise ValueError( - "please calculate first the MetricsHMvsRRM by the method HMvsRRM" + "please calculate first the metrics_hm_vs_rrm by the method HMvsRRM" ) - gauge = self.getGauges(subid, gaugei) + gauge = self.getGauges(reach_id, gaugei) gauge_id = gauge.loc[0, self.gauge_id_col] gaugename = str(gauge.loc[0, "name"]) summary = pd.DataFrame( index=["HM-RRM", "RRM-Observed", "HM-Q-Observed", "HM-WL-Observed"], - columns=self.MetricsHMvsRRM.columns, + columns=self.metrics_hm_vs_rrm.columns, ) - # for each gauge in the segment - if isinstance(self.MetricsHMQvsObs, DataFrame) or isinstance( - self.MetricsHMQvsObs, GeoDataFrame + # for each gauge in the reach + if isinstance(self.metrics_hm_q_vs_obs, DataFrame) or isinstance( + self.metrics_hm_q_vs_obs, GeoDataFrame ): - summary.loc["HM-Q-Observed", :] = self.MetricsHMQvsObs.loc[gauge_id, :] + summary.loc["HM-Q-Observed", :] = self.metrics_hm_q_vs_obs.loc[gauge_id, :] if gauge.loc[0, "waterlevel"] == 1 and gauge.loc[0, "discharge"] == 1: fig, (ax1, ax2) = plt.subplots( @@ -1907,24 +1937,26 @@ def InspectGauge( if gauge_id in self.rrm_gauges: # there are RRM simulated data - summary.loc["HM-RRM", :] = self.MetricsHMvsRRM.loc[gauge_id, :] - if isinstance(self.MetricsRRMvsObs, DataFrame) or isinstance( - self.MetricsRRMvsObs, GeoDataFrame + summary.loc["HM-RRM", :] = self.metrics_hm_vs_rrm.loc[gauge_id, :] + if isinstance(self.metrics_rrm_vs_obs, DataFrame) or isinstance( + self.metrics_rrm_vs_obs, GeoDataFrame ): - summary.loc["RRM-Observed", :] = self.MetricsRRMvsObs.loc[gauge_id, :] + summary.loc["RRM-Observed", :] = self.metrics_rrm_vs_obs.loc[ + gauge_id, : + ] if start == "": - start_1 = self.MetricsHMvsRRM.loc[gauge_id, "start"] + start_1 = self.metrics_hm_vs_rrm.loc[gauge_id, "start"] else: s1 = dt.datetime.strptime(start, fmt) - s2 = self.MetricsHMvsRRM.loc[gauge_id, "start"] + s2 = self.metrics_hm_vs_rrm.loc[gauge_id, "start"] start_1 = max(s1, s2) if end == "": - end_1 = self.MetricsHMvsRRM.loc[gauge_id, "end"] + end_1 = self.metrics_hm_vs_rrm.loc[gauge_id, "end"] else: e1 = dt.datetime.strptime(end, fmt) - e2 = self.MetricsHMvsRRM.loc[gauge_id, "end"] + e2 = self.metrics_hm_vs_rrm.loc[gauge_id, "end"] end_1 = min(e1, e2) ax1.plot(self.q_hm[gauge_id].loc[start_1:end_1], label="HM", zorder=5) @@ -1937,35 +1969,37 @@ def InspectGauge( # pos = max(SimMax, ObsMax) if gauge.loc[0, "waterlevel"] == 1: # there are water level observed data - summary.loc["HM-WL-Observed", :] = self.MetricsHMWLvsObs.loc[gauge_id, :] + summary.loc["HM-WL-Observed", :] = self.metrics_hm_wl_vs_obs.loc[ + gauge_id, : + ] if start == "": - start_2 = self.MetricsHMWLvsObs.loc[gauge_id, "start"] + start_2 = self.metrics_hm_wl_vs_obs.loc[gauge_id, "start"] else: s1 = dt.datetime.strptime(start, fmt) - s2 = self.MetricsHMWLvsObs.loc[gauge_id, "start"] + s2 = self.metrics_hm_wl_vs_obs.loc[gauge_id, "start"] start_2 = max(s1, s2) if end == "": - end_2 = self.MetricsHMWLvsObs.loc[gauge_id, "end"] + end_2 = self.metrics_hm_wl_vs_obs.loc[gauge_id, "end"] else: e1 = dt.datetime.strptime(end, fmt) - e2 = self.MetricsHMWLvsObs.loc[gauge_id, "end"] + e2 = self.metrics_hm_wl_vs_obs.loc[gauge_id, "end"] end_2 = min(e1, e2) - 
ax2.plot(self.WLHM[gauge_id].loc[start_2:end_2], label="HM", linewidth=2) + ax2.plot(self.wl_hm[gauge_id].loc[start_2:end_2], label="HM", linewidth=2) ax2.plot( - self.WLGauges[gauge_id].loc[start_2:end_2], + self.wl_gauges[gauge_id].loc[start_2:end_2], label="Observed", linewidth=2, ) ax2.set_ylabel("Water Level m", fontsize=12) ax2.legend(fontsize=15) - # SimMax = max(self.WLHM[gauge_id].loc[start_2:end_2]) - # ObsMax = max(self.WLGauges[gauge_id].loc[start_2: end_2]) + # SimMax = max(self.wl_hm[gauge_id].loc[start_2:end_2]) + # ObsMax = max(self.wl_gauges[gauge_id].loc[start_2: end_2]) # pos = max(SimMax, ObsMax) - # plt.legend(fontsize=15) + # plt.legend(font_size=15) ax1.set_title(gaugename, fontsize=30) ax1.set_title(gaugename, fontsize=30) @@ -2016,7 +2050,7 @@ def prepareToSave(df: DataFrame) -> DataFrame: ) return df - def SaveMetices(self, path): + def saveMetices(self, path): """SaveMetices. SaveMetices saves the calculated metrics @@ -2029,40 +2063,40 @@ def SaveMetices(self, path): ------- None """ - if isinstance(self.MetricsHMvsRRM, GeoDataFrame) or isinstance( - self.MetricsHMvsRRM, DataFrame + if isinstance(self.metrics_hm_vs_rrm, GeoDataFrame) or isinstance( + self.metrics_hm_vs_rrm, DataFrame ): - df = self.prepareToSave(self.MetricsHMvsRRM.copy()) - if isinstance(self.MetricsHMvsRRM, GeoDataFrame): + df = self.prepareToSave(self.metrics_hm_vs_rrm.copy()) + if isinstance(self.metrics_hm_vs_rrm, GeoDataFrame): df.to_file(path + "MetricsHM_Q_RRM.geojson", driver="GeoJSON") - if isinstance(self.MetricsHMvsRRM, DataFrame): + if isinstance(self.metrics_hm_vs_rrm, DataFrame): df.to_csv(path + "MetricsHM_Q_RRM.geojson.csv") - if isinstance(self.MetricsHMQvsObs, GeoDataFrame) or isinstance( - self.MetricsHMQvsObs, DataFrame + if isinstance(self.metrics_hm_q_vs_obs, GeoDataFrame) or isinstance( + self.metrics_hm_q_vs_obs, DataFrame ): - df = self.prepareToSave(self.MetricsHMQvsObs.copy()) - if isinstance(self.MetricsHMQvsObs, GeoDataFrame): + df = self.prepareToSave(self.metrics_hm_q_vs_obs.copy()) + if isinstance(self.metrics_hm_q_vs_obs, GeoDataFrame): df.to_file(path + "MetricsHM_Q_Obs.geojson", driver="GeoJSON") - if isinstance(self.MetricsHMQvsObs, DataFrame): + if isinstance(self.metrics_hm_q_vs_obs, DataFrame): df.to_csv(path + "MetricsHM_Q_Obs.geojson.csv") - if isinstance(self.MetricsRRMvsObs, GeoDataFrame) or isinstance( - self.MetricsRRMvsObs, DataFrame + if isinstance(self.metrics_rrm_vs_obs, GeoDataFrame) or isinstance( + self.metrics_rrm_vs_obs, DataFrame ): - df = self.prepareToSave(self.MetricsRRMvsObs.copy()) - if isinstance(self.MetricsRRMvsObs, GeoDataFrame): + df = self.prepareToSave(self.metrics_rrm_vs_obs.copy()) + if isinstance(self.metrics_rrm_vs_obs, GeoDataFrame): df.to_file(path + "MetricsRRM_Q_Obs.geojson", driver="GeoJSON") - if isinstance(self.MetricsRRMvsObs, DataFrame): + if isinstance(self.metrics_rrm_vs_obs, DataFrame): df.to_csv(path + "MetricsRRM_Q_Obs.geojson.csv") - if isinstance(self.MetricsHMWLvsObs, GeoDataFrame) or isinstance( - self.MetricsHMWLvsObs, DataFrame + if isinstance(self.metrics_hm_wl_vs_obs, GeoDataFrame) or isinstance( + self.metrics_hm_wl_vs_obs, DataFrame ): - df = self.prepareToSave(self.MetricsHMWLvsObs.copy()) - if isinstance(self.MetricsHMWLvsObs, GeoDataFrame): + df = self.prepareToSave(self.metrics_hm_wl_vs_obs.copy()) + if isinstance(self.metrics_hm_wl_vs_obs, GeoDataFrame): df.to_file(path + "MetricsHM_WL_Obs.geojson", driver="GeoJSON") - if isinstance(self.MetricsHMWLvsObs, DataFrame): + if 
isinstance(self.metrics_hm_wl_vs_obs, DataFrame): df.to_csv(path + "MetricsHM_WL_Obs.geojson.csv") def ListAttributes(self): diff --git a/Hapi/hm/event.py b/Hapi/hm/event.py index 1f6864d3..9aace630 100644 --- a/Hapi/hm/event.py +++ b/Hapi/hm/event.py @@ -14,10 +14,13 @@ class Event: - # class attributes - """Event. The Event class reads all the results of the Hydraulic model to preform all kind of analysis on flood event basis and the overtopping. + """Event. - Methods: + The Event class reads all the results of the Hydraulic model to perform all kinds of analysis on flood event + basis and the overtopping. + + Methods + ------- 1- IndexToDate 2- CreateEventIndex 3- GetAllEvents @@ -39,12 +42,12 @@ def __init__( self, name, start="1950-1-1", days=36890, - leftOvertopping_Suffix="_left.txt", - RightOvertopping_Suffix="_right.txt", - DepthPrefix="DepthMax", - DurationPrefix="Duration", - ReturnPeriodPrefix="ReturnPeriod", - Compressed=True, + left_overtopping_suffix="_left.txt", + right_overtopping_suffix="_right.txt", + depth_prefix="DepthMax", + duration_prefix="Duration", + return_period_prefix="ReturnPeriod", + compressed=True, ): """Event. To instantiate the Event class you need to provide the following arguments. @@ -56,22 +59,22 @@ start date. The default is "1950-1-1". days : integer, optional length of the simulation. The default is 36890. - leftOvertopping_Suffix : [str], optional + left_overtopping_suffix : [str], optional the suffix you used to name the overtopping from the left bank files. The default is "_left.txt". - RightOvertopping_Suffix : TYPE, optional + right_overtopping_suffix : TYPE, optional the suffix you used to name the overtopping from the right bank files. The default is "_right.txt". - DepthPrefix : [str], optional + depth_prefix : [str], optional the prefix you used to name the Max depth raster result maps. The default is "DepthMax". - DurationPrefix : [str], optional + duration_prefix : [str], optional the prefix you used to name the inundation duration raster result maps. The default is "Duration". - ReturnPeriodPrefix : [str], optional + return_period_prefix : [str], optional the prefix you used to name the Return Period raster result maps. The default is "ReturnPeriod". - Compressed : [bool], optional + compressed : [bool], optional True if the result raster/ascii files are compressed. The default is True.
Returns @@ -83,28 +86,28 @@ def __init__( self.start = dt.datetime.strptime(start, "%Y-%m-%d") self.end = self.start + dt.timedelta(days=days) - self.leftOvertopping_Suffix = leftOvertopping_Suffix - self.RightOvertopping_Suffix = RightOvertopping_Suffix - self.DepthPrefix = DepthPrefix - self.DurationPrefix = DurationPrefix - self.ReturnPeriodPrefix = ReturnPeriodPrefix - self.TwoDResultPath = "" - self.Compressed = Compressed + self.left_overtopping_suffix = left_overtopping_suffix + self.right_overtopping_suffix = right_overtopping_suffix + self.depth_prefix = depth_prefix + self.duration_prefix = duration_prefix + self.return_period_prefix = return_period_prefix + self.two_d_result_path = "" + self.compressed = compressed Ref_ind = pd.date_range(self.start, self.end, freq="D") # the last day is not in the results day Ref_ind[-1] # write the number of days + 1 as python does not include the last number in the range # 19723 days so write 19724 - self.ReferenceIndex = pd.DataFrame(index=list(range(1, days + 1))) - self.ReferenceIndex["date"] = Ref_ind[:-1] + self.reference_index = pd.DataFrame(index=list(range(1, days + 1))) + self.reference_index["date"] = Ref_ind[:-1] # create dictionary to store any extracted values from maps - self.ExtractedValues = dict() - self.EventIndex = None - self.EventBeginning = None - self.EndDays = None + self.extracted_values = dict() + self.event_index = None + self.event_beginning = None + self.end_days = None # method - def IndexToDate(self): + def indexToDate(self): """IndexToDate. get the date coresponding to a given index. Returns @@ -112,22 +115,25 @@ def IndexToDate(self): Date """ # convert the index into date - dateFn = lambda i: self.ReferenceIndex.loc[i, "date"] + dateFn = lambda i: self.reference_index.loc[i, "date"] # get the date the column 'id' - date = self.EventIndex.loc[:, "id"].to_frame().applymap(dateFn) - self.EventIndex["date"] = date + date = self.event_index.loc[:, "id"].to_frame().applymap(dateFn) + self.event_index["date"] = date + + def createEventIndex(self, path: str): + """CreateEventIndex. - def CreateEventIndex(self, IndexPath: str): - """CreateEventIndex. CreateEventIndex takes the path to the index file result from the 2D model and creates a data frame to start adding the components of the EventIndex table. + CreateEventIndex takes the path to the index file result from the 2D model and creates a data frame to + start adding the components of the event_index table. 
Parameters - --------- - IndexPath: [String] + ---------- + path: [String] path including the file name and extension of the index file result from the 2D model Returns ------- - EventIndex: [dataframe] + event_index: [dataframe] this method creates an instance attribute of type dataframe with columns ['id','continue', 'IndDiff', 'Duration'] """ @@ -139,163 +145,170 @@ def CreateEventIndex(self, IndexPath: str): # read the index file (containing the id of the days where flood happens (2D # algorithm works)) - EventDays = pd.read_csv(IndexPath, header=None) + EventDays = pd.read_csv(path, header=None) EventIndex = EventDays.rename(columns={0: "id"}) # convert the index into date - self.EventIndex = EventIndex.loc[:, :] - self.IndexToDate() + self.event_index = EventIndex.loc[:, :] + self.indexToDate() - self.EventIndex.loc[:, "continue"] = 0 + self.event_index.loc[:, "continue"] = 0 # the index difference may be different from the duration as there might be # a gap in the middle of the event - self.EventIndex.loc[:, "IndDiff"] = 0 - self.EventIndex.loc[:, "Duration"] = 0 + self.event_index.loc[:, "IndDiff"] = 0 + self.event_index.loc[:, "Duration"] = 0 # the first day in the index file is an event beginning - self.EventBeginning = self.EventIndex.loc[0, "date"] - for i in range(1, len(self.EventIndex)): + self.event_beginning = self.event_index.loc[0, "date"] + for i in range(1, len(self.event_index)): # if the day is the previous day+1 - if self.EventIndex.loc[i, "id"] == self.EventIndex.loc[i - 1, "id"] + 1: + if self.event_index.loc[i, "id"] == self.event_index.loc[i - 1, "id"] + 1: # then the event continues - self.EventIndex.loc[i, "continue"] = 1 + self.event_index.loc[i, "continue"] = 1 # increase the duration - self.EventIndex.loc[i, "IndDiff"] = ( - self.EventIndex.loc[i - 1, "IndDiff"] + 1 + self.event_index.loc[i, "IndDiff"] = ( + self.event_index.loc[i - 1, "IndDiff"] + 1 ) - self.EventIndex.loc[i, "Duration"] = ( - self.EventIndex.loc[i, "date"] - self.EventBeginning + self.event_index.loc[i, "Duration"] = ( + self.event_index.loc[i, "date"] - self.event_beginning ).days + 1 else: # if not, then the day is the start of another event - self.EventBeginning = self.EventIndex.loc[i, "date"] + self.event_beginning = self.event_index.loc[i, "date"] - def GetAllEvents(self): + def getAllEvents(self): """GetAllEvents. GetAllEvents method returns the end day of all events. Returns ------- None. """ - assert hasattr(self, "EventIndex"), "please read/Create the EventIndex" + assert hasattr(self, "event_index"), "please read/Create the event_index" IDs = list() - for i in range(len(self.EventIndex)): - if self.EventIndex.loc[i, "continue"] == 0 and i != 0: - IDs.append(self.EventIndex.loc[i - 1, "id"]) + for i in range(len(self.event_index)): + if self.event_index.loc[i, "continue"] == 0 and i != 0: + IDs.append(self.event_index.loc[i - 1, "id"]) - self.EndDays = IDs + self.end_days = IDs - def Overtopping(self, OvertoppingPath: str): - """Overtopping. Overtopping method reads the overtopping file and check if the EventIndex dataframe has already need created by the CreateEventIndex method, it will add the overtopping to it, if not it will create the EventIndex dataframe. + def Overtopping(self, overtopping_path: str): + """Overtopping. Overtopping method reads the overtopping file and checks whether the event_index dataframe has already been created by the CreateEventIndex method; if so, it adds the overtopping to it, otherwise it creates the event_index dataframe.
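Taken together, createEventIndex, getAllEvents and Overtopping cover the basic event bookkeeping under the new snake_case names. A minimal usage sketch; the file names are placeholders, and only the constructor signature and methods visible in this diff are used:

    from Hapi.hm.event import Event

    # constructor arguments as shown in this diff; "Rhine" is a placeholder name
    event = Event("Rhine", start="1950-1-1", days=36890)
    event.createEventIndex("index.txt")    # day ids on which the 2D model ran
    event.getAllEvents()                   # fills event.end_days with the event end days
    event.Overtopping("overtopping.txt")   # adds Overtopping/OvertoppingCum/Volume columns
    print(event.event_index[["id", "date", "Duration", "Volume"]].head())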
Inputs: - 1- OvertoppingPath: + 1- overtopping_path: [String] path including the file name and extension of the Overtopping file result from the 1D model Outputs: - 1- EventIndex: + 1- event_index: [dataframe] this method creates an instance attribute of type dataframe with columns ['id','continue', 'IndDiff', 'Duration', 'Overtopping', 'OvertoppingCum', 'Volume'] """ - OverTopTotal = pd.read_csv(OvertoppingPath, delimiter=r"\s+") # , header = None + OverTopTotal = pd.read_csv( + overtopping_path, delimiter=r"\s+" + ) # , header = None # FIXME # if the flood event does not overtop for one day and then continues to # overtop afterwards, the method considers it as two separate events; however # it is the same event (if the gap is less than 10 days it is still # considered the same event) - if not isinstance(self.EventIndex, DataFrame): + if not isinstance(self.event_index, DataFrame): # create the dataframe if the user did not use the CreateEventIndex method to - # create the EventIndex dataframe - self.EventIndex = pd.DataFrame() - self.EventIndex["id"] = OverTopTotal["Step"] - self.IndexToDate() + # create the event_index dataframe + self.event_index = pd.DataFrame() + self.event_index["id"] = OverTopTotal["Step"] + self.indexToDate() - self.EventIndex.loc[:, "continue"] = 0 + self.event_index.loc[:, "continue"] = 0 # the index difference may be different from the duration as there might be # a gap in the middle of the event - self.EventIndex.loc[:, "IndDiff"] = 0 - self.EventIndex.loc[:, "Duration"] = 0 + self.event_index.loc[:, "IndDiff"] = 0 + self.event_index.loc[:, "Duration"] = 0 # the first day in the index file is an event beginning - self.EventBeginning = self.EventIndex.loc[0, "date"] - for i in range(1, len(self.EventIndex)): + self.event_beginning = self.event_index.loc[0, "date"] + for i in range(1, len(self.event_index)): # if the day is the previous day+1 - if self.EventIndex.loc[i, "id"] == self.EventIndex.loc[i - 1, "id"] + 1: + if ( + self.event_index.loc[i, "id"] + == self.event_index.loc[i - 1, "id"] + 1 + ): # then the event continues - self.EventIndex.loc[i, "continue"] = 1 + self.event_index.loc[i, "continue"] = 1 # increase the duration - self.EventIndex.loc[i, "IndDiff"] = ( - self.EventIndex.loc[i - 1, "IndDiff"] + 1 + self.event_index.loc[i, "IndDiff"] = ( + self.event_index.loc[i - 1, "IndDiff"] + 1 ) - self.EventIndex.loc[i, "Duration"] = ( - self.EventIndex.loc[i, "date"] - self.EventBeginning + self.event_index.loc[i, "Duration"] = ( + self.event_index.loc[i, "date"] - self.event_beginning ).days + 1 else: # if not, then the day is the start of another event - self.EventBeginning = self.EventIndex.loc[i, "date"] + self.event_beginning = self.event_index.loc[i, "date"] - # store the overtoppiung data in the EventIndex dataframe - self.EventIndex["Overtopping"] = OverTopTotal["overtopping(m3/s)"] + # store the overtopping data in the event_index dataframe + self.event_index["Overtopping"] = OverTopTotal["overtopping(m3/s)"] - self.EventIndex.loc[0, "OvertoppingCum"] = self.EventIndex.loc[0, "Overtopping"] + self.event_index.loc[0, "OvertoppingCum"] = self.event_index.loc[ + 0, "Overtopping" + ] - for i in range(1, len(self.EventIndex)): - if self.EventIndex.loc[i, "continue"] == 0: - self.EventIndex.loc[i, "OvertoppingCum"] = self.EventIndex.loc[ + for i in range(1, len(self.event_index)): + if self.event_index.loc[i, "continue"] == 0: + self.event_index.loc[i, "OvertoppingCum"] = self.event_index.loc[ i, "Overtopping" ] else: - self.EventIndex.loc[i, "OvertoppingCum"] = ( - self.EventIndex.loc[i, "Overtopping"] - + self.EventIndex.loc[i - 1, "OvertoppingCum"] + self.event_index.loc[i, "OvertoppingCum"] = ( + self.event_index.loc[i, "Overtopping"] + + self.event_index.loc[i - 1, "OvertoppingCum"] ) # the volume of water is m3/s for hourly stored and accumulated values # volume = overtopping * 60 *60 = m3 - self.EventIndex.loc[:, "Volume"] = ( - self.EventIndex.loc[:, "OvertoppingCum"] * 60 * 60 + self.event_index.loc[:, "Volume"] = ( + self.event_index.loc[:, "OvertoppingCum"] * 60 * 60 ) - def VolumeError(self, Path): + def calculateVolumeError(self, path): """VolumeError. VolumeError method reads the VolError file and assigns values to the corresponding time step. Parameters ---------- - Path : [String] + path : [String] a path to the folder including the maps. Returns ------- - EventIndex: [dataframe attribute]. - add columns ['DEMError','StepError','TooMuchWater'] to the EventIndex dataframe + event_index: [dataframe attribute]. + add columns ['DEMError','StepError','TooMuchWater'] to the event_index dataframe """ # read the VolError file - VolError = pd.read_csv(Path, delimiter=r"\s+") - self.EventIndex["DEMError"] = 0 - self.EventIndex["StepError"] = 0 - self.EventIndex["TooMuchWater"] = 0 + VolError = pd.read_csv(path, delimiter=r"\s+") + self.event_index["DEMError"] = 0 + self.event_index["StepError"] = 0 + self.event_index["TooMuchWater"] = 0 for i in range(len(VolError)): - loc = np.where(VolError.loc[i, "step"] == self.EventIndex.loc[:, "id"])[0][ + loc = np.where(VolError.loc[i, "step"] == self.event_index.loc[:, "id"])[0][ 0 ] - self.EventIndex.loc[ + self.event_index.loc[ loc, ["DEMError", "StepError", "TooMuchWater"] ] = VolError.loc[ i, ["DEM_Error", "PreviousDepthError", "TOOMuchWaterError"] ].tolist() - self.EventIndex["VolError"] = ( - self.EventIndex["StepError"] - + self.EventIndex["DEMError"] - + self.EventIndex["TooMuchWater"] + self.event_index["VolError"] = ( + self.event_index["StepError"] + + self.event_index["DEMError"] + + self.event_index["TooMuchWater"] ) - self.EventIndex["VolError2"] = self.EventIndex["VolError"] / 20 + self.event_index["VolError2"] = self.event_index["VolError"] / 20 - def OverlayMaps(self, Path, BaseMapF, ExcludedValue, OccupiedCellsOnly, SavePath): - """OverlayMaps. OverlayMaps method reads all the maps in the folder given by Path input and overlay them with the basemap and for each value in the basemap it create a dictionary with the intersected values from all maps. + def overlayMaps(self, path, BaseMapF, ExcludedValue, OccupiedCellsOnly, SavePath): + """OverlayMaps. OverlayMaps method reads all the maps in the folder given by the path input, overlays them with the basemap, and for each value in the basemap creates a dictionary with the intersected values from all maps. Parameters ---------- - Path: [String] + path: [String] a path to the folder including the maps.
BaseMapF: [String] a path including the name of the ASCII and extension like @@ -311,18 +324,18 @@ Returns ------- - ExtractedValues: [Dict] + extracted_values: [Dict] dictionary with a list of values in the basemap as keys and for each key a list of all the intersected values in the maps from the path NonZeroCells: [dataframe] dataframe with the first column as the "file" name and the second column is the number of cells in each map """ self.DepthValues, NonZeroCells = Raster.overlayMaps( - Path, + path, BaseMapF, - self.DepthPrefix, + self.depth_prefix, ExcludedValue, - self.Compressed, + self.compressed, OccupiedCellsOnly, ) @@ -330,22 +343,22 @@ # is the number of cells in each map NonZeroCells["days"] = [ - int(i[len(self.DepthPrefix) : -4]) for i in NonZeroCells["files"].tolist() + int(i[len(self.depth_prefix) : -4]) for i in NonZeroCells["files"].tolist() ] # get the number of inundated cells in the Event index data frame - self.EventIndex["cells"] = 0 + self.event_index["cells"] = 0 for i in range(len(NonZeroCells)): - # get the location in the EventIndex dataframe + # get the location in the event_index dataframe try: loc = np.where( - NonZeroCells.loc[i, "days"] == self.EventIndex.loc[:, "id"] + NonZeroCells.loc[i, "days"] == self.event_index.loc[:, "id"] )[0][0] except IndexError: # if it does not find the event in the eventindex table, skip it continue # store the number of cells - self.EventIndex.loc[loc, "cells"] = NonZeroCells.loc[i, "cells"] + self.event_index.loc[loc, "cells"] = NonZeroCells.loc[i, "cells"] # save depths of each sub-basin inundatedSubs = list(self.DepthValues.keys()) @@ -356,24 +369,27 @@ fmt="%4.2f", ) - def ReadEventIndex(self, Path): - """ReadEventIndex ReadEventIndex method reads the EventIndex table created using the "CreateEventIndex" or "Overtopping" methods. + def readEventIndex(self, path): + """ReadEventIndex. + + ReadEventIndex method reads the event_index table created using the "CreateEventIndex" or + "Overtopping" methods. Parameters ---------- - Path : [str] - Path to the EventIndex file. + path : [str] + path to the event_index file. Returns ------- - EventIndex : [dataframe]. - dataframe of the EventIndex table + event_index : [dataframe]. + dataframe of the event_index table """ - EventIndex = pd.read_csv(Path) - self.EventIndex = EventIndex - self.IndexToDate() + EventIndex = pd.read_csv(path) + self.event_index = EventIndex + self.indexToDate() - def Histogram( + def histogram( self, Day, ExcludeValue, OccupiedCellsOnly, Map=1, filter1=0.2, filter2=15 ): """Histogram. Histogram method extracts values from the event MaxDepth map and plots the histogram; the method first checks whether the values have already been extracted, then plots the histogram. @@ -389,40 +405,48 @@ def Histogram( Map : [integer], optional 1 for the max depth maps, 2 for the duration map, 3 for the return period maps. The default is 1. + filter1: [float, int] + Default is 0.2 + filter2: [float, int] + Default is 15 Returns ------- n, bins, patches as returned by matplotlib's hist.
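A hedged sketch of calling the renamed histogram method; the day number, file names and result folder are placeholders, and the extracted values are cached in event.extracted_values after the first call:

    from Hapi.hm.event import Event

    event = Event("Rhine", start="1950-1-1", days=36890)   # placeholder name
    event.two_d_result_path = "results/2d/"                # assumed folder layout
    event.readEventIndex("event_index.csv")                # table written earlier by save()
    # Map=1 selects the max-depth maps; values outside (filter1, filter2) are dropped
    n, bins, patches = event.histogram(Day=19723, ExcludeValue=0,
                                       OccupiedCellsOnly=False, Map=1)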
""" - # check if the object has the attribute ExtractedValues - if hasattr(self, "ExtractedValues"): + # check if the object has the attribute extracted_values + if hasattr(self, "extracted_values"): # get the list of event that then object has their Extractedvalues - if Day not in list(self.ExtractedValues.keys()): + if Day not in list(self.extracted_values.keys()): # depth map if Map == 1: - Path = self.TwoDResultPath + self.DepthPrefix + str(Day) + ".zip" + path = ( + self.two_d_result_path + self.depth_prefix + str(Day) + ".zip" + ) elif Map == 2: - Path = self.TwoDResultPath + self.DurationPrefix + str(Day) + ".zip" - else: - Path = ( - self.TwoDResultPath - + self.ReturnPeriodPrefix + path = ( + self.two_d_result_path + + self.duration_prefix + str(Day) + ".zip" ) + else: + path = ( + f"{self.two_d_result_path}{self.return_period_prefix}{Day}.zip" + ) ExtractedValues, NonZeroCells = Raster.extractValues( - Path, ExcludeValue, self.Compressed, OccupiedCellsOnly + path, ExcludeValue, self.compressed, OccupiedCellsOnly ) - self.ExtractedValues[Day] = ExtractedValues + self.extracted_values[Day] = ExtractedValues - ExtractedValues = self.ExtractedValues[Day] + ExtractedValues = self.extracted_values[Day] # filter values ExtractedValues = [j for j in ExtractedValues if j > filter1] ExtractedValues = [j for j in ExtractedValues if j < filter2] # plot - # fig, ax1 = plt.subplots(figsize=(10,8)) - # ax1.hist(ExtractedValues, bins=15, alpha = 0.4) #width = 0.2, + # fig, ax1 = plt.subplots(fig_size=(10,8)) + # ax1.hist(extracted_values, bins=15, alpha = 0.4) #width = 0.2, n, bins, patches = plt.hist( x=ExtractedValues, bins=15, color="#0504aa", alpha=0.7, rwidth=0.85 @@ -435,47 +459,47 @@ def Histogram( plt.ylabel("Frequency", fontsize=15) plt.tight_layout() - # plt.title('Normal Distribution Histogram matplotlib',fontsize=15) + # plt.title('Normal Distribution Histogram matplotlib',font_size=15) plt.show() return n, bins, patches - def Drop(self, DropList): - """Drop Drop method deletes columns from the EventIndex dataframe. + def drop(self, DropList): + """Drop Drop method deletes columns from the event_index dataframe. Parameters ---------- DropList: [list] - list of column names to delete from the EventIndex dataframe table + list of column names to delete from the event_index dataframe table Returns ------- - EventIndex: [datadrame] - the EventIndex dataframe without the columns in the Droplist + event_index: [datadrame] + the event_index dataframe without the columns in the Droplist """ - dataframe = self.EventIndex.loc[:, :] + dataframe = self.event_index.loc[:, :] columns = list(dataframe.columns) [columns.remove(i) for i in DropList] dataframe = dataframe.loc[:, columns] - self.EventIndex = dataframe + self.event_index = dataframe - def Save(self, Path): - """Save Save method saves the EventIndex table. + def save(self, path): + """Save Save method saves the event_index table. Parameters ---------- - Path : [str] - Path to where you want to save the table. + path : [str] + path to where you want to save the table. Returns ------- None. """ - self.EventIndex.to_csv(Path, header=True, index=None) # index_label = "Index" + self.event_index.to_csv(path, header=True, index=None) # index_label = "Index" - def GetEventBeginning(self, loc): - """GetEventBeginning. EventBeginning method returns the index of the beginning of the event in the EventIndex dataframe. + def getEventBeginning(self, loc): + """GetEventBeginning. 
event_beginning method returns the index of the beginning of the event in the event_index dataframe. Parameters ---------- @@ -491,31 +515,31 @@ def GetEventBeginning(self, loc): ------ 1- if you want to get the beginning of the event that has the highest overtopping - HighOvertoppingInd = EventIndex['Overtopping'].idxmax() - ind = EventBeginning(HighOvertoppingInd) + HighOvertoppingInd = event_index['Overtopping'].idxmax() + ind = event_beginning(HighOvertoppingInd) """ - # loc = np.where(self.EventIndex['id'] == day)[0][0] + # loc = np.where(self.event_index['id'] == day)[0][0] # get all the days in the same event before that day as the inundation in the maps may # happen due to any of the days before not in this day - ind = self.EventIndex.index[loc - self.EventIndex.loc[loc, "IndDiff"]] - day = self.EventIndex.loc[ind, "id"] + ind = self.event_index.index[loc - self.event_index.loc[loc, "IndDiff"]] + day = self.event_index.loc[ind, "id"] return ind, day # # filter the dataframe and get only the 'indDiff' and 'id' columns - # FilteredEvent = self.EventIndex.loc[:,['IndDiff','id']] + # FilteredEvent = self.event_index.loc[:,['IndDiff','id']] # FilteredEvent['diff'] = FilteredEvent.index - ind # # get only days before the day you inputed # FilteredEvent = FilteredEvent[FilteredEvent['diff'] <=0 ] # # start to search from down to up till you get the first 0 in the IndDiff - # for i in range(self.EventIndex['Duration'].max()): + # for i in range(self.event_index['Duration'].max()): # if FilteredEvent.loc[len(FilteredEvent)-1-i,'IndDiff'] == 0: # break # return FilteredEvent.index[len(FilteredEvent)-1-i] - def GetEventEnd(self, loc): - """GetEventEnd. method returns the index of the beginning of the event in the EventIndex dataframe. + def getEventEnd(self, loc): + """GetEventEnd. method returns the index of the beginning of the event in the event_index dataframe. Parameters ---------- @@ -531,15 +555,15 @@ def GetEventEnd(self, loc): ------- 1- if you want to get the beginning of the event that has the highest overtopping - HighOvertoppingInd = EventIndex['Overtopping'].idxmax() - ind = EventBeginning(HighOvertoppingInd) + HighOvertoppingInd = event_index['Overtopping'].idxmax() + ind = event_beginning(HighOvertoppingInd) """ - # loc = np.where(self.EventIndex['id'] == day)[0][0] + # loc = np.where(self.event_index['id'] == day)[0][0] # get all the days in the same event before that day as the inundation in the maps may # happen due to any of the days before not in this day # filter the dataframe and get only the 'indDiff' and 'id' columns - FilteredEvent = self.EventIndex.loc[:, ["continue", "id"]] + FilteredEvent = self.event_index.loc[:, ["continue", "id"]] # get only days before the day you inputed for i in range(loc + 1, len(FilteredEvent)): # start search from the following day @@ -547,12 +571,12 @@ def GetEventEnd(self, loc): break ind = i - 1 - day = self.EventIndex.loc[ind, "id"] + day = self.event_index.loc[ind, "id"] return ind, day - def PrepareForPlotting(self, ColumnName): - """PrepareForPlotting. PrepareForPlotting takes a time series in the EventIndex dataframe and fill the days that does not exist in date column and fill it with zero to properly plot it without letting the graph mislead the viewer of connecting the data over the gap period. + def prepareForPlotting(self, ColumnName): + """PrepareForPlotting. 
PrepareForPlotting takes a time series from the event_index dataframe, adds the days that do not exist in the date column, and fills them with zero, so that the plot does not mislead the viewer by connecting the data across the gap periods. Parameters ---------- @@ -565,11 +589,13 @@ dataframe with a date column, and the required column """ NewDataFrame = pd.DataFrame() - NewDataFrame["date"] = self.ReferenceIndex["date"].tolist() + NewDataFrame["date"] = self.reference_index["date"].tolist() NewDataFrame[ColumnName] = 0 - for i in range(len(self.EventIndex)): - loc = np.where(NewDataFrame["date"] == self.EventIndex.loc[i, "date"])[0][0] - NewDataFrame.loc[loc, ColumnName] = self.EventIndex.loc[i, ColumnName] + for i in range(len(self.event_index)): + loc = np.where(NewDataFrame["date"] == self.event_index.loc[i, "date"])[0][ + 0 + ] + NewDataFrame.loc[loc, ColumnName] = self.event_index.loc[i, ColumnName] return NewDataFrame @@ -578,11 +604,7 @@ def ListAttributes(self): print("\n") print( - "Attributes List of: " - + repr(self.__dict__["name"]) - + " - " - + self.__class__.__name__ - + " Instance\n" + f"Attributes List of: {repr(self.__dict__['name'])} - {self.__class__.__name__} Instance\n" ) self_keys = list(self.__dict__.keys()) self_keys.sort() diff --git a/Hapi/hm/inputs.py b/Hapi/hm/inputs.py index b4b79143..f47d2cfe 100644 --- a/Hapi/hm/inputs.py +++ b/Hapi/hm/inputs.py @@ -34,7 +34,7 @@ class Inputs(River): """ def __init__(self, Name, version=3): - """Inputs. + """Input. Inputs is an object to create the inputs for the river model @@ -50,28 +50,28 @@ self.StatisticalPr = None self.DistributionPr = None - def ExtractHydrologicalInputs( - self, WeatherGenerator, FilePrefix, realization, path, SWIMNodes, SavePath + def extractHydrologicalInputs( + self, weather_generator, file_prefix, realization, path, locations, save_path ): """ExtractHydrologicalInputs. Parameters ---------- - WeatherGenerator : TYPE + weather_generator : TYPE DESCRIPTION. - FilePrefix : TYPE + file_prefix : TYPE DESCRIPTION. realization : [Integer] the number of the realization (the order of the 100-year run by SWIM). path : [String] - SWIMResultFile is the naming format you used in naming the result + rrm_result_file is the naming format you used in naming the result files of the discharge values stored with the name of the file as out+realization number + .dat (ex out15.dat). - SWIMNodes : [String] + locations : [String] text file containing the list of sub-basin IDs or computational node IDs you - have used to run SWIM and store the results. - SavePath : [String] + have used to run the RRM and store the results. + save_path : [String] path to the folder where you want to save the separate file for each sub-basin. @@ -79,23 +79,23 @@ ------- None.
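A hedged usage sketch for the renamed extractHydrologicalInputs; the realization number, paths and file names are placeholders following the out+realization+.dat convention described above:

    from Hapi.hm.inputs import Inputs

    inputs = Inputs("Rhine")                  # constructor signature as shown in this diff
    inputs.extractHydrologicalInputs(
        weather_generator=False,              # observed data, not WG realizations
        file_prefix="out",
        realization=15,                       # reads out15.dat
        path="rrm_results",
        locations="nodes.txt",                # one sub-basin/node id per line
        save_path="rrm_results/separated",    # one .txt file written per node id
    )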
""" - if WeatherGenerator: - """WeatherGenerator.""" - SWIMResultFile = FilePrefix + str(realization) + ".dat" + if weather_generator: + """weather_generator.""" + rrm_result_file = file_prefix + str(realization) + ".dat" # 4-5 # check whether the the name of the realization the same as the name of 3 # the saving file or not to prevent any confusion in saving the files if int(realization) <= 9: - assert int(SWIMResultFile[-5:-4]) == int( - SavePath[-1] + assert int(rrm_result_file[-5:-4]) == int( + save_path[-1] ), " Wrong files sync " else: - assert int(SWIMResultFile[-6:-4]) == int( - SavePath[-2:] + assert int(rrm_result_file[-6:-4]) == int( + save_path[-2:] ), " Wrong files sync " else: """Observed data.""" - SWIMResultFile = FilePrefix + str(realization) + ".dat" + rrm_result_file = file_prefix + str(realization) + ".dat" """ SWIM writes the year as the first colmun then day as a second column and the @@ -106,13 +106,13 @@ def ExtractHydrologicalInputs( # read SWIM result file SWIMData = pd.read_csv( - path + "/" + SWIMResultFile, delimiter=r"\s+", header=None + f"{path}/{rrm_result_file}", delimiter=r"\s+", header=None ) - Nodes = pd.read_csv(path + "/" + SWIMNodes, header=None) + Nodes = pd.read_csv(f"{path}/{locations}", header=None) for i in range(len(Nodes)): SWIMData.loc[:, i + ignoreColumns].to_csv( - SavePath + "/" + str(Nodes.loc[i, 0]) + ".txt", header=None, index=None + f"{save_path}/{Nodes.loc[i, 0]}.txt", header=None, index=None ) def statisticalProperties( @@ -471,6 +471,7 @@ def returnPeriod( SaveTo, wpath, ): + """Return period.""" AllResults = os.listdir(MapsPath) # list of the Max Depth files only MaxDepthList = list() @@ -772,7 +773,7 @@ def CreateTraceALL( # ToSave = Subs.loc[:,['SubID','US']] # ToSave['Extra column 1'] = -1 # ToSave['Extra column 2'] = -1 - # ToSave.to_csv(SavePath + TraceFile,header = None, index = None) + # ToSave.to_csv(save_path + TraceFile,header = None, index = None) def ListAttributes(self): """ListAttributes. 
diff --git a/Hapi/hm/interface.py b/Hapi/hm/interface.py index ec78642d..b1a3ae47 100644 --- a/Hapi/hm/interface.py +++ b/Hapi/hm/interface.py @@ -51,12 +51,12 @@ def __init__( self.start = dt.datetime.strptime(start, fmt) self.end = self.start + dt.timedelta(days=days) Ref_ind = pd.date_range(self.start, self.end, freq="D") - self.ReferenceIndex = pd.DataFrame(index=list(range(1, days + 1))) - self.ReferenceIndex["date"] = Ref_ind[:-1] + self.reference_Index = pd.DataFrame(index=list(range(1, days + 1))) + self.reference_Index["date"] = Ref_ind[:-1] - self.LateralsTable = None - self.routedRRM = None - self.BCTable = None + self.laterals_table = None + self.routed_rrm = None + self.bc_table = None self.BC = None pass @@ -87,28 +87,28 @@ def readLateralsTable( dataframe with two columns ["filename", "sxid"] """ try: - self.LateralsTable = pd.read_csv(path, skiprows=[0], header=None) + self.laterals_table = pd.read_csv(path, skiprows=[0], header=None) except pd.errors.EmptyDataError: - self.LateralsTable = pd.DataFrame() + self.laterals_table = pd.DataFrame() logger.warning("The Lateral table file is empty") return - self.LateralsTable.columns = ["filename"] + self.laterals_table.columns = ["filename"] l1 = len(prefix) l2 = len(suffix) - self.LateralsTable["xsid"] = [ + self.laterals_table["xsid"] = [ int(i[l1 : len(i) - l2]) - for i in self.LateralsTable[self.LateralsTable.columns[0]] + for i in self.laterals_table[self.laterals_table.columns[0]] ] - if hasattr(self, "crosssections"): - self.crosssections["lateral"] = 0 - for i in range(len(self.crosssections)): + if hasattr(self, "cross_sections"): + self.cross_sections["lateral"] = 0 + for i in range(len(self.cross_sections)): if ( - self.crosssections.loc[i, "xsid"] - in self.LateralsTable["xsid"].tolist() + self.cross_sections.loc[i, "xsid"] + in self.laterals_table["xsid"].tolist() ): - self.crosssections.loc[i, "lateral"] = 1 + self.cross_sections.loc[i, "lateral"] = 1 else: raise ValueError( "Please read the cross section file first using the method 'ReadCrossSections'" @@ -117,8 +117,8 @@ def readLateralsTable( def _readRRMwrapper( self, table: DataFrame, - fromday: int = None, - today: int = None, + from_day: int = None, + to_day: int = None, path: str = "", date_format: str = "'%Y-%m-%d'", prefix: str = "lf_xsid", @@ -131,10 +131,10 @@ def _readRRMwrapper( Parameters ---------- table: [DataFrame] - LateralsTable, or BCTable - fromday : [integer], optional + laterals_table, or bc_table + from_day : [integer], optional the day you want to read the result from, the first day is 1 not zero.The default is ''. - today : [integer], optional + to_day : [integer], optional the day you want to read the result to. path : [String], optional path to read the results from. The default is ''. 
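The xsid extraction in readLateralsTable (and in readBoundaryConditionsTable further below) is plain prefix/suffix stripping; a standalone sketch with placeholder file names:

    # mirrors int(i[l1 : len(i) - l2]) from the methods in this file
    prefix, suffix = "lf_xsid", ".txt"
    filenames = ["lf_xsid230.txt", "lf_xsid231.txt"]   # placeholder lateral files
    xsids = [int(name[len(prefix): len(name) - len(suffix)]) for name in filenames]
    print(xsids)   # -> [230, 231]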
@@ -164,11 +164,11 @@ def _readRRMwrapper( results = Parallel(n_jobs=cores)( delayed(func)( self.version, - self.ReferenceIndex, + self.reference_Index, path, fname, - fromday, - today, + from_day, + to_day, date_format, ) for fname in fnames @@ -185,38 +185,39 @@ rrm_ts[node_id] = self._readRRMResults( self.version, - self.ReferenceIndex, + self.reference_Index, path, fname, - fromday, - today, + from_day, + to_day, date_format, )[fname].tolist() logger.info(f"Lateral file {fname} is read") rrm_ts["total"] = rrm_ts.sum(axis=1) - if not fromday: - fromday = 1 - if not today: - today = len(rrm_ts[rrm_ts.columns[0]]) + if not from_day: + from_day = 1 + if not to_day: + to_day = len(rrm_ts[rrm_ts.columns[0]]) - start = self.ReferenceIndex.loc[fromday, "date"] - end = self.ReferenceIndex.loc[today, "date"] + start = self.reference_Index.loc[from_day, "date"] + end = self.reference_Index.loc[to_day, "date"] rrm_ts.index = pd.date_range(start, end, freq="D") return rrm_ts def readLaterals( self, - fromday: int = None, - today: int = None, + from_day: int = None, + to_day: int = None, path: str = "", date_format: str = "'%Y-%m-%d'", cores: Optional[Union[int, bool]] = None, prefix: str = "lf_xsid", laterals: Optional[bool] = True, ): - """readLaterals. + """Read Lateral files. + TODO: rename this function, as it would be better to name it readRRMoutputs - read the laterals at the location of cross-sections (if laterals=True) - read the routed hydrograph by the rainfall-runoff model at the location @@ -224,9 +225,9 @@ Parameters ---------- - fromday : [integer], optional + from_day : [integer], optional the day you want to read the result from; the first day is 1, not zero. The default is ''. - today : [integer], optional + to_day : [integer], optional the day you want to read the result to. path : [String], optional path to read the results from. The default is ''. @@ -239,7 +240,7 @@ prefix: [str] prefix used to distinguish the boundary condition files, Default is "lf_xsid". laterals: Optional[bool] - True if you want to read the laterals, false if you want to read the routedRRM + True if you want to read the laterals, False if you want to read the routed_rrm Default is True. Returns @@ -249,7 +250,7 @@ with xsid as a column name and a column 'total' that contains the sum of all the hydrographs. this attribute will be created only if the laterals argument is True [default] - routedRRM: [dataframe attribute] + routed_rrm: [dataframe attribute] read the routed hydrograph by the rainfall-runoff model at the location of the lateral cross-sections dataframe contains the hydrograph of each of the laterals at the location of the xs with xsid as a column name and a column 'total' that contains the sum of all the hydrographs. this attribute will be created only if the laterals argument is False """ - if not isinstance(self.LateralsTable, DataFrame): + if not isinstance(self.laterals_table, DataFrame): raise ValueError( "Please read the laterals table first using the 'ReadLateralsTable' method" ) - if len(self.LateralsTable) > 0: + if len(self.laterals_table) > 0: rrm_df = self._readRRMwrapper( - self.LateralsTable, - fromday=fromday, - today=today, + self.laterals_table, + from_day=from_day, + to_day=to_day, path=path, date_format=date_format, prefix=prefix, @@ -276,7 +277,7 @@ if laterals: self.Laterals = rrm_df else: - self.routedRRM = rrm_df + self.routed_rrm = rrm_df else: logger.info("There are no Laterals table please check") @@ -303,18 +304,18 @@ def readBoundaryConditionsTable(self, path, prefix="bc_xsid", suffix=".txt"): ------- None. """ - self.BCTable = pd.read_csv(path, skiprows=[0], header=None) - self.BCTable.columns = ["filename"] + self.bc_table = pd.read_csv(path, skiprows=[0], header=None) + self.bc_table.columns = ["filename"] l1 = len(prefix) l2 = len(suffix) - self.BCTable["xsid"] = [ - int(i[l1 : len(i) - l2]) for i in self.BCTable[self.BCTable.columns[0]] + self.bc_table["xsid"] = [ + int(i[l1 : len(i) - l2]) for i in self.bc_table[self.bc_table.columns[0]] ] def readBoundaryConditions( self, - fromday: Union[str, int] = "", - today: Union[str, int] = "", + from_day: Union[str, int] = "", + to_day: Union[str, int] = "", path: str = "", date_format: str = "'%Y-%m-%d'", prefix: str = "bc_xsid", @@ -326,9 +327,9 @@ Parameters ---------- - fromday : [integer], optional + from_day : [integer], optional the day you want to read the result from; the first day is 1, not zero. The default is ''. - today : [integer], optional + to_day : [integer], optional the day you want to read the result to. path : [String], optional path to read the results from. The default is ''. @@ -343,21 +344,21 @@ Returns ------- - USHydrographs : [dataframe attribute]. + us_hydrographs : [dataframe attribute]. dataframe contains the hydrograph of each of the upstream segments with segment id as a column name and a column 'total' that contains the sum of all the hydrographs.
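A hedged end-to-end sketch of the boundary-condition readers; the Interface class name, the constructor arguments and all paths are assumptions (the class definition itself sits outside these hunks):

    from Hapi.hm.interface import Interface   # assumed import path

    interface = Interface("Rhine", start="1955-1-1", days=365)   # assumed signature
    interface.readBoundaryConditionsTable("bc_table.txt", prefix="bc_xsid")
    interface.readBoundaryConditions(path="results/usbc/")
    # BC holds one column per upstream cross-section plus a 'total' column
    print(interface.BC["total"].head())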
""" - if not isinstance(self.BCTable, DataFrame): + if not isinstance(self.bc_table, DataFrame): raise ValueError( "Please read the lateras table first using the 'ReadLateralsTable' method" ) self.BC = pd.DataFrame() self.BC = self._readRRMwrapper( - self.BCTable, - fromday=fromday, - today=today, + self.bc_table, + from_day=from_day, + to_day=to_day, path=path, date_format=date_format, prefix=prefix, diff --git a/Hapi/hm/river.py b/Hapi/hm/river.py index dac262d4..95e859fc 100644 --- a/Hapi/hm/river.py +++ b/Hapi/hm/river.py @@ -19,12 +19,15 @@ from pandas.core.frame import DataFrame from pyramids.raster import Raster as raster from scipy.stats import genextreme, gumbel_r +from serapeum_utils.utils import class_attr_initialize, class_method_parse from statista import metrics as Pf from statista.distributions import GEV, Gumbel # , PlottingPosition +from Hapi.hapi_warnings import SilencePandasWarning from Hapi.hm.saintvenant import SaintVenant from Hapi.plot.visualizer import Visualize as V -from Hapi.utils import class_attr_initialize, class_method_parse + +SilencePandasWarning() hours = list(range(1, 25)) @@ -43,30 +46,30 @@ class River: dx={"default": 500, "type": int}, start={"default": "1950-1-1", "type": str}, days={"default": 36890, "type": int}, # 100 years - rrmstart={"default": None, "type": str}, - rrmdays={"default": 36890, "type": int}, # 100 years - leftovertopping_suffix={"default": "_left.txt", "type": str}, - rightovertopping_suffix={"default": "_right.txt", "type": str}, - depthprefix={"default": "DepthMax", "type": str}, - durationprefix={"default": "Duration", "type": str}, - returnperiod_prefix={"default": "ReturnPeriod", "type": str}, + rrm_start={"default": None, "type": str}, + rrm_days={"default": 36890, "type": int}, # 100 years + left_overtopping_suffix={"default": "_left.txt", "type": str}, + right_overtopping_suffix={"default": "_right.txt", "type": str}, + depth_prefix={"default": "DepthMax", "type": str}, + duration_prefix={"default": "Duration", "type": str}, + return_period_prefix={"default": "ReturnPeriod", "type": str}, compressed={"default": True, "type": bool}, fmt={"default": "%Y-%m-%d", "type": str}, - onedresultpath={"default": "/results/1d", "type": str}, - twodresultpath={"default": "/results/2d", "type": str}, + one_d_result_path={"default": "/results/1d", "type": str}, + two_d_result_path={"default": "/results/2d", "type": str}, ) river_attributes = dict( - oneminresultpath=None, - usbcpath=None, - firstday=None, + one_min_result_path=None, + us_bc_path=None, + first_day=None, referenceindex_results=None, wd=None, XSF=None, - LateralsF=None, + laterals_file=None, BCF=None, - RiverNetworkF=None, - SlopeF=None, + river_network_file=None, + slope_file=None, NoSeg=None, CalibrationF=None, Coupling1D2DF=None, @@ -85,34 +88,34 @@ class River: OneDTempR=None, D1=None, D2=None, - crosssections=None, + cross_sections=None, xsno=None, - xsname=None, - QBCmin=None, - HBCmin=None, + xs_names=None, + q_bc_1min=None, + h_bc_1min=None, h=None, q=None, from_beginning=None, - firstdayresults=None, - lastday=None, - daylist=None, + first_day_results=None, + last_day=None, + days_list=None, id=None, QBC=None, HBC=None, usbc=None, dsbc=None, - Result1D=None, + results_1d=None, Q=None, H=None, slope=None, - EventIndex=None, + event_index=None, rivernetwork=None, SP=None, customized_runs_path=None, Segments=None, RP=None, - rrmpath=None, - segments=None, + rrm_path=None, + reach_ids=None, customized_runs_config=None, parameters=None, results_config=None, @@ -152,37 
+155,37 @@ def __init__( start date. The default is "1950-1-1". days : [integer], optional length of the simulation in days. The default is 36890. - rrmstart : [str], optional + rrm_start : [str], optional the start date of the rainfall-runoff data. The default is "1950-1-1". - rrmdays : [integer], optional + rrm_days : [integer], optional the length of the data of the rainfall-runoff data in days. The default is 36890. dto : [integer] time step (sec) of the 1d routing model. default is 60 second. - leftovertopping_suffix : [str], optional + left_overtopping_suffix : [str], optional the prefix you used to name the overtopping form the left bank files. The default is "_left.txt". - rightovertopping_suffix : TYPE, optional + righ_tovertopping_suffix : TYPE, optional the prefix you used to name the overtopping form the right bank files. The default is "_right.txt". - depthprefix : [str], optional + depth_prefix : [str], optional the prefix you used to name the Max depth raster result maps. The default is "DepthMax". - durationprefix : [str], optional + duration_prefix : [str], optional the prefix you used to name the inundation duration raster result maps. The default is "Duration". - returnperiod_prefix : [str], optional + return_period_prefix : [str], optional the prefix you used to name the Return Period raster result maps. The default is "ReturnPeriod". compressed : [bool], optional True if the result raster/ascii files are compressed. The default is True. - onedresultpath : [str], optional + one_d_result_path : [str], optional path to the folder where the 1D river routing results exist. The default is ''. - twodresultpath : [str], optional + two_d_result_path : [str], optional path to the folder where the 1D river routing results exist. The default is ''. fmt: [string] @@ -219,22 +222,22 @@ def __init__( # 19723 days so write 19724 if self.days == 1: self.days = 2 - self.referenceindex = pd.DataFrame(index=list(range(1, self.days + 1))) - self.referenceindex["date"] = ref_ind + self.reference_index = pd.DataFrame(index=list(range(1, self.days + 1))) + self.reference_index["date"] = ref_ind else: - self.referenceindex = pd.DataFrame(index=list(range(1, self.days + 1))) - self.referenceindex["date"] = ref_ind[:-1] + self.reference_index = pd.DataFrame(index=list(range(1, self.days + 1))) + self.reference_index["date"] = ref_ind[:-1] - if self.rrmstart is None: - self.rrmstart = self.start + if self.rrm_start is None: + self.rrm_start = self.start else: - self.rrmstart = dt.datetime.strptime(self.rrmstart, self.fmt) + self.rrm_start = dt.datetime.strptime(self.rrm_start, self.fmt) - self.rrmend = self.rrmstart + dt.timedelta(days=self.rrmdays) - ref_ind = pd.date_range(self.rrmstart, self.rrmend, freq="D") - self.rrmreferenceindex = pd.DataFrame(index=list(range(1, self.rrmdays + 1))) - self.rrmreferenceindex["date"] = ref_ind[:-1] - self.notimesteps = len(self.rrmreferenceindex) + self.rrm_end = self.rrm_start + dt.timedelta(days=self.rrm_days) + ref_ind = pd.date_range(self.rrm_start, self.rrm_end, freq="D") + self.rrm_reference_index = pd.DataFrame(index=list(range(1, self.rrm_days + 1))) + self.rrm_reference_index["date"] = ref_ind[:-1] + self.no_time_steps = len(self.rrm_reference_index) self.indsub = pd.date_range(self.start, self.end, freq=self.freq) @@ -257,7 +260,7 @@ def indexToDate(self, index: int): date object. 
""" # convert the index into date - return self.referenceindex.loc[index, "date"] + return self.reference_index.loc[index, "date"] def dateToIndex(self, date: Union[dt.datetime, str], fmt: str = "%Y-%m-%d"): """DateToIndex. @@ -280,12 +283,12 @@ def dateToIndex(self, date: Union[dt.datetime, str], fmt: str = "%Y-%m-%d"): if isinstance(date, str): date = dt.datetime.strptime(date, fmt) try: - return np.where(self.referenceindex["date"] == date)[0][0] + 1 + return np.where(self.reference_index["date"] == date)[0][0] + 1 except: raise ValueError( f"The input date {date} is out of the range" - f"Simulation is between {self.referenceindex.loc[1, 'date']} and " - f"{self.referenceindex.loc[len(self.referenceindex), 'date']}" + f"Simulation is between {self.reference_index.loc[1, 'date']} and " + f"{self.reference_index.loc[len(self.reference_index), 'date']}" ) def indexToDateRRM(self, index: int): @@ -307,7 +310,7 @@ def indexToDateRRM(self, index: int): date object. """ # convert the index into date - return self.referenceindex.loc[index, "date"] + return self.reference_index.loc[index, "date"] def dateToIndexRRM(self, date: Union[str, dt.datetime], fmt: str = "%Y-%m-%d"): """DateToIndexRRM. @@ -329,14 +332,15 @@ def dateToIndexRRM(self, date: Union[str, dt.datetime], fmt: str = "%Y-%m-%d"): """ if isinstance(date, str): date = dt.datetime.strptime(date, fmt) - return np.where(self.referenceindex["date"] == date)[0][0] + 1 + return np.where(self.reference_index["date"] == date)[0][0] + 1 @staticmethod def round(number, roundto): + """Round fload number.""" return round(number / roundto) * roundto def readConfig(self, path): - """reads the hydraulic model configuration file. + """Read the hydraulic model configuration file. Parameters ---------- @@ -377,7 +381,7 @@ def readConfig(self, path): ), laterals_dir=rrm_rdir, boundary_condition_path=rrm_rdir, - rrm_location_1=Path(rrm_results.get("location-1")), # rrmpath + rrm_location_1=Path(rrm_results.get("location-1")), # rrm_path rrm_location_2=Path(rrm_results.get("location-2")), ) # result files @@ -392,10 +396,10 @@ def readConfig(self, path): results_files = config.get("Results 2D") self.results_paths = dict( results_rdir=results_rdir, - onedresultpath=results_rdir.joinpath(hourlt_results.get("folder")), - oneminresultpath=results_rdir, - usbcpath=results_rdir.joinpath(one_min_results.get("usbc").get("folder")), - twodresultpath=Path(results_files.get("root directory")), + one_d_result_path=results_rdir.joinpath(hourlt_results.get("folder")), + one_min_result_path=results_rdir, + us_bc_path=results_rdir.joinpath(one_min_results.get("usbc").get("folder")), + two_d_result_path=Path(results_files.get("root directory")), ) # parameters parameters = config.get("simulation parameters") @@ -428,16 +432,16 @@ def read1DConfigFile(self, path: str): self.XSF = wholefile[4][:-1] self.readXS(self.wd + "/inputs/1d/topo/" + self.XSF) # Laterals file, BC file - self.LateralsF, self.BCF = wholefile[6][:-1].split(" ") + self.laterals_file, self.BCF = wholefile[6][:-1].split(" ") # RiverNetwork file - self.RiverNetworkF = wholefile[8][:-1] - self.readRiverNetwork(self.wd + "/inputs/1d/topo/" + self.RiverNetworkF) + self.river_network_file = wholefile[8][:-1] + self.readRiverNetwork(self.wd + "/inputs/1d/topo/" + self.river_network_file) # Slope File - self.SlopeF = wholefile[10][:-1] - self.readSlope(self.wd + "/inputs/1d/topo/" + self.SlopeF) + self.slope_file = wholefile[10][:-1] + self.readSlope(self.wd + "/inputs/1d/topo/" + self.slope_file) self.NoSeg = 
len(self.slope) # Calibration file - self.CalibrationF = wholefile[12][:-1] + self.calibration_file = wholefile[12][:-1] # 1D-2D Coupling file self.Coupling1D2DF = wholefile[14][:-1] # Run mode @@ -535,19 +539,19 @@ def readXS(self, path: str): Returns ------- - crosssections : [dataframe] + cross_sections : [dataframe] a dataframe attribute will be created """ if self.version == 3: - self.crosssections = pd.read_csv(path, delimiter=",") - self.xsno = len(self.crosssections) - self.segments = list(set(self.crosssections["id"].tolist())) + self.cross_sections = pd.read_csv(path, delimiter=",") + self.xsno = len(self.cross_sections) + self.reach_ids = list(set(self.cross_sections["id"].tolist())) else: - self.crosssections = pd.read_csv(path, delimiter=",") - self.xsno = len(self.crosssections) + self.cross_sections = pd.read_csv(path, delimiter=",") + self.xsno = len(self.cross_sections) # TODO to be checked later now for testing of version 4 - self.xsname = self.crosssections["xsid"].tolist() - self.segments = list(set(self.crosssections["id"].tolist())) + self.xs_names = self.cross_sections["xsid"].tolist() + self.reach_ids = list(set(self.cross_sections["id"].tolist())) def readBoundaryConditions( self, @@ -603,38 +607,38 @@ def readBoundaryConditions( each day as a row and the hours as the columns """ if path != "": - self.usbcpath = path + self.us_bc_path = path if self.version < 4: if start == "": start = 1 if end == "": - end = len(self.referenceindex_results) - 1 + end = len(self.reference_index_results) - 1 if isinstance(start, str): start = dt.datetime.strptime(start, fmt) - start = np.where(self.referenceindex_results == start)[0][0] + 1 + start = np.where(self.reference_index_results == start)[0][0] + 1 if isinstance(end, str): end = dt.datetime.strptime(end, fmt) - end = np.where(self.referenceindex_results == end)[0][0] + 1 + end = np.where(self.reference_index_results == end)[0][0] + 1 QBC = pd.DataFrame( - index=self.referenceindex_results[start - 1 : end], columns=hours + index=self.reference_index_results[start - 1 : end], columns=hours ) HBC = pd.DataFrame( - index=self.referenceindex_results[start - 1 : end], columns=hours + index=self.reference_index_results[start - 1 : end], columns=hours ) - for i in self.daylist[start - 1 : end]: + for i in self.days_list[start - 1 : end]: bc_q = np.loadtxt( - self.usbcpath + str(self.id) + "-" + str(i) + ".txt", + self.us_bc_path + str(self.id) + "-" + str(i) + ".txt", dtype=np.float16, ) - QBC.loc[self.referenceindex.loc[i, "date"], :] = bc_q[:, 0].tolist()[ + QBC.loc[self.reference_index.loc[i, "date"], :] = bc_q[:, 0].tolist()[ 0 : bc_q.shape[0] : 60 ] - HBC.loc[self.referenceindex.loc[i, "date"], :] = bc_q[:, 1].tolist()[ + HBC.loc[self.reference_index.loc[i, "date"], :] = bc_q[:, 1].tolist()[ 0 : bc_q.shape[0] : 60 ] @@ -646,7 +650,7 @@ def readBoundaryConditions( def convertdate(date): return dt.datetime.strptime(date, fmt) - BC = pd.read_csv(self.usbcpath) + BC = pd.read_csv(self.us_bc_path) BC.index = BC[BC.columns[0]].apply(convertdate) BC = BC.drop(BC.columns[0], axis=1) @@ -670,14 +674,18 @@ def convertdate(date): ) def readSubDailyResults( - self, start: str, end: str, fmt: str = "%Y-%m-%d", Lastsegment: bool = False + self, + start: str, + end: str, + fmt: str = "%Y-%m-%d", + last_river_reach: bool = False, ): """ReadSubDailyResults. Read sub-daily results. Read1DResults1Min method is used by the Reach sub-class, so most of the - parameters (xsname,...) 
are assigned to values after reading results + parameters (xs_names,...) are assigned to values after reading results with other methods in the sub class version 4 @@ -691,7 +699,7 @@ def readSubDailyResults( end date of the period you want to read. fmt: [string] format of the date. fmt="%Y-%m-%d %H:%M:%S" - Lastsegment : [bool] + last_river_reach : [bool] + True if this is the last reach in the river; if False, the last cross-section column is dropped. Returns ------- @@ -700,10 +708,10 @@ def readSubDailyResults( columns are the cross-section ids. """ if self.version == 4: - assert self.crosssections, "please read the cross sections first" + assert isinstance(self.cross_sections, DataFrame), "please read the cross sections first" - assert isinstance(self.usbcpath, str), ( - "please input the 'usbcpath' attribute in " + assert isinstance(self.us_bc_path, str), ( + "please input the 'us_bc_path' attribute in " "the River or the Reach instance" ) @@ -725,7 +733,7 @@ def readSubDailyResults( bc_q = pd.DataFrame(index=index_daily, columns=list(range(1, nstep + 1))) bc_h = pd.DataFrame(index=index_daily, columns=list(range(1, nstep + 1))) - xsname = [int(i) for i in self.xsname] + xsname = [int(i) for i in self.xs_names] h = pd.DataFrame(index=indmin, columns=xsname) q = pd.DataFrame(index=indmin, columns=xsname) @@ -737,7 +745,7 @@ def readSubDailyResults( # read results for each day for i in list2: path = ( - self.oneminresultpath + self.one_min_result_path + "{0}/" + str(self.id) + "-{0}-" @@ -748,11 +756,11 @@ def readSubDailyResults( logger.debug(path.format("h") + "- file is read") qq = np.transpose(np.loadtxt(path.format("q"), dtype=np.float16)) logger.debug(path.format("q") + " file is read") - if not Lastsegment: + if not last_river_reach: hh = hh[:, :-1] qq = qq[:, :-1] # add the bed level to the water depth hh = hh + self.cross_sections["gl"].values # assign the sub-daily results in the big dataframe ind1 = h.index[(i - list2[0]) * nstep] ind2 = h.index[(i - list2[0]) * nstep + nstep - 1] @@ -761,7 +769,7 @@ def readSubDailyResults( # BC bc = np.loadtxt( - self.usbcpath + str(self.id) + "-" + str(i) + ".txt", + self.us_bc_path + str(self.id) + "-" + str(i) + ".txt", dtype=np.float16, ) bc_q.loc[bc_q.index[i - list2[0]], :] = bc[:, 0] @@ -769,16 +777,20 @@ def readSubDailyResults( self.h = h[:] self.q = q[:] - self.QBCmin = bc_q[:] - self.HBCmin = bc_h[:] + self.q_bc_1min = bc_q[:] + self.h_bc_1min = bc_h[:] else: for i in list2: - path = f"{self.oneminresultpath}H-{str(self.indexToDate(i))[:10]}.csv" + path = ( + f"{self.one_min_result_path}H-{str(self.indexToDate(i))[:10]}.csv" + ) hh = np.transpose(np.loadtxt(path, delimiter=",", dtype=np.float16)) - path = f"{self.oneminresultpath}Q-{str(self.indexToDate(i))[:10]}.csv" + path = ( + f"{self.one_min_result_path}Q-{str(self.indexToDate(i))[:10]}.csv" + ) qq = np.transpose(np.loadtxt(path, delimiter=",", dtype=np.float16)) - h = h + self.crosssections["bed level"].values + h = h + self.cross_sections["bed level"].values ind1 = h.index[(i - list2[0]) * len(indmin)] ind2 = h.index[(i - list2[0]) * len(indmin) + len(indmin) - 1] h.loc[ind1:ind2] = hh @@ -790,24 +802,24 @@ def readSubDailyResults( # check the first day in the results and get the date of the first day and last day # create time series # TODO to be checked later now for testing - self.from_beginning = 1 # self.Result1D['day'][0] + self.from_beginning = 1 # self.results_1d['day'][0] - self.firstday = self.indexToDate(self.from_beginning) + self.first_day = self.indexToDate(self.from_beginning) # if there are empty days at the beginning the filling missing days is not going to 
detect it # so ignore it here by starting from the first day in the data (data['day'][0]) dataframe # empty days at the beginning - self.firstdayresults = self.indexToDate(self.from_beginning) - self.lastday = self.indexToDate(len(self.referenceindex)) + self.first_day_results = self.indexToDate(self.from_beginning) + self.last_day = self.indexToDate(len(self.reference_index)) # last days+1 as range does not include the last element - self.daylist = list(range(self.from_beginning, len(self.referenceindex))) - self.referenceindex_results = pd.date_range( - self.firstdayresults, self.lastday, freq="D" + self.days_list = list(range(self.from_beginning, len(self.reference_index))) + self.reference_index_results = pd.date_range( + self.first_day_results, self.last_day, freq="D" ) @staticmethod def _read_chuncks(path, chunksize=10e5): - """read csv file in chuncks. + """Read csv file in chunks. Parameters ---------- @@ -844,8 +856,8 @@ def read1DResult( def read1DResult( self, Subid: int, - fromday: Optional[int] = None, - today: Optional[int] = None, + from_day: Optional[int] = None, + to_day: Optional[int] = None, path: str = None, fill_missing: bool = False, chunk_size: int = None, @@ -861,10 +873,10 @@ def read1DResult( ---------- Subid : [integer] id of the sub-basin whose data you want to read. - fromday : [integer], optional + from_day : [integer], optional the index of the day you want the data to start from. The default is None, which means read everything - today : [integer], optional + to_day : [integer], optional the index of the day you want the data to end to. The default is None, which means read everything path : [String], optional @@ -878,14 +890,14 @@ def read1DResult( Returns ------- - Result1D : [attribute] the results read will be stored (as it is without any filter) - in the attribute "Result1D" + results_1d : [attribute] the results read will be stored (as it is without any filter) + in the attribute "results_1d" """ - # if the path is not given try to read from the object predefined onedresultpath + # if the path is not given try to read from the object predefined one_d_result_path t1 = dt.datetime.now() - if not path: - path = self.onedresultpath + if path is None: + path = self.one_d_result_path path = os.path.join(path, f"{Subid}{extension}") @@ -896,6 +908,7 @@ def read1DResult( delimiter=r"\s+", index_col=False, compression="infer", + # engine="pyarrow" ) else: # read the file in chunks @@ -905,27 +918,27 @@ def read1DResult( days = list(set(data["day"])) days.sort() - if fromday: - if fromday not in days: + if from_day: + if from_day not in days: raise ValueError( - f"Please use the GetDays method to select fromday:{fromday} that exist in the data" + f"Please use the GetDays method to select from_day:{from_day} that exists in the data" ) - if today: - if today not in days: + if to_day: + if to_day not in days: raise ValueError( - f"please use the GetDays method to select today: {today} that exist in the data" + f"Please use the GetDays method to select to_day: {to_day} that exists in the data" ) - if fromday: - data = data.loc[data["day"] >= fromday, :] + if from_day: + data = data.loc[data["day"] >= from_day, :] - if today: - data = data.loc[data["day"] <= today] + if to_day: + data = data.loc[data["day"] <= to_day] # data.index = list(range(0, len(data))) # Cross section data add one more xs at the end - xsname = self.xsname + [self.xsname[-1] + 1] + xsname = self.xs_names + [self.xs_names[-1] + 1] # data["xs"][data["day"] == data["day"][1]][data["hour"] == 1].tolist() if fill_missing: @@ -972,8 +985,8 @@ def 
read1DResult( # calculate time and print it t2 = dt.datetime.now() time_min = (t2 - t1).seconds / 60 - print(time_min) - self.Result1D = data + print(f"Time taken to read the file: {time_min:0.3f} min") + self.results_1d = data @staticmethod def collect1DResults( @@ -995,32 +1008,32 @@ def collect1DResults( Parameters ---------- - 1-path : [String] - path to the folder containing the separated folder. - 2-FolderNames : [List] - list containing folder names. - 3-Left : [Bool] - True if you want to combine left overtopping files. - 4-Right : [Bool] - True if you want to combine right overtopping files. - 5-Savepath : [String] - path to the folder where data will be saved. - 6-OneD : [Bool] - True if you want to combine 1D result files. - 7-fromf : [Integer], optional - if the files are very big and the cache memory has a problem - reading all the files you can specify here the order of the file - the code will start from to combine. The default is ''. - 8-tof : [Integer], optional - if the files are very big and the cache memory has a problem - reading all the files you can specify here the order of the file - the code will end to combine. The default is ''. - 9-FilterbyName : [Bool], optional - if the results include a wanm up period at the beginning - or has results for some days at the end you want to filter out - you want to include the period you want to be combined only - in the name of the folder between () and separated with - - ex 1d(5000-80000). The default is False. + path : [String] + path to the folder containing the separated folders. + FolderNames : [List] + list containing folder names. + Left : [Bool] + True if you want to combine left overtopping files. + Right : [Bool] + True if you want to combine right overtopping files. + Savepath : [String] + path to the folder where data will be saved. + OneD : [Bool] + True if you want to combine 1D result files. + fromf : [Integer], optional + if the files are very big and the cache memory has a problem reading all the files, + you can specify here the order of the file the code will start combining from. + The default is ''. + tof : [Integer], optional + if the files are very big and the cache memory has a problem reading all the files, + you can specify here the order of the file the code will stop combining at. + The default is ''. + FilterbyName : [Bool], optional + if the results include a warm-up period at the beginning, or results for some days + at the end that you want to filter out, include only the period you want to be combined + in the name of the folder between () and separated with -, + e.g. 1d(5000-80000). The default is False. 
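+ For example, an illustrative call only (the folder layout is an assumption, and
+ the positional argument order follows the parameter list above):
+ >>> River.collect1DResults("results/separated", ["1d(5000-80000)"],
+ ...                        True, True, "results/combined", True,
+ ...                        FilterbyName=True)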
Returns ------- @@ -1077,7 +1090,7 @@ def collect1DResults( # read the file try: temp_df = pd.read_csv( - path + "/" + FolderNames[i] + "/" + FileList[j], + f"{path}/{FolderNames[i]}/{FileList[j]}", header=None, delimiter=r"\s+", ) @@ -1105,28 +1118,28 @@ def collect1DResults( if var.endswith("_left"): # put the dataframe in order first exec(var + ".sort_values(by=[0,1,2],ascending = True, inplace = True)") - path = Savepath + "/" + var[1:] + ".txt" + path = f"{Savepath}/{var[1:]}.txt" exec(var + ".to_csv(path ,index= None, sep = ' ', header = None)") elif var.endswith("_right"): # put the dataframe in order first exec(var + ".sort_values(by=[0,1,2],ascending = True, inplace = True)") - path = Savepath + "/" + var[1:] + ".txt" + path = f"{Savepath}/{var[1:]}.txt" exec(var + ".to_csv(path ,index= None, sep = ' ', header = None)") elif var.startswith("one"): # put the dataframe in order first exec(var + ".sort_values(by=[0,1,2],ascending = True, inplace = True)") - logger.debug("Saving " + var[3:] + ".txt") - path = Savepath + "/" + var[3:] + ".txt" + logger.debug(f"Saving {var[3:]}.txt") + path = f"{Savepath}/{var[3:]}.txt" exec(var + ".to_csv(path ,index= None, sep = ' ', header = None)") @staticmethod def _readRRMResults( version: int, - rrmreferenceindex, + rrm_reference_index, path: str, - nodeid: Union[int, str], - fromday: int, - today: int, + node_id: Union[int, str], + from_day: int, + to_day: int, date_format: str = "%d_%m_%Y", ) -> DataFrame: """ReadRRMResults. @@ -1138,15 +1151,15 @@ def _readRRMResults( ---------- version: [integer] version of the model. - rrmreferenceindex: [] + rrm_reference_index: [DataFrame] reference index of the rainfall-runoff model results. path : [String] path to the result files. - nodeid : [Integer] + node_id : [Integer] the id given to the sub-basin. - fromday : [integer], optional + from_day : [integer], optional the day you want to read the result from, the first day is 1 not zero. The default is ''. - today : [integer], optional + to_day : [integer], optional the day you want to read the result to. date_format: [str] format of the date string Returns ------- Q : [Dataframe] time series of the runoff. 
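+ Examples
+ --------
+ An illustrative sketch only (the directory and node id are assumptions;
+ for version 1 a file "results/rrm/123.txt" must exist):
+ >>> Q = River._readRRMResults(1, river.rrm_reference_index,
+ ...                           "results/rrm", 123, 1, 365)
+ >>> Q.columns.tolist()
+ [123]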
""" - rpath = os.path.join(path, f"{nodeid}.txt") + rpath = os.path.join(path, f"{node_id}.txt") if version < 3: Q = pd.read_csv(rpath, header=None) - Q = Q.rename(columns={0: nodeid}) + Q = Q.rename(columns={0: node_id}) Q.index = list(range(1, len(Q) + 1)) - if not fromday: - fromday = 1 - if not today: - today = len(Q) + if not from_day: + from_day = 1 + if not to_day: + to_day = len(Q) - Q = Q.loc[Q.index >= fromday, :] - Q = Q.loc[Q.index <= today] + Q = Q.loc[Q.index >= from_day, :] + Q = Q.loc[Q.index <= to_day] else: Q = pd.read_csv(rpath, header=None, skiprows=1) @@ -1175,20 +1188,20 @@ def _readRRMResults( del Q[0] # convert the date into integer index - s = np.where(rrmreferenceindex["date"] == Q.index[0])[0][0] + 1 - e = np.where(rrmreferenceindex["date"] == Q.index[-1])[0][0] + 1 + s = np.where(rrm_reference_index["date"] == Q.index[0])[0][0] + 1 + e = np.where(rrm_reference_index["date"] == Q.index[-1])[0][0] + 1 Q.index = list(range(s, e + 1)) - if not fromday: - fromday = s - if not today: - today = e + if not from_day: + from_day = s + if not to_day: + to_day = e - Q = Q.loc[Q.index >= fromday, :] - Q = Q.loc[Q.index <= today, :] + Q = Q.loc[Q.index >= from_day, :] + Q = Q.loc[Q.index <= to_day, :] Q = Q[1].to_frame() - Q = Q.rename(columns={1: nodeid}) + Q = Q.rename(columns={1: node_id}) return Q @@ -1223,20 +1236,20 @@ def kinematicwave(self, start: str = "", end: str = "", fmt: str = "%Y-%m-%d"): # TODO to be checked later now for testing # self.from_beginning = self.indsub[np.where(self.indsub == start)[0][0]] - self.firstday = self.indsub[np.where(self.indsub == start)[0][0]] + self.first_day = self.indsub[np.where(self.indsub == start)[0][0]] # if there are empty days at the beginning the filling missing days is not going to detect it # so ignore it here by starting from the first day in the data (data['day'][0]) dataframe # empty days at the beginning - # self.firstdayresults = self.indsub[np.where(self.indsub == start)[0][0]] - self.lastday = self.indsub[np.where(self.indsub == end)[0][0]] + # self.first_day_results = self.indsub[np.where(self.indsub == start)[0][0]] + self.last_day = self.indsub[np.where(self.indsub == end)[0][0]] # last days+1 as range does not include the last element - # self.daylist = list(range(self.from_beginning, len(self.referenceindex))) - self.referenceindex_results = pd.date_range( - self.firstday, self.lastday, freq=self.freq + # self.days_list = list(range(self.from_beginning, len(self.reference_index))) + self.reference_index_results = pd.date_range( + self.first_day, self.last_day, freq=self.freq ) - usbc = self.usbc.loc[self.referenceindex_results, :] + usbc = self.usbc.loc[self.reference_index_results, :] SaintVenant.kinematic1d(self, usbc) def preissmann( @@ -1290,21 +1303,21 @@ def preissmann( # TODO to be checked later now for testing # self.from_beginning = self.indsub[np.where(self.indsub == start)[0][0]] - self.firstday = self.indsub[np.where(self.indsub == start)[0][0]] + self.first_day = self.indsub[np.where(self.indsub == start)[0][0]] # if there are empty days at the beginning the filling missing days is not going to detect it # so ignore it here by starting from the first day in the data (data['day'][0]) dataframe # empty days at the beginning - # self.firstdayresults = self.indsub[np.where(self.indsub == start)[0][0]] - self.lastday = self.indsub[np.where(self.indsub == end)[0][0]] + # self.first_day_results = self.indsub[np.where(self.indsub == start)[0][0]] + self.last_day = self.indsub[np.where(self.indsub == 
end)[0][0]] # last days+1 as range does not include the last element - # self.daylist = list(range(self.from_beginning, len(self.referenceindex))) - self.referenceindex_results = pd.date_range( - self.firstday, self.lastday, freq=self.freq + # self.days_list = list(range(self.from_beginning, len(self.reference_index))) + self.reference_index_results = pd.date_range( + self.first_day, self.last_day, freq=self.freq ) - # usbc = self.qusbc.loc[self.referenceindex_results,:] - # dsbc = self.qusbc.loc[self.referenceindex_results, :] + # usbc = self.qusbc.loc[self.reference_index_results,:] + # dsbc = self.qusbc.loc[self.reference_index_results, :] saintpreis = SaintVenant( maxiteration=maxiteration, beta=beta, epsi=epsi, theta=theta ) @@ -1340,20 +1353,20 @@ def storagecell(self, start: str = "", end: str = "", fmt: str = "%Y-%m-%d"): # TODO to be checked later now for testing # self.from_beginning = self.indsub[np.where(self.indsub == start)[0][0]] - self.firstday = self.indsub[np.where(self.indsub == start)[0][0]] + self.first_day = self.indsub[np.where(self.indsub == start)[0][0]] # if there are empty days at the beginning the filling missing days is not going to detect it # so ignore it here by starting from the first day in the data (data['day'][0]) dataframe # empty days at the beginning - # self.firstdayresults = self.indsub[np.where(self.indsub == start)[0][0]] - self.lastday = self.indsub[np.where(self.indsub == end)[0][0]] + # self.first_day_results = self.indsub[np.where(self.indsub == start)[0][0]] + self.last_day = self.indsub[np.where(self.indsub == end)[0][0]] # last days+1 as range does not include the last element - # self.daylist = list(range(self.from_beginning, len(self.referenceindex))) - self.referenceindex_results = pd.date_range( - self.firstday, self.lastday, freq=self.freq + # self.days_list = list(range(self.from_beginning, len(self.reference_index))) + self.reference_index_results = pd.date_range( + self.first_day, self.last_day, freq=self.freq ) - usbc = self.usbc.loc[self.referenceindex_results, :] + usbc = self.usbc.loc[self.reference_index_results, :] SaintVenant.storagecell(self, usbc) def animatefloodwave( @@ -1369,7 +1382,7 @@ def animatefloodwave( xaxislabelsize: float = 15, yaxislabelsize: float = 15, nxlabels: float = 50, - # plotbanhfuldepth=False, + # plot_bankfull_depth=False, ): """animatefloodwave. @@ -1410,13 +1423,13 @@ def animatefloodwave( end, interval=interval, xs=xs, - xsbefore=xsbefore, - xsafter=xsafter, + xs_before=xsbefore, + xs_after=xsafter, fmt=fmt, - textlocation=textlocation, - xaxislabelsize=xaxislabelsize, - yaxislabelsize=yaxislabelsize, - nxlabels=nxlabels, + text_location=textlocation, + x_axis_label_size=xaxislabelsize, + y_axis_label_size=yaxislabelsize, + xlabels_number=nxlabels, ) return anim @@ -1434,8 +1447,8 @@ def saveResult(self, path: str): # , fmt="%.3f"): ------- None. 
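+ Examples
+ --------
+ Illustrative only (the instance must already hold the routing results,
+ and the output folder is an assumption):
+ >>> river.saveResult("results/1min/")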
""" - for i in range(len(self.referenceindex)): - name = str(self.referenceindex.loc[self.referenceindex.index[i], "date"])[ + for i in range(len(self.reference_index)): + name = str(self.reference_index.loc[self.reference_index.index[i], "date"])[ :10 ] # space is rows , time is columns @@ -1467,7 +1480,7 @@ def readSlope(self, path: str): Returns ------- slope: [DataFrame] - dataframe of the boundary condition segments that has slope + dataframe of the boundary condition reaches that has slope """ self.slope = pd.read_csv(path, delimiter=",", header=None, skiprows=1) self.slope.columns = ["id", "slope"] @@ -1603,7 +1616,7 @@ def trace(self, sub_id): Returns ------- US : [list attribute], optional - the id of all the upstream segments are going to be stored in a list + the id of all the upstream reaches are going to be stored in a list attribute. """ self.US = [] @@ -1813,8 +1826,8 @@ def getBankfullDepth(self, function, ColumnName): dataframe column in the cross section attribute with the calculated depth. """ - self.crosssections[ColumnName] = ( - self.crosssections["b"].to_frame().applymap(function) + self.cross_sections[ColumnName] = ( + self.cross_sections["b"].to_frame().applymap(function) ) def getCapacity(self, ColumnName: str, Option: int = 1, distribution: str = "GEV"): @@ -1842,10 +1855,10 @@ def getCapacity(self, ColumnName: str, Option: int = 1, distribution: str = "GEV Returns ------- - the crosssections dataframe will be updated with the following columns. + the cross_sections dataframe will be updated with the following columns. Discharge: [dataframe column] - the calculated discharge will be stored in the crosssections + the calculated discharge will be stored in the cross_sections attribute in the River object in a columns with the given ColumnName ColumnName+"RP":[dataframe column] if you already rad the statistical properties another column containing @@ -1854,70 +1867,70 @@ def getCapacity(self, ColumnName: str, Option: int = 1, distribution: str = "GEV the given ColumnName+"RP", if the ColumnName was QC then the discharge will be in a Qc columns and the return period will be in QcRP column """ - for i in range(len(self.crosssections) - 1): + for i in range(len(self.cross_sections) - 1): # get the slope - if self.crosssections.loc[i, "id"] == self.crosssections.loc[i + 1, "id"]: + if self.cross_sections.loc[i, "id"] == self.cross_sections.loc[i + 1, "id"]: slope = ( - self.crosssections.loc[i, "gl"] - - self.crosssections.loc[i + 1, "gl"] + self.cross_sections.loc[i, "gl"] + - self.cross_sections.loc[i + 1, "gl"] ) / self.dx else: slope = ( abs( - self.crosssections.loc[i, "gl"] - - self.crosssections.loc[i - 1, "gl"] + self.cross_sections.loc[i, "gl"] + - self.cross_sections.loc[i - 1, "gl"] ) / self.dx ) - self.crosssections.loc[i, "Slope"] = slope + self.cross_sections.loc[i, "Slope"] = slope if Option == 1: # bankfull area - self.crosssections.loc[i, ColumnName] = ( - (1 / self.crosssections.loc[i, "m"]) - * self.crosssections.loc[i, "b"] - * (self.crosssections.loc[i, "dbf"]) ** (5 / 3) + self.cross_sections.loc[i, ColumnName] = ( + (1 / self.cross_sections.loc[i, "m"]) + * self.cross_sections.loc[i, "b"] + * (self.cross_sections.loc[i, "dbf"]) ** (5 / 3) ) - self.crosssections.loc[i, ColumnName] = self.crosssections.loc[ + self.cross_sections.loc[i, ColumnName] = self.cross_sections.loc[ i, ColumnName ] * slope ** (1 / 2) else: # lowest dike # get the vortices of the cross sections - H = self.crosssections.loc[i, ["zl", "zr"]].min() - Hl, Hr, Bl, Br, B, dbf = 
self.crosssections.loc[ i, ["hl", "hr", "bl", "br", "b", "dbf"] ].tolist() + H = self.cross_sections.loc[i, ["zl", "zr"]].min() + Hl, Hr, Bl, Br, B, dbf = self.cross_sections.loc[ i, ["hl", "hr", "bl", "br", "b", "dbf"] ].tolist() - BedLevel = self.crosssections.loc[i, "gl"] + BedLevel = self.cross_sections.loc[i, "gl"] Coords = self.getVortices(H - BedLevel, Hl, Hr, Bl, Br, B, dbf) # get the area and perimeters Area, Perimeter = self.polygonGeometry(Coords) - # self.crosssections.loc[i,'Area'] = Area - # self.crosssections.loc[i,'Perimeter'] = Perimeter - self.crosssections.loc[i, ColumnName] = ( - (1 / self.crosssections.loc[i, "m"]) + # self.cross_sections.loc[i,'Area'] = Area + # self.cross_sections.loc[i,'Perimeter'] = Perimeter + # Manning's equation with R = A / P: Q = (1/n) * A * (A/P)**(2/3) * sqrt(S) + self.cross_sections.loc[i, ColumnName] = ( + (1 / self.cross_sections.loc[i, "m"]) * Area * ((Area / Perimeter) ** (2 / 3)) ) - self.crosssections.loc[i, ColumnName] = self.crosssections.loc[ + self.cross_sections.loc[i, ColumnName] = self.cross_sections.loc[ i, ColumnName ] * slope ** (1 / 2) if isinstance(self.SP, DataFrame): - if "gauge" not in self.crosssections.columns.tolist(): + if "gauge" not in self.cross_sections.columns.tolist(): raise ValueError( "To calculate the return period for each cross-section a column with " "the corresponding gauge id should be in the cross-section file" ) RP = self.getReturnPeriod( - self.crosssections.loc[i, "gauge"], - self.crosssections.loc[i, ColumnName], + self.cross_sections.loc[i, "gauge"], + self.cross_sections.loc[i, ColumnName], distribution=distribution, ) if np.isnan(RP): RP = -1 - self.crosssections.loc[i, ColumnName + "RP"] = round(RP, 2) + self.cross_sections.loc[i, ColumnName + "RP"] = round(RP, 2) def calibrateDike(self, ObjectiveRP: Union[str, int], CurrentRP: Union[str, int]): """CalibrateDike. @@ -1946,99 +1959,99 @@ def calibrateDike(self, ObjectiveRP: Union[str, int], CurrentRP: Union[str, int] "Please read the statistical properties file first using StatisticalProperties method" ) - if not isinstance(self.crosssections, DataFrame): + if not isinstance(self.cross_sections, DataFrame): raise TypeError( "please read the cross section data first with the method CrossSections" ) - if CurrentRP not in self.crosssections.columns: + if CurrentRP not in self.cross_sections.columns: raise ValueError( f"{CurrentRP} is not in the cross section data please use GetCapacity method to " f"calculate the current return period" ) - if ObjectiveRP not in self.crosssections.columns: + if ObjectiveRP not in self.cross_sections.columns: raise ValueError( f"{ObjectiveRP} is not in the cross section data please create a column in the cross " "section data containing the objective return period" ) - self.crosssections.loc[:, "zlnew"] = self.crosssections.loc[:, "zl"] - self.crosssections.loc[:, "zrnew"] = self.crosssections.loc[:, "zr"] + self.cross_sections.loc[:, "zlnew"] = self.cross_sections.loc[:, "zl"] + self.cross_sections.loc[:, "zrnew"] = self.cross_sections.loc[:, "zr"] - for i in range(len(self.crosssections) - 2): + for i in range(len(self.cross_sections) - 2): - if self.crosssections.loc[i, "id"] == self.crosssections.loc[i + 1, "id"]: + if self.cross_sections.loc[i, "id"] == self.cross_sections.loc[i + 1, "id"]: slope = ( - self.crosssections.loc[i, "gl"] - - self.crosssections.loc[i + 1, "gl"] + self.cross_sections.loc[i, "gl"] + - self.cross_sections.loc[i + 1, "gl"] ) / self.dx else: slope = ( abs( - self.crosssections.loc[i, "gl"] - - self.crosssections.loc[i - 1, "gl"] + self.cross_sections.loc[i, "gl"] + - self.cross_sections.loc[i - 1, "gl"] ) / self.dx 
) - # self.crosssections.loc[i,'Slope'] = slope - Hl, Hr, Bl, Br, B, dbf = self.crosssections.loc[ + # self.cross_sections.loc[i,'Slope'] = slope + Hl, Hr, Bl, Br, B, dbf = self.cross_sections.loc[ i, ["hl", "hr", "bl", "br", "b", "dbf"] ].tolist() - BedLevel = self.crosssections.loc[i, "gl"] + BedLevel = self.cross_sections.loc[i, "gl"] # compare the current return period with the desired return period. if ( - self.crosssections.loc[i, CurrentRP] - < self.crosssections.loc[i, ObjectiveRP] - and self.crosssections.loc[i, CurrentRP] != -1 + self.cross_sections.loc[i, CurrentRP] + < self.cross_sections.loc[i, ObjectiveRP] + and self.cross_sections.loc[i, CurrentRP] != -1 ): - logger.debug("XS-" + str(self.crosssections.loc[i, "xsid"])) - logger.debug("Old RP = " + str(self.crosssections.loc[i, CurrentRP])) + logger.debug("XS-" + str(self.cross_sections.loc[i, "xsid"])) + logger.debug("Old RP = " + str(self.cross_sections.loc[i, CurrentRP])) logger.debug( - "Old H = " + str(self.crosssections.loc[i, ["zl", "zr"]].min()) + "Old H = " + str(self.cross_sections.loc[i, ["zl", "zr"]].min()) ) - self.crosssections.loc[i, "New RP"] = self.crosssections.loc[ + self.cross_sections.loc[i, "New RP"] = self.cross_sections.loc[ i, CurrentRP ] while ( - self.crosssections.loc[i, "New RP"] - < self.crosssections.loc[i, ObjectiveRP] + self.cross_sections.loc[i, "New RP"] + < self.cross_sections.loc[i, ObjectiveRP] ): # get the vortices of the cross sections if ( - self.crosssections.loc[i, "zlnew"] - < self.crosssections.loc[i, "zrnew"] + self.cross_sections.loc[i, "zlnew"] + < self.cross_sections.loc[i, "zrnew"] ): - self.crosssections.loc[i, "zlnew"] = ( - self.crosssections.loc[i, "zlnew"] + 0.1 + self.cross_sections.loc[i, "zlnew"] = ( + self.cross_sections.loc[i, "zlnew"] + 0.1 ) else: - self.crosssections.loc[i, "zrnew"] = ( - self.crosssections.loc[i, "zrnew"] + 0.1 + self.cross_sections.loc[i, "zrnew"] = ( + self.cross_sections.loc[i, "zrnew"] + 0.1 ) - H = self.crosssections.loc[i, ["zlnew", "zrnew"]].min() + H = self.cross_sections.loc[i, ["zlnew", "zrnew"]].min() Coords = self.getVortices(H - BedLevel, Hl, Hr, Bl, Br, B, dbf) # get the area and perimeters Area, Perimeter = self.polygonGeometry(Coords) - self.crosssections.loc[i, "New Capacity"] = ( - (1 / self.crosssections.loc[i, "m"]) + self.cross_sections.loc[i, "New Capacity"] = ( + (1 / self.cross_sections.loc[i, "m"]) * Area * ((Area / Perimeter) ** (2 / 3)) ) - self.crosssections.loc[i, "New Capacity"] = self.crosssections.loc[ + self.cross_sections.loc[ i, "New Capacity" - ] * slope ** (1 / 2) + ] = self.cross_sections.loc[i, "New Capacity"] * slope ** (1 / 2) RP = self.getReturnPeriod( - self.crosssections.loc[i, "gauge"], - self.crosssections.loc[i, "New Capacity"], + self.cross_sections.loc[i, "gauge"], + self.cross_sections.loc[i, "New Capacity"], ) - self.crosssections.loc[i, "New RP"] = round(RP, 2) + self.cross_sections.loc[i, "New RP"] = round(RP, 2) logger.info(f"New RP = {round(RP, 2)}") logger.info(f"New H = {round(H, 2)}") @@ -2151,7 +2164,7 @@ def getOvertoppedXS(self, day, allEventdays=True): Overtopping), since inundation maps get the max depth for the whole event the method can also trace the event back to the beginning and get all the overtopped XS from the beginning of the Event till the given day - (you have to give the River object the EventIndex attribute from the + (you have to give the River object the event_index attribute from the Event Object) Parameters ---------- @@ -2178,10 +2191,10 @@ def getOvertoppedXS(self, day, 
allEventdays=True): 2- from the beginning of the event till the given day RIM2River = RV.River('RIM2.0') RIM2River.Overtopping(wpath2 + "/results/1d/") - # read precreated EventIndex table - RIM2Event.ReadEventIndex(wpath2 + "/" + "EventIndex.txt") - # give the EventIndex table to the River Object - RIM2River.EventIndex = RIM1.EventIndex + # read precreated event_index table + RIM2Event.ReadEventIndex(wpath2 + "/" + "event_index.txt") + # give the event_index table to the River Object + RIM2River.event_index = RIM1.event_index day = 1122 XSleft, XSright = RIM2River.GetOvertoppedXS(day,False) """ @@ -2236,40 +2249,40 @@ def getSubBasin(self, xsid): [Integer] sub-basin id. """ - loc = np.where(self.crosssections["xsid"] == xsid)[0][0] - return self.crosssections.loc[loc, "id"] + loc = np.where(self.cross_sections["xsid"] == xsid)[0][0] + return self.cross_sections.loc[loc, "id"] def getFloodedSubs(self, OvertoppedXS=[], day=[1], allEventdays=True): """GetFloodedSubs. - GetFloodedSubs gets the inundeated sub-basins + GetFloodedSubs gets the inundated sub-basins Parameters ---------- - 1-OvertoppedXS : [list], optional - list of cross sections overtopped (if you already used the GetOvertoppedXS - method to get the overtopped XSs for a specific day).The default is []. - If entered the algorithm is not going to look at the over arguments - of the method. - 2-day : [list], optional - if you want to get the flooded subs for a specific list of days. The default is 1. - 3-allEventdays : [Bool], optional in case user entered OvertoppedXS - if the user entered day the allEventdays is a must. The default is True. + OvertoppedXS : [list], optional + list of cross sections overtopped (if you already used the GetOvertoppedXS + method to get the overtopped XSs for a specific day). The default is []. + If entered, the algorithm is not going to look at the other arguments + of the method. + day : [list], optional + if you want to get the flooded subs for a specific list of days. The default is 1. + allEventdays : [Bool], optional, used in case the user entered OvertoppedXS; + if the user entered day, allEventdays is required. The default is True. Returns ------- - 1-Subs : TYPE - DESCRIPTION. + Subs : [list] + list of the ids of the flooded sub-basins. - examples -------- - 1- get the flooded subs for a specific days - floodedSubs = RIM1River.GetFloodedSubs(day = [1122,1123], allEventdays=False) + Examples -------- + - get the flooded subs for specific days + >>> floodedSubs = River.GetFloodedSubs(day = [1122,1123], allEventdays=False) - 2- get the flooded subs from already obtained overtopped XSs - day = 1122 - XSleft, XSright = RIM1River.GetOvertoppedXS(day,False) - floodedSubs = RIM1River.GetFloodedSubs(OvertoppedXS = XSleft + XSright, allEventdays=False) + - get the flooded subs from already obtained overtopped XSs + >>> day = 1122 + >>> XSleft, XSright = River.GetOvertoppedXS(day,False) + >>> floodedSubs = River.GetFloodedSubs(OvertoppedXS = XSleft + XSright, allEventdays=False) """ Subs = list() # if you already used the GetOvertoppedXS and have a list of xs overtopped @@ -2307,19 +2320,19 @@ def detailedOvertopping(self, floodedSubs, eventdays): Returns ------- - 1-DetailedOvertoppingLeft : [dataframe attribute] + detailed_overtopping_left : [dataframe attribute] dataframe having for each day of the event the left overtopping to each sub-basin. - 2-DetailedOvertoppingRight : [dataframe attribute] + detailed_overtopping_right : [dataframe attribute] dataframe having for each day of the event the right overtopping to each sub-basin. 
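+ Examples
+ --------
+ An illustrative sketch only (assumes the overtopping files were already
+ read and the event covers days 1122-1123):
+ >>> flooded_subs = river.getFloodedSubs(day=[1122, 1123], allEventdays=False)
+ >>> river.detailedOvertopping(flooded_subs, [1122, 1123])
+ >>> totals = river.detailed_overtopping_left.loc["sum", :]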
""" columns = floodedSubs + ["sum"] - self.DetailedOvertoppingLeft = pd.DataFrame( + self.detailed_overtopping_left = pd.DataFrame( index=eventdays + ["sum"], columns=columns ) - self.DetailedOvertoppingRight = pd.DataFrame( + self.detailed_overtopping_right = pd.DataFrame( index=eventdays + ["sum"], columns=columns ) @@ -2339,15 +2352,15 @@ def detailedOvertopping(self, floodedSubs, eventdays): # check whether this sub basin has flooded in this particular day if eventdays[j] in days: # filter the dataframe to the discharge column (3) and the days - self.DetailedOvertoppingLeft.loc[ + self.detailed_overtopping_left.loc[ eventdays[j], floodedSubs[i] ] = data.loc[data[0] == eventdays[j], 3].sum() else: - self.DetailedOvertoppingLeft.loc[ + self.detailed_overtopping_left.loc[ eventdays[j], floodedSubs[i] ] = 0 except: - self.DetailedOvertoppingLeft.loc[:, floodedSubs[i]] = 0 + self.detailed_overtopping_left.loc[:, floodedSubs[i]] = 0 continue # right Bank @@ -2366,36 +2379,36 @@ def detailedOvertopping(self, floodedSubs, eventdays): # check whether this sub basin has flooded in this particular day if eventdays[j] in days: # filter the dataframe to the discharge column (3) and the days - self.DetailedOvertoppingRight.loc[ + self.detailed_overtopping_right.loc[ eventdays[j], floodedSubs[i] ] = data.loc[data[0] == eventdays[j], 3].sum() else: - self.DetailedOvertoppingRight.loc[ + self.detailed_overtopping_right.loc[ eventdays[j], floodedSubs[i] ] = 0 except: - self.DetailedOvertoppingRight.loc[eventdays[j], floodedSubs[i]] = 0 + self.detailed_overtopping_right.loc[eventdays[j], floodedSubs[i]] = 0 continue # sum overtopping for each day for j in range(len(eventdays)): - self.DetailedOvertoppingLeft.loc[ + self.detailed_overtopping_left.loc[ eventdays[j], "sum" - ] = self.DetailedOvertoppingLeft.loc[eventdays[j], :].sum() - self.DetailedOvertoppingRight.loc[ + ] = self.detailed_overtopping_left.loc[eventdays[j], :].sum() + self.detailed_overtopping_right.loc[ eventdays[j], "sum" - ] = self.DetailedOvertoppingRight.loc[eventdays[j], :].sum() + ] = self.detailed_overtopping_right.loc[eventdays[j], :].sum() # sum overtopping for each sub basin for j in range(len(floodedSubs)): - self.DetailedOvertoppingLeft.loc[ + self.detailed_overtopping_left.loc[ "sum", floodedSubs[j] - ] = self.DetailedOvertoppingLeft.loc[:, floodedSubs[j]].sum() - self.DetailedOvertoppingRight.loc[ + ] = self.detailed_overtopping_left.loc[:, floodedSubs[j]].sum() + self.detailed_overtopping_right.loc[ "sum", floodedSubs[j] - ] = self.DetailedOvertoppingRight.loc[:, floodedSubs[j]].sum() + ] = self.detailed_overtopping_right.loc[:, floodedSubs[j]].sum() - # self.DetailedOvertoppingLeft.loc['sum','sum'] = self.DetailedOvertoppingLeft.loc[:,'sum'].sum() - # self.DetailedOvertoppingRight.loc['sum','sum'] = self.DetailedOvertoppingRight.loc[:,'sum'].sum() + # self.detailed_overtopping_left.loc['sum','sum'] = self.detailed_overtopping_left.loc[:,'sum'].sum() + # self.detailed_overtopping_right.loc['sum','sum'] = self.detailed_overtopping_right.loc[:,'sum'].sum() def coordinates(self, Bankful=False): """Coordinates. @@ -2413,7 +2426,7 @@ def coordinates(self, Bankful=False): 1-coordenates will be added to the "crosssection" attribute. 
""" if Bankful: - self.crosssections = self.crosssections.assign( + self.cross_sections = self.cross_sections.assign( x1=0, y1=0, z1=0, @@ -2440,27 +2453,27 @@ def coordinates(self, Bankful=False): z8=0, ) - for i in range(len(self.crosssections)): - inputs = self.crosssections.loc[ - i, list(self.crosssections.columns)[3:15] + for i in range(len(self.cross_sections)): + inputs = self.cross_sections.loc[ + i, list(self.cross_sections.columns)[3:15] ].tolist() - dbf = self.crosssections.loc[i, list(self.crosssections.columns)[16]] + dbf = self.cross_sections.loc[i, list(self.cross_sections.columns)[16]] outputs = self.getCoordinates(inputs, dbf) - self.crosssections.loc[ + self.cross_sections.loc[ i, ["x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8"] ] = outputs[0] - self.crosssections.loc[ + self.cross_sections.loc[ i, ["y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"] ] = outputs[1] - self.crosssections.loc[ + self.cross_sections.loc[ i, ["z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8"] ] = outputs[2] else: - self.crosssections = self.crosssections.assign( + self.cross_sections = self.cross_sections.assign( x1=0, y1=0, z1=0, @@ -2481,22 +2494,22 @@ def coordinates(self, Bankful=False): z6=0, ) dbf = False - for i in range(len(self.crosssections)): - inputs = self.crosssections.loc[ - i, list(self.crosssections.columns)[3:15] + for i in range(len(self.cross_sections)): + inputs = self.cross_sections.loc[ + i, list(self.cross_sections.columns)[3:15] ].tolist() outputs = self.getCoordinates(inputs, dbf) - self.crosssections.loc[ + self.cross_sections.loc[ i, ["x1", "x2", "x3", "x4", "x5", "x6"] ] = outputs[0] - self.crosssections.loc[ + self.cross_sections.loc[ i, ["y1", "y2", "y3", "y4", "y5", "y6"] ] = outputs[1] - self.crosssections.loc[ + self.cross_sections.loc[ i, ["z1", "z2", "z3", "z4", "z5", "z6"] ] = outputs[2] @@ -2927,7 +2940,7 @@ def getRatingCurve(self, MaxH=20, interval=0.02, dx=500): """ So = self.slope / dx # Rating Curve - geom = self.crosssections.loc[self.crosssections.index[0], :] + geom = self.cross_sections.loc[self.cross_sections.index[0], :] # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 "id,xsid,gl,zl,zr,hl,hr,bl,br,xl,yl,xr,yr,b,m,dbf" @@ -2985,20 +2998,20 @@ def getRatingCurve(self, MaxH=20, interval=0.02, dx=500): self.HQ = HQ[:, :] - def getDays(self, fromday: int, today: int): - """GetDays. + def getDays(self, from_day: int, to_day: int): + """getDays. GetDays method check if input days exist in the 1D result data or not since RIM1.0 simulate only days where discharge is above a certain value (2 years return period), you have to enter the - onedresultpath attribute of the instance first to read the results of + one_d_result_path attribute of the instance first to read the results of the given sub-basin Parameters ---------- - 1-fromday : [integer] + 1-from_day : [integer] the day you want to read the result from. - 2-today : [integer] + 2-to_day : [integer] the day you want to read the result to. Returns @@ -3009,19 +3022,18 @@ def getDays(self, fromday: int, today: int): day). 
""" data = pd.read_csv( - rf"{self.onedresultpath}\{self.id}.txt", header=None, delimiter=r"\s+" + rf"{self.one_d_result_path}\{self.id}.txt", header=None, delimiter=r"\s+" ) data.columns = ["day", "hour", "xs", "q", "h", "wl"] days = list(set(data["day"])) days.sort() - if fromday not in days: - Alt1 = fromday + if from_day not in days: + Alt1 = from_day stop = 0 - # search for the fromday in the days column + # search for the from_day in the days column while stop == 0: - # for i in range(0,10): try: np.where(data["day"] == Alt1)[0][0] # loc = stop = 1 @@ -3032,8 +3044,8 @@ def getDays(self, fromday: int, today: int): stop = 1 continue - Alt2 = fromday - # fromday = + Alt2 = from_day + # from_day = # search for closest later days stop = 0 while stop == 0: @@ -3050,7 +3062,7 @@ def getDays(self, fromday: int, today: int): text = ( """" - the fromday you entered does not exist in the data, and the closest day earlier than your input day is + the from_day you entered does not exist in the data, and the closest day earlier than your input day is """ + str(Alt1) + """ and the closest later day is """ @@ -3058,19 +3070,19 @@ def getDays(self, fromday: int, today: int): ) logger.debug(text) - if abs(Alt1 - fromday) > abs(Alt2 - fromday): + if abs(Alt1 - from_day) > abs(Alt2 - from_day): Alt1 = Alt2 else: - logger.debug("fromday you entered does exist in the data ") + logger.debug("from_day you entered does exist in the data ") # Alt1 = False - Alt1 = fromday + Alt1 = from_day - # if today does not exist in the results - if today not in days: - Alt3 = today + # if to_day does not exist in the results + if to_day not in days: + Alt3 = to_day stop = 0 - # search for the today in the days column + # search for the to_day in the days column while stop == 0: # for i in range(0,10): try: @@ -3083,8 +3095,8 @@ def getDays(self, fromday: int, today: int): stop = 1 continue - Alt4 = today - # fromday = + Alt4 = to_day + # from_day = # search for closest later days stop = 0 while stop == 0: @@ -3102,7 +3114,7 @@ def getDays(self, fromday: int, today: int): # Alt3 = [Alt3, Alt4] text = ( """" - the today you entered does not exist in the data, and the closest day earlier than your input day is + the to_day you entered does not exist in the data, and the closest day earlier than your input day is """ + str(Alt3) + """ and the closest later day is """ @@ -3110,13 +3122,13 @@ def getDays(self, fromday: int, today: int): ) logger.debug(text) - if abs(Alt3 - today) > abs(Alt4 - today): + if abs(Alt3 - to_day) > abs(Alt4 - to_day): Alt3 = Alt4 else: - logger.debug("today you entered does exist in the data ") + logger.debug("to_day you entered does exist in the data ") # Alt3 = False - Alt3 = today + Alt3 = to_day return Alt1, Alt3 @@ -3260,7 +3272,6 @@ def listAttributes(self): class Reach(River): - """Reach segment object. 
represent a segment of the river to create the Reach instance the @@ -3269,49 +3280,48 @@ class Reach(River): reach_attr = dict( - ExtractedValues=dict(), - XSHydrographs=None, - NegQmin=None, + extracted_values=dict(), + xs_hydrograph=None, + neg_qmin=None, Negative=None, - XSWaterLevel=None, - XSWaterDepth=None, + xs_water_level=None, + xs_water_depth=None, RRM=None, RRM2=None, - ResampledQ=None, - ResampledWL=None, - ResampledH=None, + resampled_q=None, + resampled_wl=None, + resampled_h=None, Qrp=None, - DetailedOvertoppingLeft=None, - DetailedOvertoppingRight=None, - AllOvertoppingVSXS=None, - AllOvertoppingVSTime=None, + detailed_overtopping_left=None, + detailed_overtopping_right=None, + all_overtopping_vs_xs=None, + all_overtopping_vs_time=None, BC=None, - AreaPerHigh=None, - AreaPerLow=None, - TotalFlow=None, - RRMProgression=None, - LateralsTable=None, + area_per_high=None, + area_per_low=None, + total_flow=None, + rrm_progression=None, + laterals_table=None, Laterals=None, - Result1D=None, - USHydrographs=None, + results_1d=None, + us_hydrographs=None, ) @class_attr_initialize(reach_attr) def __init__(self, sub_id: int, River, run_model: bool = False, *args, **kwargs): - # super().__init__(*args, **kwargs) # initialize the attributes with the river attributes for key, val in River.__dict__.items(): setattr(self, key, val) self.id = sub_id - if not isinstance(River.crosssections, DataFrame): + if not isinstance(River.cross_sections, DataFrame): raise ValueError( "please read the cross sections for the whole river with 'ReadCrossSections' " "method before creating the sub-segment instance" ) # filter the whole cross section file and get the cross section of the segment - self.crosssections = River.crosssections[River.crosssections["id"] == sub_id] + self.cross_sections = River.cross_sections[River.cross_sections["id"] == sub_id] self._getXS(run_model=run_model) if isinstance(River.slope, DataFrame) and self.id in River.slope["id"].tolist(): @@ -3332,7 +3342,7 @@ def __init__(self, sub_id: int, River, run_model: bool = False, *args, **kwargs) self.SP.index = list(range(len(self.SP))) def _getXS(self, run_model: bool): - """get the cross sections of the current river reach. + """Get the cross sections of the current river reach. 
Parameters ---------- @@ -3341,41 +3351,41 @@ def _getXS(self, run_model: bool): Returns ------- - crosssections: [DataFrame] - Replaces the crosssections attributes in the reach object from the whole river cross sections + cross_sections: [DataFrame] + Replaces the cross_sections attributes in the reach object from the whole river cross sections to the cross section of the current reach only - lastxs: [int] + last_xs: [int] the id of the last cross section - firstxs: [int] + first_xs: [int] the id of the first cross section - xsname: [List] + xs_names: [List] list of current reach cross sections id xsno: [int] number of cross sections in the current river reach """ if run_model: - self.xsid = self.crosssections.loc[:, "xsid"].values - self.dbf = self.crosssections.loc[:, "dbf"].values - self.bedlevel = self.crosssections.loc[:, "gl"].values - self.hl = self.crosssections.loc[:, "hl"].values - self.cl = self.crosssections.loc[:, "bl"].values - self.zl = self.crosssections.loc[:, "zl"].values - self.hr = self.crosssections.loc[:, "hr"].values - self.cr = self.crosssections.loc[:, "br"].values - self.zr = self.crosssections.loc[:, "zr"].values - self.mw = self.crosssections.loc[:, "b"].values - self.mn = self.crosssections.loc[:, "m"].values - - self.crosssections.index = list(range(len(self.crosssections))) - self.lastxs = self.crosssections.loc[len(self.crosssections) - 1, "xsid"] - self.firstxs = self.crosssections.loc[0, "xsid"] - self.xsname = self.crosssections["xsid"].tolist() - self.xsno = len(self.xsname) + self.xsid = self.cross_sections.loc[:, "xsid"].values + self.dbf = self.cross_sections.loc[:, "dbf"].values + self.bedlevel = self.cross_sections.loc[:, "gl"].values + self.hl = self.cross_sections.loc[:, "hl"].values + self.cl = self.cross_sections.loc[:, "bl"].values + self.zl = self.cross_sections.loc[:, "zl"].values + self.hr = self.cross_sections.loc[:, "hr"].values + self.cr = self.cross_sections.loc[:, "br"].values + self.zr = self.cross_sections.loc[:, "zr"].values + self.mw = self.cross_sections.loc[:, "b"].values + self.mn = self.cross_sections.loc[:, "m"].values + + self.cross_sections.index = list(range(len(self.cross_sections))) + self.last_xs = self.cross_sections.loc[len(self.cross_sections) - 1, "xsid"] + self.first_xs = self.cross_sections.loc[0, "xsid"] + self.xs_names = self.cross_sections["xsid"].tolist() + self.xsno = len(self.xs_names) def read1DResult( self, - fromday: Union[int, str] = None, - today: Union[int, str] = None, + from_day: Union[int, str] = None, + to_day: Union[int, str] = None, fill_missing: bool = True, addHQ2: bool = False, path: str = None, @@ -3385,169 +3395,168 @@ def read1DResult( ): """read1DResult. - Read1DResult method reads the 1D (1D-2D coupled) result of the sub-basin the object is - created for and return the hydrograph of the first and last cross section. the method will not read the 1 - D result file again if you tried to read results of the same sub-basin again, so you have to re-instantiate - the object. + - Read1DResult method reads the 1D result of the river reach and returns the hydrograph of the first + and last cross section. + - the method will not read the 1D result file again if you tried to read results of the same sub-basin again, + so you have to re-instantiate the object. Parameters ---------- - fromday : [integer], optional - the index of the day you want the data to start from. - The default is empty. 
it means read everything - today : [integer], optional - the index of the day you want the data to end to. The default + from_day : [integer], optional + the order of the day you want the data to start from. + The default is None, which means read everything + to_day : [integer], optional + the order of the day you want the data to end to. The default is None, which means read everything fill_missing : [Bool], optional - Fill the missing days. The default is False. + Fill the missing days with zeroes. The default is True. addHQ2 : [Bool], optional to add the value of HQ2. The default is False. path : [String], optional - path to read the results from. The default is ''. + path to read the results from. The default is None. xsid : [Integer], optional - id of a specific cross section you want to get the results on - it. The default is ''. + id of a specific cross section you want to extract the results + for. The default is None. chunk_size: [int] - size of the chunk if you want to read the file in chunks Default is = None + size of the chunk if you want to read the file in chunks. The default is None. extension: [str] - the extension of the file. Default is ".txt" + The extension of the file. Default is ".txt" Returns ------- - Result1D : [attribute] - the results read will be stored (as it is without any filter) - in the attribute "Result1D" - XSHydrographs : [dataframe attribute] + results_1d : [attribute] + The results read from the file, as is, will be stored in the attribute "results_1d" + xs_hydrograph : [dataframe attribute] dataframe containing hydrographs at the position of the first and last cross section - XSWaterLevel : [dataframe attribute] + xs_water_level : [dataframe attribute] dataframe containing water levels at the position of the first and last cross section - firstdayresults:[attribute] + first_day_results:[attribute] the first day in the 1D result - lastday:[attribute] + last_day:[attribute] the last day in the 1D result """ - if path and self.onedresultpath == "": + if path is None and self.one_d_result_path == "": raise ValueError( "User has to either enter the value of the 'path' parameter or" - " define the 'onedresultpath' parameter for the River object" + " define the 'one_d_result_path' parameter for the River object" ) # if the results are not read yet read it - if not isinstance(self.Result1D, DataFrame): + if not isinstance(self.results_1d, DataFrame): River.read1DResult( self, self.id, - fromday, - today, + from_day, + to_day, path=path, fill_missing=fill_missing, chunk_size=chunk_size, extension=extension, ) # get the index of the days and convert them into dates - if not fromday: - fromday = self.Result1D.loc[0, "day"] - if not today: - today = self.Result1D.loc[len(self.Result1D) - 1, "day"] + if not from_day: + from_day = self.results_1d.loc[0, "day"] + if not to_day: + to_day = self.results_1d.loc[len(self.results_1d) - 1, "day"] - start = self.indexToDate(fromday) - end = self.indexToDate(today + 1) + start = self.indexToDate(from_day) + end = self.indexToDate(to_day + 1) - if not isinstance(self.XSHydrographs, DataFrame): - self.XSHydrographs = pd.DataFrame( + if not isinstance(self.xs_hydrograph, DataFrame): + self.xs_hydrograph = pd.DataFrame( index=pd.date_range(start, end, freq="H")[:-1] ) - self.XSWaterLevel = pd.DataFrame( + self.xs_water_level = pd.DataFrame( index=pd.date_range(start, end, freq="H")[:-1] ) - self.XSWaterDepth = pd.DataFrame( + self.xs_water_depth = pd.DataFrame( index=pd.date_range(start, end, freq="H")[:-1] ) # check if the xsid is in 
the sub-basin if xsid: - if xsid not in self.xsname: + if xsid not in self.xs_names: raise ValueError( f"The given cross-section {xsid} does not exist inside the " - f"current Segment of the river, first XS is {self.firstxs}, and last " - f"XS is {self.lastxs}" + f"current reach of the river, first XS is {self.first_xs}, and last " + f"XS is {self.last_xs}" ) # get the simulated hydrograph and add the cut HQ2 if addHQ2: - self.XSHydrographs[self.lastxs] = ( - self.Result1D.loc[self.Result1D["xs"] == self.lastxs, "q"].values + self.xs_hydrograph[self.last_xs] = ( + self.results_1d.loc[self.results_1d["xs"] == self.last_xs, "q"].values + self.RP["HQ2"].tolist()[0] ) - self.XSHydrographs[self.firstxs] = ( - self.Result1D.loc[self.Result1D["xs"] == self.firstxs, "q"].values + self.xs_hydrograph[self.first_xs] = ( + self.results_1d.loc[self.results_1d["xs"] == self.first_xs, "q"].values + self.RP["HQ2"].tolist()[0] ) if xsid: - self.XSHydrographs[xsid] = ( - self.Result1D.loc[self.Result1D["xs"] == xsid, "q"].values + self.xs_hydrograph[xsid] = ( + self.results_1d.loc[self.results_1d["xs"] == xsid, "q"].values + self.RP["HQ2"].tolist()[0] ) else: - self.XSHydrographs[self.lastxs] = self.Result1D.loc[ - self.Result1D["xs"] == self.lastxs, "q" + self.xs_hydrograph[self.last_xs] = self.results_1d.loc[ - self.results_1d["xs"] == self.last_xs, "q" hmm + self.results_1d["xs"] == self.last_xs, "q" ].values - self.XSHydrographs[self.firstxs] = self.Result1D.loc[ - self.Result1D["xs"] == self.firstxs, "q" + self.xs_hydrograph[self.first_xs] = self.results_1d.loc[ + self.results_1d["xs"] == self.first_xs, "q" ].values if xsid: - self.XSHydrographs[xsid] = self.Result1D.loc[ - self.Result1D["xs"] == xsid, "q" + self.xs_hydrograph[xsid] = self.results_1d.loc[ + self.results_1d["xs"] == xsid, "q" ].values - self.XSWaterLevel[self.lastxs] = self.Result1D.loc[ - self.Result1D["xs"] == self.lastxs, "wl" + self.xs_water_level[self.last_xs] = self.results_1d.loc[ + self.results_1d["xs"] == self.last_xs, "wl" ].values - self.XSWaterLevel[self.firstxs] = self.Result1D.loc[ - self.Result1D["xs"] == self.firstxs, "wl" + self.xs_water_level[self.first_xs] = self.results_1d.loc[ + self.results_1d["xs"] == self.first_xs, "wl" ].values - self.XSWaterDepth[self.lastxs] = self.Result1D.loc[ - self.Result1D["xs"] == self.lastxs, "h" + self.xs_water_depth[self.last_xs] = self.results_1d.loc[ + self.results_1d["xs"] == self.last_xs, "h" ].values - self.XSWaterDepth[self.firstxs] = self.Result1D.loc[ - self.Result1D["xs"] == self.firstxs, "h" + self.xs_water_depth[self.first_xs] = self.results_1d.loc[ + self.results_1d["xs"] == self.first_xs, "h" ].values if xsid: - self.XSWaterLevel[xsid] = self.Result1D.loc[ - self.Result1D["xs"] == xsid, "wl" + self.xs_water_level[xsid] = self.results_1d.loc[ + self.results_1d["xs"] == xsid, "wl" ].values - self.XSWaterDepth[xsid] = self.Result1D.loc[ - self.Result1D["xs"] == xsid, "h" + self.xs_water_depth[xsid] = self.results_1d.loc[ + self.results_1d["xs"] == xsid, "h" ].values # check the first day in the results and get the date of the first day and last day # create time series - self.from_beginning = self.Result1D["day"][0] - self.firstday = self.indexToDate(self.from_beginning) + self.from_beginning = self.results_1d["day"][0] + self.first_day = self.indexToDate(self.from_beginning) # if there are empty days at the beginning the filling missing days is # not going to detect it so ignore it here by starting from the first # day in the data (data['day'][0]) dataframe empty days at the # beginning # TODO - # the from_beginning 
and firstdayresults are exactly the same + # the from_beginning and first_day_results are exactly the same # delete one of them - self.firstdayresults = self.indexToDate(self.Result1D.loc[0, "day"]) - lastday = self.Result1D.loc[self.Result1D.index[-1], "day"] - self.lastday = self.indexToDate(lastday) + self.first_day_results = self.indexToDate(self.results_1d.loc[0, "day"]) + last_day = self.results_1d.loc[self.results_1d.index[-1], "day"] + self.last_day = self.indexToDate(last_day) # last days+1 as range does not include the last element - self.daylist = list( + self.days_list = list( range( - self.Result1D.loc[0, "day"], - self.Result1D.loc[self.Result1D.index[-1], "day"] + 1, + self.results_1d.loc[0, "day"], + self.results_1d.loc[self.results_1d.index[-1], "day"] + 1, ) ) - self.referenceindex_results = pd.date_range( - self.firstdayresults, self.lastday, freq="D" + self.reference_index_results = pd.date_range( + self.first_day_results, self.last_day, freq="D" ) def extractXS(self, xsid: int, addHQ2: bool = False, WaterLevel: bool = True): @@ -3568,22 +3577,22 @@ def extractXS(self, xsid: int, addHQ2: bool = False, WaterLevel: bool = True): None. """ assert isinstance( - self.Result1D, DataFrame + self.results_1d, DataFrame ), "please use the Read1DResult method to read the results first" # assert hasattr(self,"RP"), "please use the Read1DResult method to read the results first" if addHQ2: - self.XSHydrographs[xsid] = ( - self.Result1D["q"][self.Result1D["xs"] == xsid].values + self.xs_hydrograph[xsid] = ( + self.results_1d["q"][self.results_1d["xs"] == xsid].values + self.RP["HQ2"].tolist()[0] ) else: - self.XSHydrographs[xsid] = self.Result1D["q"][ - self.Result1D["xs"] == xsid + self.xs_hydrograph[xsid] = self.results_1d["q"][ + self.results_1d["xs"] == xsid ].values if WaterLevel: - self.XSWaterLevel[xsid] = self.Result1D["wl"][ - self.Result1D["xs"] == xsid + self.xs_water_level[xsid] = self.results_1d["wl"][ + self.results_1d["xs"] == xsid ].values def CheckNegativeQ(self, plot: bool = False, TS: str = "hourly"): @@ -3595,26 +3604,26 @@ def CheckNegativeQ(self, plot: bool = False, TS: str = "hourly"): dictionary with ['NegQ', 'NegXS', 'NegQind'] as keys """ if TS == "hourly": - assert isinstance(self.Result1D, DataFrame), ( - "please use the Result1D method to read the " + assert isinstance(self.results_1d, DataFrame), ( + "please use the results_1d method to read the " "result of this sub-basin first" ) - if self.Result1D["q"].min() < 0: + if self.results_1d["q"].min() < 0: logger.debug("NegativeDischarge") # extract -ve discharge data if exist self.Negative = dict() - self.Negative["NegQ"] = self.Result1D[self.Result1D["q"] < 0] + self.Negative["NegQ"] = self.results_1d[self.results_1d["q"] < 0] self.Negative["NegXS"] = list(set(self.Negative["NegQ"]["xs"])) self.Negative["NegQind"] = self.Negative["NegQ"].index.tolist() self.Negative["QN"] = pd.DataFrame() for i in range(len(self.Negative["NegXS"])): - self.Negative["QN"][self.Negative["NegXS"][i]] = self.Result1D["q"][ - self.Result1D["xs"] == self.Negative["NegXS"][i] - ] + self.Negative["QN"][self.Negative["NegXS"][i]] = self.results_1d[ + "q" + ][self.results_1d["xs"] == self.Negative["NegXS"][i]] - self.Negative["QN"].index = self.XSHydrographs.index + self.Negative["QN"].index = self.xs_hydrograph.index if plot: plt.figure(30, figsize=(15, 8)) @@ -3632,23 +3641,23 @@ def CheckNegativeQ(self, plot: bool = False, TS: str = "hourly"): elif TS == "1min": assert hasattr( self, "q" - ), "please use the Result1D method to 
read the result of this sub-basin first" - # NegQmin = pd.DataFrame() + ), "please use the results_1d method to read the result of this sub-basin first" + # neg_qmin = pd.DataFrame() NegQmin = self.q NegQmin.loc[:, "date"] = self.q.index[:] NegQmin.index = range(len(NegQmin.index)) - f = NegQmin[NegQmin[self.xsname[0]] < 0] + f = NegQmin[NegQmin[self.xs_names[0]] < 0] - for i in range(len(self.xsname[1:])): - f = f.append(NegQmin[NegQmin[self.xsname[i + 1]] < 0]) + for i in range(len(self.xs_names[1:])): + f = f.append(NegQmin[NegQmin[self.xs_names[i + 1]] < 0]) self.NegQmin = f def readRRMHydrograph( self, station_id: int, - fromday: Union[int, str] = None, - today: Union[int, str] = None, + from_day: Union[int, str] = None, + to_day: Union[int, str] = None, path: str = None, date_format: str = "%d_%m_%Y", location: int = 1, @@ -3663,15 +3672,15 @@ def readRRMHydrograph( ---------- station_id : [Integer] DESCRIPTION. - fromday : [Integer], optional + from_day : [Integer], optional start day of the period you want to read its results. The default is []. - today : [Integer], optional + to_day : [Integer], optional end day of the period you want to read its results. The default is []. path: [str] path to the directory where the result files are. if not given, the - river.rrmpath should be given. default is '' + river.rrm_path should be given. default is '' date_format: [str] format of the date string, default is "%d_%m_%Y" location: [1] @@ -3695,7 +3704,7 @@ self.RRM2 = pd.DataFrame() if not path: - path = self.rrmpath + path = self.rrm_path if location == 2 and not path2: raise ValueError( @@ -3705,31 +3714,31 @@ if location == 1: self.RRM[station_id] = self._readRRMResults( self.version, - self.rrmreferenceindex, + self.rrm_reference_index, path, station_id, - fromday, - today, + from_day, + to_day, date_format, )[station_id].tolist() else: self.RRM[station_id] = self._readRRMResults( self.version, - self.rrmreferenceindex, + self.rrm_reference_index, path, station_id, - fromday, - today, + from_day, + to_day, date_format, )[station_id].tolist() try: self.RRM2[station_id] = self._readRRMResults( self.version, - self.rrmreferenceindex, + self.rrm_reference_index, path2, station_id, - fromday, - today, + from_day, + to_day, date_format, )[station_id].tolist() except FileNotFoundError: @@ -3740,13 +3749,13 @@ logger.info("RRM time series for the gauge " + str(station_id) + " is read") - if not fromday: - fromday = 1 - if not today: - today = len(self.RRM[station_id]) + if not from_day: + from_day = 1 + if not to_day: + to_day = len(self.RRM[station_id]) - start = self.rrmreferenceindex.loc[fromday, "date"] - end = self.rrmreferenceindex.loc[today, "date"] + start = self.rrm_reference_index.loc[from_day, "date"] + end = self.rrm_reference_index.loc[to_day, "date"] if location == 1: self.RRM.index = pd.date_range(start, end, freq="D") @@ -3759,8 +3768,8 @@ def resample( self, xsid, ColumnName, - fromday: Union[int, str] = "", - today: Union[int, str] = "", + from_day: Union[int, str] = "", + to_day: Union[int, str] = "", Delete=False, ): """Resample. Resample method extracts the value at the last hour of the day. Parameters ---------- xsid : TYPE DESCRIPTION. ColumnName : [String] the column name you want to resample in the results1D. ColumnName could be 'q' for discharge, 'wl' for water level, and 'h' for water depth. - fromday : [integer], optional + from_day : [integer], optional starting day. The default is ''.
- today : [integer], optional + to_day : [integer], optional end day. The default is ''. Delete : [boolean], optional to delete the previously resampled data frame to create another one. @@ -3786,62 +3795,66 @@ Returns ------- - ResampledQ, ResampledWL, ResampledH: [dataframe attribute] + resampled_q, resampled_wl, resampled_h: [dataframe attribute] depending on the given ColumnName the attribute will be created, - if 'q' the attribute will be ResampledQ, and the same for "wl", and "H" - and inside the ResampledQ a column will be created with the given xsid + if 'q' the attribute will be resampled_q, and the same for "wl", and "H" + and inside the resampled_q a column will be created with the given xsid containing the resampled values """ - assert hasattr(self, "Result1D"), "please read the 1D results" + assert hasattr(self, "results_1d"), "please read the 1D results" - if fromday == "": - fromday = self.Result1D.loc[0, "day"] - if today == "": - today = self.Result1D.loc[len(self.Result1D) - 1, "day"] + if from_day == "": + from_day = self.results_1d.loc[0, "day"] + if to_day == "": + to_day = self.results_1d.loc[len(self.results_1d) - 1, "day"] - # start = self.IndexToDate(fromday) - # end = self.IndexToDate(today) + # start = self.IndexToDate(from_day) + # end = self.IndexToDate(to_day) - # start = self.referenceindex.loc[fromday,'date'] - # end = self.referenceindex.loc[today,'date'] + # start = self.reference_index.loc[from_day,'date'] + # end = self.reference_index.loc[to_day,'date'] ind = pd.date_range( - self.indexToDate(fromday), self.indexToDate(today), freq="D" + self.indexToDate(from_day), self.indexToDate(to_day), freq="D" ) - if ColumnName == "q" and not hasattr(self, "ResampledQ"): - self.ResampledQ = pd.DataFrame(index=ind) + if ColumnName == "q" and not hasattr(self, "resampled_q"): + self.resampled_q = pd.DataFrame(index=ind) elif ColumnName == "q": if Delete: - del self.ResampledQ + del self.resampled_q - if ColumnName == "wl" and not hasattr(self, "ResampledWL"): - self.ResampledWL = pd.DataFrame(index=ind) + if ColumnName == "wl" and not hasattr(self, "resampled_wl"): + self.resampled_wl = pd.DataFrame(index=ind) elif ColumnName == "wl": if Delete: - del self.ResampledWL + del self.resampled_wl - if ColumnName == "h" and not hasattr(self, "ResampledH"): - self.ResampledH = pd.DataFrame(index=ind) + if ColumnName == "h" and not hasattr(self, "resampled_h"): + self.resampled_h = pd.DataFrame(index=ind) elif ColumnName == "h": if Delete: - del self.ResampledH + del self.resampled_h - Q = self.Result1D[self.Result1D["xs"] == xsid][self.Result1D["hour"] == 24] - Q = Q[ColumnName][self.Result1D["day"] >= fromday][ - self.Result1D["day"] <= today + Q = self.results_1d[self.results_1d["xs"] == xsid][ + self.results_1d["hour"] == 24 + ] + Q = Q[ColumnName][self.results_1d["day"] >= from_day][ + self.results_1d["day"] <= to_day ] # self.Q = Q if ColumnName == "q": - self.ResampledQ.loc[:, xsid] = Q.tolist() + self.resampled_q.loc[:, xsid] = Q.tolist() elif ColumnName == "wl": - self.ResampledWL.loc[:, xsid] = Q.tolist() + self.resampled_wl.loc[:, xsid] = Q.tolist() elif ColumnName == "h": - self.ResampledH.loc[:, xsid] = Q.tolist() + self.resampled_h.loc[:, xsid] = Q.tolist() def detailedStatisticalCalculation(self, T): - """DetailedStatisticalCalculation. DetailedStatisticalCalculation method calculates the discharge related to a specific given return period.
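A minimal pandas sketch of the idea behind the resample method above — filtering one cross-section down to the hour-24 rows so each day keeps only its last hourly value (hypothetical data, not the library's code):

    import pandas as pd

    # hypothetical hourly 1D results: 2 days x 24 hours at cross-section 100
    results_1d = pd.DataFrame(
        {
            "xs": 100,
            "day": [d for d in (1, 2) for _ in range(24)],
            "hour": list(range(1, 25)) * 2,
            "q": [float(i) for i in range(48)],
        }
    )
    # keep only the value at the last hour (hour == 24) of each day
    daily = results_1d[(results_1d["xs"] == 100) & (results_1d["hour"] == 24)]
    print(daily[["day", "q"]])  # one row per day, q taken at hour 24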
+ + DetailedStatisticalCalculation method calculates the discharge related to a specific given return period. Parameters ---------- @@ -3870,31 +3883,31 @@ def detailedOvertopping(self, eventdays): Returns ------- - 1- DetailedOvertoppingLeft:[data frame attribute] + 1- detailed_overtopping_left:[data frame attribute] containing the computational node and rainfall-runoff results (hydrograph)with columns ['id', Nodeid ] - 2-DetailedOvertoppingRight:[data frame attribute] + 2-detailed_overtopping_right:[data frame attribute] containing the computational node and rainfall-runoff results (hydrograph)with columns ['id', Nodeid ] - 3-AllOvertoppingVSXS: - 4-AllOvertoppingVSTime: + 3-all_overtopping_vs_xs: + 4-all_overtopping_vs_time: """ # River.DetailedOvertopping(self, [self.id], eventdays) - XSs = self.crosssections.loc[:, "xsid"].tolist() + XSs = self.cross_sections.loc[:, "xsid"].tolist() columns = [self.id] + XSs + ["sum"] - self.DetailedOvertoppingLeft = pd.DataFrame( + self.detailed_overtopping_left = pd.DataFrame( index=eventdays + ["sum"], columns=columns ) - self.DetailedOvertoppingLeft.loc[:, columns] = 0 - self.DetailedOvertoppingRight = pd.DataFrame( + self.detailed_overtopping_left.loc[:, columns] = 0 + self.detailed_overtopping_right = pd.DataFrame( index=eventdays + ["sum"], columns=columns ) - self.DetailedOvertoppingRight.loc[:, columns] = 0 + self.detailed_overtopping_right.loc[:, columns] = 0 # Left Bank try: # try to open and read the overtopping file data = pd.read_csv( - f"{self.onedresultpath}{self.id}{self.leftovertopping_suffix}", + f"{self.one_d_result_path}{self.id}{self.left_over_topping_suffix}", header=None, delimiter=r"\s+", ) @@ -3907,9 +3920,9 @@ def detailedOvertopping(self, eventdays): # check whether this sub basin has flooded in this particular day if eventdays[j] in days: # filter the dataframe to the discharge column (3) and the days - self.DetailedOvertoppingLeft.loc[eventdays[j], self.id] = data.loc[ - data["day"] == eventdays[j], "q" - ].sum() + self.detailed_overtopping_left.loc[ + eventdays[j], self.id + ] = data.loc[data["day"] == eventdays[j], "q"].sum() # get the xss that was overtopped in that particular day XSday = list( set(data.loc[data["day"] == eventdays[j], "xsid"].tolist()) @@ -3917,21 +3930,21 @@ def detailedOvertopping(self, eventdays): for i in range(len(XSday)): # dataXS = data['q'].loc[data['day'] == eventdays[j]][data['xsid'] == XSday[i]].sum() - self.DetailedOvertoppingLeft.loc[eventdays[j], XSday[i]] = ( + self.detailed_overtopping_left.loc[eventdays[j], XSday[i]] = ( data["q"] .loc[data["day"] == eventdays[j]][data["xsid"] == XSday[i]] .sum() ) else: - self.DetailedOvertoppingLeft.loc[eventdays[j], self.id] = 0 + self.detailed_overtopping_left.loc[eventdays[j], self.id] = 0 except: - self.DetailedOvertoppingLeft.loc[:, self.id] = 0 + self.detailed_overtopping_left.loc[:, self.id] = 0 # right Bank try: # try to open and read the overtopping file data = pd.read_csv( - rf"{self.onedresultpath}\{self.id}{self.rightovertopping_suffix}", + rf"{self.one_d_result_path}\{self.id}{self.right_overtopping_suffix}", header=None, delimiter=r"\s+", ) @@ -3943,9 +3956,9 @@ def detailedOvertopping(self, eventdays): # check whether this sub basin has flooded in this particular day if eventdays[j] in days: # filter the dataframe to the discharge column (3) and the days - self.DetailedOvertoppingRight.loc[eventdays[j], self.id] = data.loc[ - data["day"] == eventdays[j], "q" - ].sum() + self.detailed_overtopping_right.loc[ + eventdays[j], self.id + ] 
= data.loc[data["day"] == eventdays[j], "q"].sum() # get the xss that was overtopped in that particular day XSday = list( set(data.loc[data["day"] == eventdays[j], "xsid"].tolist()) @@ -3953,59 +3966,63 @@ def detailedOvertopping(self, eventdays): for i in range(len(XSday)): # dataXS = data['q'].loc[data['day'] == eventdays[j]][data['xsid'] == XSday[i]].sum() - self.DetailedOvertoppingRight.loc[eventdays[j], XSday[i]] = ( + self.detailed_overtopping_right.loc[eventdays[j], XSday[i]] = ( data["q"] .loc[data["day"] == eventdays[j]][data["xsid"] == XSday[i]] .sum() ) else: - self.DetailedOvertoppingRight.loc[eventdays[j], self.id] = 0 + self.detailed_overtopping_right.loc[eventdays[j], self.id] = 0 except: # logger.debug("file did not open") - self.DetailedOvertoppingRight.loc[:, self.id] = 0 + self.detailed_overtopping_right.loc[:, self.id] = 0 # sum overtopping for each day for j in range(len(eventdays)): - self.DetailedOvertoppingLeft.loc[ + self.detailed_overtopping_left.loc[ eventdays[j], "sum" - ] = self.DetailedOvertoppingLeft.loc[eventdays[j], XSs].sum() - self.DetailedOvertoppingRight.loc[ + ] = self.detailed_overtopping_left.loc[eventdays[j], XSs].sum() + self.detailed_overtopping_right.loc[ eventdays[j], "sum" - ] = self.DetailedOvertoppingRight.loc[eventdays[j], XSs].sum() + ] = self.detailed_overtopping_right.loc[eventdays[j], XSs].sum() # sum overtopping for each sub basin for j in range(len(XSs)): - self.DetailedOvertoppingLeft.loc[ + self.detailed_overtopping_left.loc[ "sum", XSs[j] - ] = self.DetailedOvertoppingLeft.loc[:, XSs[j]].sum() - self.DetailedOvertoppingRight.loc[ + ] = self.detailed_overtopping_left.loc[:, XSs[j]].sum() + self.detailed_overtopping_right.loc[ "sum", XSs[j] - ] = self.DetailedOvertoppingRight.loc[:, XSs[j]].sum() + ] = self.detailed_overtopping_right.loc[:, XSs[j]].sum() - self.DetailedOvertoppingLeft.loc[ + self.detailed_overtopping_left.loc[ "sum", self.id - ] = self.DetailedOvertoppingLeft.loc[:, self.id].sum() - self.DetailedOvertoppingRight.loc[ + ] = self.detailed_overtopping_left.loc[:, self.id].sum() + self.detailed_overtopping_right.loc[ "sum", self.id - ] = self.DetailedOvertoppingRight.loc[:, self.id].sum() + ] = self.detailed_overtopping_right.loc[:, self.id].sum() - self.AllOvertoppingVSXS = ( - self.DetailedOvertoppingLeft.loc["sum", XSs] - + self.DetailedOvertoppingRight.loc["sum", XSs] + self.all_overtopping_vs_xs = ( + self.detailed_overtopping_left.loc["sum", XSs] + + self.detailed_overtopping_right.loc["sum", XSs] ) - self.AllOvertoppingVSTime = pd.DataFrame() - self.AllOvertoppingVSTime["id"] = eventdays - self.AllOvertoppingVSTime.loc[:, "Overtopping"] = ( - self.DetailedOvertoppingLeft.loc[eventdays, "sum"] - + self.DetailedOvertoppingRight.loc[eventdays, "sum"] + self.all_overtopping_vs_time = pd.DataFrame() + self.all_overtopping_vs_time["id"] = eventdays + self.all_overtopping_vs_time.loc[:, "Overtopping"] = ( + self.detailed_overtopping_left.loc[eventdays, "sum"] + + self.detailed_overtopping_right.loc[eventdays, "sum"] ).tolist() - self.AllOvertoppingVSTime.loc[:, "date"] = ( - self.referenceindex.loc[eventdays[0] : eventdays[-1], "date"] + self.all_overtopping_vs_time.loc[:, "date"] = ( + self.reference_index.loc[eventdays[0] : eventdays[-1], "date"] ).tolist() def saveHydrograph(self, xsid: int, path: str = None, Option: int = 1): - """Save Hydrograph. SaveHydrograph method saves the hydrograph of any cross-section in the segment. 
Mainly the method is created to to be used to save the last cross-section hydrograph to use it as as a boundary condition for the downstream segment. + """Save Hydrograph. + + - SaveHydrograph method saves the hydrograph of any cross-section in the segment. + - Mainly the method is created to be used to save the last cross-section hydrograph to use it as a + boundary condition for the downstream segment. Parameters ---------- @@ -4030,8 +4047,8 @@ def saveHydrograph(self, xsid: int, path: str = None, Option: int = 1): ) path = self.customized_runs_path - ts = self.XSHydrographs[xsid].resample("D").last().to_frame() - val = [self.XSHydrographs[xsid][0]] + self.XSHydrographs[xsid].resample( + ts = self.xs_hydrograph[xsid].resample("D").last().to_frame() + val = [self.xs_hydrograph[xsid][0]] + self.xs_hydrograph[xsid].resample( "D" ).last().values.tolist()[:-1] ts[xsid] = val @@ -4041,12 +4058,12 @@ f["discharge(m3/s)"] = ts if Option == 1: - val = [self.XSWaterDepth[xsid][0]] + self.XSWaterDepth[xsid].resample( + val = [self.xs_water_depth[xsid][0]] + self.xs_water_depth[xsid].resample( "D" ).last().values.tolist()[:-1] f["water depth(m)"] = val else: - val = [self.XSWaterLevel[xsid][0]] + self.XSWaterLevel[xsid].resample( + val = [self.xs_water_level[xsid][0]] + self.xs_water_level[xsid].resample( "D" ).last().values.tolist()[:-1] f["water level(m)"] = val @@ -4056,17 +4073,20 @@ def plotHydrographProgression( self, xss: list, - start: str, - end: str, - fromxs: Union[str, int] = "", - toxs: Union[str, int] = "", - linewidth: int = 4, + start: str = None, + end: str = None, + from_xs: int = None, + to_xs: int = None, + line_width: int = 4, spacing: int = 5, - figsize: tuple = (7, 5), + fig_size: tuple = (7, 5), xlabels: Union[bool, int] = False, fmt="%Y-%m-%d", ) -> Tuple[Figure, object]: - """PlotHydrographProgression. plot the hydrograph for several vross section in the segment, cross section are chosen based on the spacing (spacing equal 5 mean from the beginning take eavery fifth cross section) + """PlotHydrographProgression. + + - plot the hydrograph for several cross sections in the segment; cross sections are chosen based on the + spacing (spacing equal to 5 means from the beginning take every fifth cross section) Parameters ---------- @@ -4076,63 +4096,64 @@ xss : TYPE DESCRIPTION. start : TYPE DESCRIPTION. end : TYPE DESCRIPTION. - fromxs: [str, int] + from_xs: [str, int] default "". - toxs: [str, int] + to_xs: [str, int] default is "" - linewidth : [integer], optional + line_width : [integer], optional width of the plots. The default is 4. spacing : [integer] hydrographs are going to be plotted every spacing. The default is 5. - figsize: [tuple] + fig_size: [tuple] default is (7, 5). xlabels: [bool, int] default is False. fmt: [string] - format of the date. fmt="%Y-%m-%d %H:%M:%S" + format of the date. fmt="%Y-%m-%d %H:%M:%S". + Returns ------- None.
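Given the renamed keyword arguments above, a hypothetical call might look like this (the reach object `sub` and the cross-section ids are illustrative, not taken from the source):

    # plot every 5th cross-section between the reach's first and last XS,
    # plus the explicitly requested ids in `xss`
    fig, ax = sub.plotHydrographProgression(
        xss=[50, 75],               # hypothetical extra cross-section ids
        start="1955-01-01",
        end="1955-03-21",
        spacing=5,
        line_width=2,
        fig_size=(10, 6),
    )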
""" - if start == "": - start = self.firstday + if start is None: + start = self.first_day else: start = dt.datetime.strptime(start, fmt) - if end == "": - end = self.lastday + if end is None: + end = self.last_day else: end = dt.datetime.strptime(end, fmt) - if fromxs == "": - fromxs = self.firstxs + if from_xs is None: + from_xs = self.first_xs - if toxs == "": - toxs = self.lastxs - xss.append(toxs) + if to_xs is None: + to_xs = self.last_xs + xss.append(to_xs) - fromxs = self.xsname.index(fromxs) - toxs = self.xsname.index(toxs) - xslist = self.xsname[fromxs : toxs + 1 : spacing] + from_xs = self.xs_names.index(from_xs) + to_xs = self.xs_names.index(to_xs) + xs_list = self.xs_names[from_xs : to_xs + 1 : spacing] - xslist = xslist + xss + xs_list = xs_list + xss # to remove repeated XSs - xslist = list(set(xslist)) + xs_list = list(set(xs_list)) # extract the XS hydrographs - for i in range(len(xslist)): - self.read1DResult(xsid=xslist[i]) + for i in range(len(xs_list)): + self.read1DResult(xsid=xs_list[i]) - # xslist = [self.firstxs] + xslist + [self.lastxs] - xslist.sort() + # xs_list = [self.first_xs] + xs_list + [self.last_xs] + xs_list.sort() - fig, ax = plt.subplots(ncols=1, nrows=1, figsize=figsize) + fig, ax = plt.subplots(ncols=1, nrows=1, figsize=fig_size) - for i in range(len(xslist)): + for i in range(len(xs_list)): ax.plot( - self.XSHydrographs.loc[start:end, xslist[i]], - label="XS-" + str(xslist[i]), - linewidth=linewidth, + self.xs_hydrograph.loc[start:end, xs_list[i]], + label=f"XS-{xs_list[i]}", + linewidth=line_width, ), # color = XScolor,zorder=XSorder ax.legend(fontsize=10, loc="best") @@ -4148,8 +4169,8 @@ def plotHydrographProgression( def readUSHydrograph( self, - fromday: int = None, - today: int = None, + from_day: int = None, + to_day: int = None, path: str = None, date_format: str = "'%Y-%m-%d'", ): @@ -4159,9 +4180,9 @@ def readUSHydrograph( Parameters ---------- - fromday : [int], optional + from_day : [int], optional the day you want to read the result from, the first day is 1 not zero.The default is ''. - today : [int], optional + to_day : [int], optional the day you want to read the result to. path : [str], optional path to read the results from. if path is not given the customized_runs_path @@ -4171,12 +4192,12 @@ def readUSHydrograph( Returns ------- - USHydrographs : [dataframe attribute]. - dataframe contains the hydrograph of each of the upstream segments + us_hydrographs : [dataframe attribute]. + dataframe contains the hydrograph of each of the upstream reachs with segment id as a column name and a column 'total' contains the sum of all the hydrographs. 
""" - self.USHydrographs = pd.DataFrame() + self.us_hydrographs = pd.DataFrame() if not path: path = self.customized_runs_path @@ -4187,13 +4208,13 @@ def readUSHydrograph( for i in range(len(self.usnode)): Nodeid = self.usnode[i] try: - self.USHydrographs[Nodeid] = self._readRRMResults( + self.us_hydrographs[Nodeid] = self._readRRMResults( self.version, - self.rrmreferenceindex, + self.rrm_reference_index, path, Nodeid, - fromday, - today, + from_day, + to_day, date_format, )[Nodeid] logger.info(f"the US hydrograph '{Nodeid}' has been read") @@ -4209,13 +4230,13 @@ def readUSHydrograph( elif self.usnode: Nodeid = self.usnode[0] try: - self.USHydrographs[Nodeid] = self._readRRMResults( + self.us_hydrographs[Nodeid] = self._readRRMResults( self.version, - self.rrmreferenceindex, + self.rrm_reference_index, path, Nodeid, - fromday, - today, + from_day, + to_day, date_format, )[Nodeid] logger.info(f"the US hydrograph '{Nodeid}' has been read") @@ -4232,16 +4253,16 @@ def readUSHydrograph( ) return - self.USHydrographs["total"] = self.USHydrographs.sum(axis=1) - if not fromday: - fromday = self.USHydrographs.index[0] - if not today: - today = self.USHydrographs.index[-1] + self.us_hydrographs["total"] = self.us_hydrographs.sum(axis=1) + if not from_day: + from_day = self.us_hydrographs.index[0] + if not to_day: + to_day = self.us_hydrographs.index[-1] - start = self.referenceindex.loc[fromday, "date"] - end = self.referenceindex.loc[today, "date"] + start = self.reference_index.loc[from_day, "date"] + end = self.reference_index.loc[to_day, "date"] - self.USHydrographs.index = pd.date_range(start, end, freq="D") + self.us_hydrographs.index = pd.date_range(start, end, freq="D") def getUSHydrograph(self, River): """GetUSHydrograph. GetUSHydrograph methods gets the sum of all the upstream hydrographs whither it is routed inside the model or a boundary condition. @@ -4253,10 +4274,10 @@ def getUSHydrograph(self, River): Returns ------- - USHydrographs : [array]. + us_hydrographs : [array]. 
array of the hydrograph """ - self.USHydrographs = np.zeros(shape=River.notimesteps) + self.us_hydrographs = np.zeros(shape=River.no_time_steps) if len(self.usnode) > 1: # there is more than one upstream segment @@ -4265,21 +4286,21 @@ def getUSHydrograph(self, River): Nodeid = self.usnode[i] # get the order of the segment River.Segments.index(Nodeid) - self.USHydrographs = ( - self.USHydrographs - + River.RoutedQ[:, River.Segments.index(Nodeid)] + self.us_hydrographs = ( + self.us_hydrographs + + River.routed_q[:, River.Segments.index(Nodeid)] ) # there is one upstream segment elif self.usnode: Nodeid = self.usnode[0] River.Segments.index(Nodeid) - self.USHydrographs = ( - self.USHydrographs + River.RoutedQ[:, River.Segments.index(Nodeid)] + self.us_hydrographs = ( + self.us_hydrographs + River.routed_q[:, River.Segments.index(Nodeid)] ) if type(self.BC) != bool: - self.USHydrographs = self.USHydrographs + self.BC.values.reshape( - len(self.USHydrographs) + self.us_hydrographs = self.us_hydrographs + self.BC.values.reshape( + len(self.us_hydrographs) ) def getXSGeometry(self): @@ -4292,7 +4313,7 @@ def getXSGeometry(self): AreaPerLow = np.zeros(shape=(self.xsno, 2)) AreaPerHigh = np.zeros(shape=(self.xsno, 2)) for i in range(self.xsno): - geom = self.crosssections.loc[i, :] + geom = self.cross_sections.loc[i, :] H = min(geom["hl"], geom["hr"]) + geom["dbf"] Coords = self.getVortices( H, @@ -4321,8 +4342,8 @@ def getXSGeometry(self): def getFlow( self, IF, - fromday: Union[int, str] = "", - today: Union[int, str] = "", + from_day: Union[int, str] = "", + to_day: Union[int, str] = "", date_format="%d_%m_%Y", ): """getFlow. @@ -4335,9 +4356,9 @@ def getFlow( IF : [Interface object] You Have to create the interface object then read the laterals and the boundary conditions first. - fromday : [string], optional + from_day : [string], optional the starting day. The default is ''. - today : [string], optional + to_day : [string], optional the ending day. The default is ''. date_format : [string], optional the format of the given dates. The default is "%d_%m_%Y". 
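Hypothetical usage of getFlow with the renamed day arguments (the Interface object `IF` must have its laterals and boundary conditions read beforehand; object names are illustrative, and the date strings follow the default "%d_%m_%Y" format):

    sub.getFlow(IF, from_day="01_01_1955", to_day="31_12_1955")
    print(sub.BC.head())                 # boundary condition at the reach's BC cross-section
    print(sub.Laterals["total"].head())  # daily sum of the lateral inflows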
@@ -4363,19 +4384,19 @@ def getFlow( "using the 'ReadLaterals' method in the interface model" ) - if fromday == "": - fromday = IF.BC.index[0] + if from_day == "": + from_day = IF.BC.index[0] else: - fromday = dt.datetime.strptime(fromday, date_format) + from_day = dt.datetime.strptime(from_day, date_format) - if today == "": - today = IF.BC.index[-1] + if to_day == "": + to_day = IF.BC.index[-1] else: - today = dt.datetime.strptime(today, date_format) + to_day = dt.datetime.strptime(to_day, date_format) # get the id of the boundary condition - xs_as_set = set(self.xsname) - bclist = [int(i) for i in IF.BCTable["xsid"].tolist()] + xs_as_set = set(self.xs_names) + bclist = [int(i) for i in IF.bc_table["xsid"].tolist()] bcids = list(xs_as_set.intersection(bclist)) if len(bcids) == 0: @@ -4383,33 +4404,35 @@ def getFlow( elif len(bcids) > 1: raise ValueError("There are more than one BC for this Reach-basin") else: - self.BC = IF.BC.loc[fromday:today, bcids[0]].to_frame() + self.BC = IF.BC.loc[from_day:to_day, bcids[0]].to_frame() - if len(IF.LateralsTable) > 0: - self.LateralsTable = [ + if len(IF.laterals_table) > 0: + self.laterals_table = [ value - for value in self.xsname - if value in IF.LateralsTable["xsid"].tolist() + for value in self.xs_names + if value in IF.laterals_table["xsid"].tolist() ] self.Laterals = pd.DataFrame( - index=pd.date_range(fromday, today, freq="D"), - columns=self.LateralsTable, + index=pd.date_range(from_day, to_day, freq="D"), + columns=self.laterals_table, ) - for i in self.LateralsTable: - self.Laterals.loc[:, i] = IF.Laterals.loc[fromday:today, i] + for i in self.laterals_table: + self.Laterals.loc[:, i] = IF.Laterals.loc[from_day:to_day, i] self.Laterals["total"] = self.Laterals.sum(axis=1) # if the rrm hydrograph at the location of the hm or at the location of the rrm is read - if isinstance(IF.routedRRM, DataFrame): - self.RRMProgression = pd.DataFrame( - index=pd.date_range(fromday, today, freq="D"), - columns=self.LateralsTable, + if isinstance(IF.routed_rrm, DataFrame): + self.rrm_progression = pd.DataFrame( + index=pd.date_range(from_day, to_day, freq="D"), + columns=self.laterals_table, ) - for i in self.LateralsTable: - self.RRMProgression.loc[:, i] = IF.routedRRM.loc[fromday:today, i] + for i in self.laterals_table: + self.rrm_progression.loc[:, i] = IF.routed_rrm.loc[ + from_day:to_day, i + ] else: - self.LateralsTable = [] + self.laterals_table = [] self.Laterals = pd.DataFrame() def getLaterals(self, xsid: int): @@ -4428,12 +4451,12 @@ def getLaterals(self, xsid: int): sum of the laterals of all the cross sections in the reach upstream of a given xsid. 
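getLaterals (body just below) selects every lateral at or upstream of a cross-section with bisect over the sorted laterals table; a self-contained sketch of that selection, using hypothetical ids:

    from bisect import bisect

    laterals_table = [10, 40, 60, 90]  # sorted xs ids that receive laterals
    xsid = 60
    # bisect is bisect_right, so the slice includes xsid itself
    upstream = laterals_table[: bisect(laterals_table, xsid)]
    print(upstream)  # [10, 40, 60]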
""" - if not isinstance(self.LateralsTable, list) and not isinstance( + if not isinstance(self.laterals_table, list) and not isinstance( self.Laterals, DataFrame ): raise ValueError("please read the Laterals Table and the Laterals first") - USgauge = self.LateralsTable[: bisect(self.LateralsTable, xsid)] + USgauge = self.laterals_table[: bisect(self.laterals_table, xsid)] return self.Laterals[USgauge].sum(axis=1).to_frame() def getTotalFlow(self, gaugexs: int): @@ -4448,7 +4471,7 @@ def getTotalFlow(self, gaugexs: int): Returns ------- - TotalFlow : [dataframe attribute] + total_flow : [dataframe attribute] dataframe containing the total upstream hydrograph for the location of the given xs, the column name is "total" """ @@ -4456,11 +4479,11 @@ def getTotalFlow(self, gaugexs: int): if not isinstance(self.Laterals, DataFrame): raise ValueError("Please read the lateral flows first using the 'GetFlow'") - if gaugexs not in self.xsname: + if gaugexs not in self.xs_names: raise ValueError( f"The given XS {gaugexs} does not locate in the current river reach" - f"First XS is {self.firstxs} and " - f"Last XS is {self.lastxs}" + f"First XS is {self.first_xs} and " + f"Last XS is {self.last_xs}" ) Laterals = self.getLaterals(gaugexs) try: @@ -4482,16 +4505,18 @@ def getTotalFlow(self, gaugexs: int): + self.BC.loc[s:e, self.BC.columns[0]].values ) logger.info(f"Total flow for the XS-{gaugexs} has been calculated") - elif isinstance(self.USHydrographs, DataFrame) and len(self.USHydrographs) > 0: - s2 = self.USHydrographs.index[0] + elif ( + isinstance(self.us_hydrographs, DataFrame) and len(self.us_hydrographs) > 0 + ): + s2 = self.us_hydrographs.index[0] s = max(s1, s2) - e2 = self.USHydrographs.index[-1] + e2 = self.us_hydrographs.index[-1] e = min(e1, e2) self.TotalFlow = pd.DataFrame(index=pd.date_range(s, e, freq="D")) self.TotalFlow.loc[s:e, "total"] = ( Laterals.loc[s:e, 0].values - + self.USHydrographs.loc[s:e, "total"].values + + self.us_hydrographs.loc[s:e, "total"].values ) logger.info(f"Total flow for the XS-{gaugexs} has been calculated") else: @@ -4635,7 +4660,7 @@ def plotQ( DESCRIPTION. The default is (0.3,0,0). xscolor : TYPE, optional DESCRIPTION. The default is "grey". - linewidth : TYPE, optional + line_width : TYPE, optional DESCRIPTION. The default is 4. hmorder : TYPE, optional DESCRIPTION. The default is 6. @@ -4679,7 +4704,7 @@ def plotQ( default is 4. rrmlinestyle: [int] default is 6. - figsize: [tuple] + fig_size: [tuple] default is (6, 5). 
Returns ------- @@ -4694,15 +4719,15 @@ fig, ax = plt.subplots(ncols=1, nrows=1, figsize=self.figsize) - if self.XSHydrographs is not None: + if self.xs_hydrograph is not None: # plot if you read the results using the read1DResults try: ax.plot( - self.XSHydrographs.loc[start:end, gaugexs], + self.xs_hydrograph.loc[start:end, gaugexs], label="RIM", zorder=self.hmorder, linewidth=self.linewidth, - linestyle=V.LineStyle(6), + linestyle=V.getLineStyle(6), color=self.hmcolor, ) except KeyError: @@ -4725,17 +4750,20 @@ label="BC", zorder=self.ushorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.ushstyle), + linestyle=V.getLineStyle(self.ushstyle), color=self.ushcolor, ) # Laterals - if isinstance(self.LateralsTable, list) and len(self.LateralsTable) > 0: + if ( + isinstance(self.laterals_table, list) + and len(self.laterals_table) > 0 + ): ax.plot( Laterals.loc[start:end, 0], label="Laterals", zorder=self.latorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.latstyle), + linestyle=V.getLineStyle(self.latstyle), color=self.latcolor, ) if self.plottotal: @@ -4746,7 +4774,7 @@ label="US/BC + Laterals", zorder=self.totalorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.totalstyle), + linestyle=V.getLineStyle(self.totalstyle), color=self.totalcolor, ) except AttributeError: @@ -4758,16 +4786,16 @@ if self.usnode != [] and self.plotus: try: ax.plot( - self.USHydrographs.loc[start:end, "total"], + self.us_hydrographs.loc[start:end, "total"], label="US Hydrograph", zorder=self.ushorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.ushstyle), + linestyle=V.getLineStyle(self.ushstyle), color=self.ushcolor, ) except KeyError: msg = ( - "Please read the routed hydrograph of the upstream segments using the " + "Please read the routed hydrograph of the upstream reaches using the " "'ReadUSHydrograph' method" ) @@ -4782,7 +4810,7 @@ linewidth=self.linewidth, zorder=self.gaugeorder, color=self.gaugecolor, - linestyle=V.LineStyle(self.gaugestyle), + linestyle=V.getLineStyle(self.gaugestyle), ) # specific XS @@ -4791,12 +4819,12 @@ self.read1DResult(xsid=self.specificxs) # plot the xs ax.plot( - self.XSHydrographs.loc[start:end, self.specificxs], + self.xs_hydrograph.loc[start:end, self.specificxs], label="XS-" + str(self.specificxs), zorder=self.xsorder, linewidth=self.linewidth, color=self.xscolor, - linestyle=V.LineStyle(self.xslinestyle), + linestyle=V.getLineStyle(self.xslinestyle), ) # RRM if self.plotrrm: @@ -4807,7 +4835,7 @@ label="mHM-RIM Loc", zorder=self.rrmorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.rrmlinestyle), + linestyle=V.getLineStyle(self.rrmlinestyle), color=self.rrmcolor, ) except KeyError: @@ -4822,7 +4850,7 @@ label="mHM-mHM Loc", zorder=self.rrmorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.rrm2linesytle), + linestyle=V.getLineStyle(self.rrm2linesytle), color=self.rrm2color, ) except KeyError: @@ -4830,20 +4858,20 @@ f" Station {gaugename} does not have a second RRM discharge time series" ) - elif isinstance(Calib.CalibrationQ, DataFrame): + elif isinstance(Calib.calibration_q, DataFrame): # plot if you read the data using ReadCalirationResult ax.plot( - Calib.CalibrationQ[segment_xs], + Calib.calibration_q[segment_xs], label="RIM", zorder=3, linewidth=self.linewidth, - linestyle=V.LineStyle(6), + linestyle=V.getLineStyle(6), color=self.hmcolor, ) # plot the gauge data ax.plot( Calib.q_gauges.loc[ -
Calib.CalibrationQ.index[0] : Calib.CalibrationQ.index[-1], + Calib.calibration_q.index[0] : Calib.calibration_q.index[-1], stationname, ], label="Gauge-" + str(self.id), @@ -4853,7 +4881,7 @@ def plotQ( if self.plotrrm: ax.plot( self.RRM.loc[ - Calib.CalibrationQ.index[0] : Calib.CalibrationQ.index[-1], + Calib.calibration_q.index[0] : Calib.calibration_q.index[-1], stationname, ], label="RRM", @@ -4918,7 +4946,7 @@ def plotRRMProgression(self, specificxs, start, end, *args, **kwargs): DESCRIPTION. The default is "green". latcolor : TYPE, optional DESCRIPTION. The default is (0.3,0,0). - linewidth : TYPE, optional + line_width : TYPE, optional DESCRIPTION. The default is 4. hmorder : TYPE, optional DESCRIPTION. The default is 6. @@ -4940,7 +4968,7 @@ def plotRRMProgression(self, specificxs, start, end, *args, **kwargs): default is False. ylabels: [int, bool] default is False. - figsize: [tuple] + fig_size: [tuple] default is (6, 5). rrmlinesytle: [int] default is 8 @@ -4978,17 +5006,17 @@ def plotRRMProgression(self, specificxs, start, end, *args, **kwargs): label="BC", zorder=self.ushorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.ushstyle), + linestyle=V.getLineStyle(self.ushstyle), color=self.ushcolor, ) # Laterals - if len(self.LateralsTable) > 0: + if len(self.laterals_table) > 0: ax.plot( Laterals.loc[start:end, 0], label="Laterals Sum \n up to - XS-" + str(specificxs), zorder=self.latorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.latstyle), + linestyle=V.getLineStyle(self.latstyle), color=self.latcolor, ) if self.plottotal: @@ -4999,18 +5027,18 @@ def plotRRMProgression(self, specificxs, start, end, *args, **kwargs): label="US/BC \n+ Laterals", zorder=self.totalorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.totalstyle), + linestyle=V.getLineStyle(self.totalstyle), color=self.totalcolor, ) # US hydrograph if self.usnode != [] and self.plotus: ax.plot( - self.USHydrographs.loc[start:end, "total"], + self.us_hydrographs.loc[start:end, "total"], label="US Hydrograph", zorder=self.ushorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.ushstyle), + linestyle=V.getLineStyle(self.ushstyle), color=self.ushcolor, ) @@ -5020,28 +5048,28 @@ def plotRRMProgression(self, specificxs, start, end, *args, **kwargs): self.read1DResult(xsid=specificxs) # plot the xs ax.plot( - self.XSHydrographs.loc[start:end, specificxs], + self.xs_hydrograph.loc[start:end, specificxs], label="RIM", zorder=self.hmorder, linewidth=self.linewidth, - linestyle=V.LineStyle(6), + linestyle=V.getLineStyle(6), color=self.hmcolor, ) # RRM # if plotrrm: - if hasattr(self, "routedRRM"): + if hasattr(self, "routed_rrm"): try: ax.plot( - self.RRMProgression.loc[start:end, specificxs], + self.rrm_progression.loc[start:end, specificxs], label="mHM", zorder=self.rrmorder, linewidth=self.linewidth, - linestyle=V.LineStyle(self.rrmlinesytle), + linestyle=V.getLineStyle(self.rrmlinesytle), color=self.rrmcolor, ) except KeyError: logger.debug( - " XS " + str(specificxs) + "does not exist in the 'routedRRM'" + " XS " + str(specificxs) + "does not exist in the 'routed_rrm'" ) else: msg = ( @@ -5136,18 +5164,18 @@ def calculateQMetrics( end = dt.datetime.strptime(end, fmt) # get the latest date of the filter date and the first date in the result # get the earliest date of the end and the last date in the result - st2 = max(GaugeStart, start, self.firstdayresults) - end2 = min(GaugeEnd, end, self.lastday) + st2 = max(GaugeStart, start, self.first_day_results) + end2 = min(GaugeEnd, end, 
self.last_day) # get the observed discharge Qobs = Calib.q_gauges.loc[st2:end2, stationname] # resample the times series to average daily ind = pd.date_range( - self.firstdayresults, self.lastday + dt.timedelta(days=1), freq="h" + self.first_day_results, self.last_day + dt.timedelta(days=1), freq="h" )[:-1] - Q = self.Result1D[self.Result1D["xs"] == self.lastxs] + Q = self.results_1d[self.results_1d["xs"] == self.last_xs] Q.index = ind QHM["q"] = Q["q"].resample("D").mean() QHM["q"] = QHM.loc[st2:end2, "q"] @@ -5156,26 +5184,26 @@ def calculateQMetrics( # sub.Resample(gaugexs, 'q', starti, endi, Delete=True) # except: # sub.Resample(gaugexs, 'q', starti, endi, Delete=False) - # q_hm['q'] = sub.ResampledQ[gaugexs][:] + # q_hm['q'] = sub.resampled_q[gaugexs][:] # q_hm.index = pd.date_range(st2, end2) else: - st2 = max(GaugeStart, self.firstdayresults) - end2 = min(GaugeEnd, self.lastday) + st2 = max(GaugeStart, self.first_day_results) + end2 = min(GaugeEnd, self.last_day) # get the observed discharge Qobs = Calib.q_gauges.loc[st2:end2, stationname] # resample the times series to average daily ind = pd.date_range( - self.firstdayresults, self.lastday + dt.timedelta(days=1), freq="h" + self.first_day_results, self.last_day + dt.timedelta(days=1), freq="h" )[:-1] - Q = self.Result1D[self.Result1D["xs"] == self.lastxs] + Q = self.results_1d[self.results_1d["xs"] == self.last_xs] Q.index = ind QHM["q"] = Q["q"].resample("D").mean() QHM["q"] = QHM.loc[st2:end2, "q"] # old - # q_hm['q'] = sub.Result1D['q'][sub.Result1D['xs'] == gaugexs][sub.Result1D['hour'] == 24][:] + # q_hm['q'] = sub.results_1d['q'][sub.results_1d['xs'] == gaugexs][sub.results_1d['hour'] == 24][:] # q_hm.index = pd.date_range(st2, end2) qsim = QHM.loc[st2:end2, "q"].tolist() rmse = round(Pf.RMSE(Qobs, qsim), 0) @@ -5205,7 +5233,7 @@ def plotWL( **kwargs, # gaugecolor: Union[tuple, str] = "#DC143C", # hmcolor: Union[tuple, str] = "#004c99", - # linewidth: Union[int, float] = 2, + # line_width: Union[int, float] = 2, # hmorder: int = 1, # gaugeorder: int = 0, # hmstyle: int = 6, @@ -5213,8 +5241,8 @@ def plotWL( # plotgauge=True, # fmt: str = "%Y-%m-%d", # legendsize: Union[int, float] = 15, - # figsize: tuple = (6, 5), - # nxlabels: int = 4, + # fig_size: tuple = (6, 5), + # xlabels_number: int = 4, ): """Plot water level surface. @@ -5239,7 +5267,7 @@ def plotWL( DESCRIPTION. The default is "#DC143C". hmcolor : TYPE, optional DESCRIPTION. The default is "#004c99". - linewidth : TYPE, optional + line_width : TYPE, optional DESCRIPTION. The default is 2. hmorder : TYPE, optional DESCRIPTION. The default is 1. @@ -5255,8 +5283,8 @@ def plotWL( default is 0. legendsize: [int, float] default is 15. - figsize: tuple=(6, 5), - nxlabels: [int] + fig_size: tuple=(6, 5), + xlabels_number: [int] default is 4. 
Returns ------- @@ -5298,22 +5326,22 @@ self.extractXS(gaugexs) ax.plot( - self.XSWaterLevel.loc[start:end, gaugexs], + self.xs_water_level.loc[start:end, gaugexs], label="RIM", zorder=self.hmorder, linewidth=self.linewidth, color=self.hmcolor, - linestyle=V.LineStyle(self.hmstyle), + linestyle=V.getLineStyle(self.hmstyle), ) if self.plotgauge: ax.plot( - Calib.WLGauges.loc[start:end, stationname], + Calib.wl_gauges.loc[start:end, stationname], label="Gauge", zorder=self.gaugeorder, linewidth=self.linewidth, color=self.gaugecolor, - linestyle=V.LineStyle(self.gaugestyle), + linestyle=V.getLineStyle(self.gaugestyle), ) start, end = ax.get_xlim() @@ -5363,16 +5391,16 @@ if isinstance(end, str): end = dt.datetime.strptime(end, fmt) - st2 = max(GaugeStart, start, self.firstdayresults) - end2 = min(GaugeEnd, end, self.lastday) + st2 = max(GaugeStart, start, self.first_day_results) + end2 = min(GaugeEnd, end, self.last_day) # observed - obs = np.array(Calib.WLGauges.loc[st2:end2, stationname]) + obs = np.array(Calib.wl_gauges.loc[st2:end2, stationname]) # RIM ind = pd.date_range( - self.firstdayresults, self.lastday + dt.timedelta(days=1), freq="h" + self.first_day_results, self.last_day + dt.timedelta(days=1), freq="h" )[:-1] - mod = self.Result1D[self.Result1D["xs"] == self.lastxs] + mod = self.results_1d[self.results_1d["xs"] == self.last_xs] mod.index = ind mod = mod["wl"].resample("D").mean() mod = mod.loc[st2:end2] @@ -5384,18 +5412,18 @@ # except: # sub.Resample(gaugexs, 'wl', River.DateToIndex(st2), # River.DateToIndex(end2), Delete = False) - # series1 = np.array(sub.ResampledWL[gaugexs]) + # series1 = np.array(sub.resampled_wl[gaugexs]) else: - st2 = max(GaugeStart, self.firstdayresults) - end2 = min(GaugeEnd, self.lastday) + st2 = max(GaugeStart, self.first_day_results) + end2 = min(GaugeEnd, self.last_day) # Observed - obs = np.array(Calib.WLGauges.loc[st2:end2, stationname]) + obs = np.array(Calib.wl_gauges.loc[st2:end2, stationname]) # RIM ind = pd.date_range( - self.firstdayresults, self.lastday + dt.timedelta(days=1), freq="h" + self.first_day_results, self.last_day + dt.timedelta(days=1), freq="h" )[:-1] - mod = self.Result1D[self.Result1D["xs"] == gaugexs] + mod = self.results_1d[self.results_1d["xs"] == gaugexs] mod.index = ind mod = mod["wl"].resample("D").mean() mod = mod.loc[st2:end2] # RIM # sub.Resample(gaugexs, 'wl', River.DateToIndex(st2), # River.DateToIndex(end2), Delete = False) - # series1 = np.array(sub.ResampledWL[gaugexs]) + # series1 = np.array(sub.resampled_wl[gaugexs]) if len(obs) != len(mod) or len(mod) == 0: logger.debug( @@ -5453,10 +5481,10 @@ :param filter1: [real] exclude lower values than filter1 :param filter2: [real] exclude values higher than filter2 :return: - ExtractedValues [list] list of extracted values + extracted_values [list] list of extracted values """ - # check if the object has the attribute ExtractedValues - if hasattr(self, "ExtractedValues"): + # check if the object has the attribute extracted_values + if hasattr(self, "extracted_values"): # depth map if Map == 1: path = self.twodresultpath + self.depthprefix + str(Day) + ".zip" @@ -5517,9 +5545,9 @@ def plotBC(self, date: str, fmt: str = "%Y-%m-%d"): fig, ax1 = plt.subplots() ax2 = ax1.twinx() - ax1.plot(self.HBCmin.loc[date]) + ax1.plot(self.h_bc_1min.loc[date]) ax1.set_xlabel("Date", fontsize=15) ax1.set_ylabel("H", fontsize=15) ax1.set_xlim(0, 1440) -
ax2.plot(self.QBCmin.loc[date]) + ax2.plot(self.q_bc_1min.loc[date]) ax2.set_ylabel("Q", fontsize=15) diff --git a/Hapi/hm/saintvenant.py b/Hapi/hm/saintvenant.py index 754949a7..d423f820 100644 --- a/Hapi/hm/saintvenant.py +++ b/Hapi/hm/saintvenant.py @@ -24,6 +24,7 @@ def __init__(self, maxiteration=10, beta=1, epsi=0.5, theta=0.5): pass def kinematicraster(self, Model): + """Kinematic wave method Raster.""" beta = 3 / 5 dx = Model.CellSize @@ -146,8 +147,8 @@ def kinematic1d(Model, usbc): """ nt = len(usbc) # int(24*60*60/Model.dt) - Model.q = np.zeros(shape=(nt, len(Model.crosssections))) - Model.h = np.zeros(shape=(nt, len(Model.crosssections))) + Model.q = np.zeros(shape=(nt, len(Model.cross_sections))) + Model.h = np.zeros(shape=(nt, len(Model.cross_sections))) beta = 3 / 5 dtx = Model.dt / Model.dx @@ -156,14 +157,14 @@ def kinematic1d(Model, usbc): Model.q[: len(Model.usbc), 0] = usbc.loc[:, "q"] for t in range(1, len(Model.q)): - for x in range(1, len(Model.crosssections)): - # p = Model.crosssections.loc[x,'b'] + 2 * Model.crosssections.loc[x,'depth'] - p = Model.crosssections.loc[x, "b"] - n = Model.crosssections.loc[x, "n"] + for x in range(1, len(Model.cross_sections)): + # p = Model.cross_sections.loc[x,'b'] + 2 * Model.cross_sections.loc[x,'depth'] + p = Model.cross_sections.loc[x, "b"] + n = Model.cross_sections.loc[x, "n"] alpha1 = n * pow(p, 2 / 3) s = ( - Model.crosssections.loc[x - 1, "bed level"] - - Model.crosssections.loc[x, "bed level"] + Model.cross_sections.loc[x - 1, "bed level"] + - Model.cross_sections.loc[x, "bed level"] ) / Model.dx alpha = pow(alpha1 / pow(s, 0.5), 0.6) @@ -187,14 +188,15 @@ def kinematic1d(Model, usbc): # take the calculated water depth for the first time step the same as the second time step Model.h[0, :] = Model.h[1, :] Model.h[:, 0] = Model.h[:, 1] - Model.wl = Model.h + Model.crosssections["bed level"].values + Model.wl = Model.h + Model.cross_sections["bed level"].values @staticmethod def storagecell(Model, usbc): + """Storage Cell Model.""" nt = len(usbc) - Model.q = np.zeros(shape=(nt, len(Model.crosssections))) - Model.h = np.zeros(shape=(nt, len(Model.crosssections))) + Model.q = np.zeros(shape=(nt, len(Model.cross_sections))) + Model.h = np.zeros(shape=(nt, len(Model.cross_sections))) # calculate area and perimeter of all xssbased on the xs = np.zeros(shape=(Model.xsno, 2)) Model.h[0, :] = 0.1 @@ -207,41 +209,41 @@ def storagecell(Model, usbc): for t in range(1, len(Model.q)): - for x in range(len(Model.crosssections)): + for x in range(len(Model.cross_sections)): # area - xs[x, 1] = Model.h[t, x] * Model.crosssections.loc[x, "b"] + xs[x, 1] = Model.h[t, x] * Model.cross_sections.loc[x, "b"] # perimeter - xs[x, 2] = Model.crosssections.loc[x, "b"] + xs[x, 2] = Model.cross_sections.loc[x, "b"] for x in range(1, len(Model.xsno)): if x < Model.xsno - 1: # friction slope = (so - dhx/dx] - diffusive wave sf = ( Model.h[t, x] - + Model.crosssections.loc[x, "bed level"] + + Model.cross_sections.loc[x, "bed level"] - Model.h[t, x + 1] - - Model.crosssections.loc[x + 1, "bed level"] + - Model.cross_sections.loc[x + 1, "bed level"] ) / Model.dx else: # for LOWER BOUNDARY node sf = ( Model.h[t, x - 1] - + Model.crosssections.loc[x - 1, "bed level"] + + Model.cross_sections.loc[x - 1, "bed level"] - Model.h[t, x] - - Model.crosssections.loc[x, "bed level"] + - Model.cross_sections.loc[x, "bed level"] ) / Model.dx if x < Model.xsno - 1: Area = (xs[x, 1] + xs[x + 1, 2]) / 2 R = (xs[x, 1] / xs[x, 2] + xs[x + 1, 1] / xs[x + 1, 2]) / 2 - # p = 
Model.crosssections.loc[x,'b'] + 2 * Model.crosssections.loc[x,'depth'] - p = Model.crosssections.loc[x, "b"] - n = Model.crosssections.loc[x, "n"] + # p = Model.cross_sections.loc[x,'b'] + 2 * Model.cross_sections.loc[x,'depth'] + p = Model.cross_sections.loc[x, "b"] + n = Model.cross_sections.loc[x, "n"] alpha1 = n * pow(p, 2 / 3) s = ( - Model.crosssections.loc[x - 1, "bed level"] - - Model.crosssections.loc[x, "bed level"] + Model.cross_sections.loc[x - 1, "bed level"] + - Model.cross_sections.loc[x, "bed level"] ) / Model.dx alpha = pow(alpha1 / pow(s, 0.5), 0.6) @@ -265,9 +267,24 @@ # take the calculated water depth for the first time step the same as the second time step Model.h[0, :] = Model.h[1, :] Model.h[:, 0] = Model.h[:, 1] - Model.wl = Model.h + Model.crosssections["bed level"].values + Model.wl = Model.h + Model.cross_sections["bed level"].values def GVF00(self, Sub, River, Hbnd, dt, dx, inih, storewl, MinQ): + """GVF00. + + Gradually varied flow computation. + + Parameters + ---------- + Sub + River + Hbnd + dt + dx + inih + storewl + MinQ + """ xs = np.zeros(shape=(Sub.xsno, 8)) Diff_coeff = np.zeros(shape=(Sub.xsno, 3)) @@ -619,8 +636,8 @@ for x in range(1, (Sub.xsno) - 2): # check if the XS has laterals # FindInArrayF(real[xsid[x]],sub_XSLaterals,loc) - if Sub.xsid[x] in Sub.LateralsTable: - loc = Sub.LateralsTable.index(Sub.xsid[x]) + if Sub.xsid[x] in Sub.laterals_table: + loc = Sub.laterals_table.index(Sub.xsid[x]) # have only the laterals qlat = Lateral_q[t, loc] # print(*,*] "Lat= ", qlat @@ -644,7 +661,7 @@ bankful_area = Sub.Dbf[x] * Sub.mw[x] # if the new area is less than the area of the min[hl,hr] - if XSarea[x] <= Sub.AreaPerLow[x, 0]: + if XSarea[x] <= Sub.area_per_Low[x, 0]: # if the area is less than the bankful area if XSarea[x] <= bankful_area: hx[x] = XSarea[x] / Sub.mw[x] @@ -659,7 +676,7 @@ hx[x] = dummyh + Sub.Dbf[x] # if the new area is less than the area of the max[hl,hr] - elif XSarea[x] <= Sub.AreaPerHigh[x, 1]: + elif XSarea[x] <= Sub.area_per_high[x, 1]: dummyarea = XSarea[x] - bankful_area # check which dike is higher if Sub.hl[x] < Sub.hr[x]: @@ -734,10 +751,12 @@ @staticmethod def QuadraticEqn(a, b, c): + """QuadraticEqn. Return the root (-b + sqrt(b**2 - 4*a*c)) / (2*a) of a quadratic equation.""" delta = (b**2) - 4 * a * c return (-b + np.sqrt(delta)) / (2 * a) def preissmann(self, Model): + """Preissmann scheme.""" # calculating the number of discretization points theta = self.theta epsi = self.epsi dx = Model.dx dt = Model.dt - X = len(Model.crosssections) # round(Model.L / dx) + 1 + X = len(Model.cross_sections) # round(Model.L / dx) + 1 T = round(Model.Time / dt) + 1 q = np.zeros(shape=(T, X), dtype=np.float32) @@ -778,9 +797,9 @@ # Unknowns = np.zeros(2 * X, dtype=np.float32) cols = 0 - b = Model.crosssections.loc[:, "b"] - n = Model.crosssections.loc[:, "n"] - s = Model.crosssections.loc[:, "bed level"] + b = Model.cross_sections.loc[:, "b"] + n = Model.cross_sections.loc[:, "n"] + s = Model.cross_sections.loc[:, "bed level"] s = s[:-1].values - s[1:].values s = s / dx s = np.append(s, s[-1]) @@ -945,7 +964,7 @@ # end # end of computation for all time steps Model.q = q[:, :] Model.h = h[:, :] - Model.wl = Model.h + Model.crosssections["bed
level"].values + Model.wl = Model.h + Model.cross_sections["bed level"].values area = h * b.values v = q / area diff --git a/Hapi/plot/__init__.py b/Hapi/plot/__init__.py index 59dedd85..b7fbe0fb 100644 --- a/Hapi/plot/__init__.py +++ b/Hapi/plot/__init__.py @@ -1,3 +1,4 @@ +"""Plotting module.""" # import Hapi.plot.visualizer as visualizer if __name__ == "__main__": diff --git a/Hapi/plot/visualizer.py b/Hapi/plot/visualizer.py index 04969ecd..6dbc13aa 100644 --- a/Hapi/plot/visualizer.py +++ b/Hapi/plot/visualizer.py @@ -7,7 +7,8 @@ import datetime as dt import math import os -from collections import OrderedDict + +# from collections import OrderedDict from typing import List, Optional, Union import matplotlib as mpl @@ -15,6 +16,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd +from cleopatra.array import Array from matplotlib import animation, gridspec from matplotlib.animation import FuncAnimation from scipy.stats import gumbel_r @@ -41,62 +43,29 @@ class Visualize: xlabel="", legend="", legend_size=10, - figsize=(10, 8), - labelsize=10, - fontsize=10, + fig_size=(10, 8), + label_size=10, + font_size=10, name="hist.tif", color1="#3D59AB", color2="#DC143C", - linewidth=3, - Axisfontsize=15, - ) - - linestyles = OrderedDict( - [ - ("solid", (0, ())), # 0 - ("loosely dotted", (0, (1, 10))), # 1 - ("dotted", (0, (1, 5))), # 2 - ("densely dotted", (0, (1, 1))), # 3 - ("loosely dashed", (0, (5, 10))), # 4 - ("dashed", (0, (5, 5))), # 5 - ("densely dashed", (0, (5, 1))), # 6 - ("loosely dashdotted", (0, (3, 10, 1, 10))), # 7 - ("dashdotted", (0, (3, 5, 1, 5))), # 8 - ("densely dashdotted", (0, (3, 1, 1, 1))), # 9 - ("loosely dashdotdotted", (0, (3, 10, 1, 10, 1, 10))), # 10 - ("dashdotdotted", (0, (3, 5, 1, 5, 1, 5))), # 11 - ("densely dashdotdotted", (0, (3, 1, 1, 1, 1, 1))), # 12 - ("densely dashdotdottededited", (0, (6, 1, 1, 1, 1, 1))), # 13 - ] + line_width=3, + axis_font_size=15, ) - MarkerStyleList = [ - "--o", - ":D", - "-.H", - "--x", - ":v", - "--|", - "-+", - "-^", - "--s", - "-.*", - "-.h", - ] - def __init__(self, resolution: str = "Hourly"): self.resolution = resolution self.Anim = None @staticmethod - def LineStyle(Style: Union[str, int] = "loosely dotted"): + def getLineStyle(style: Union[str, int] = "loosely dotted"): """LineStyle. Line styles for plotting Parameters ---------- - Style : TYPE, optional + style : TYPE, optional DESCRIPTION. The default is 'loosely dotted'. Returns @@ -104,28 +73,17 @@ def LineStyle(Style: Union[str, int] = "loosely dotted"): TYPE DESCRIPTION. """ - if isinstance(Style, str): - try: - return Visualize.linestyles[Style] - except KeyError: - msg = ( - " The Style name you entered-{0}-does not exist please" - "choose from the available styles" - ).format(Style) - print(msg) - print(list(Visualize.linestyles)) - else: - return list(Visualize.linestyles.items())[Style][1] + return Array.getLineStyle(style) @staticmethod - def MarkerStyle(Style: int): + def getMarkerStyle(style: int): """MarkerStyle. Marker styles for plotting Parameters ---------- - Style : TYPE + style : TYPE DESCRIPTION. Returns @@ -133,21 +91,19 @@ def MarkerStyle(Style: int): TYPE DESCRIPTION. 
""" - if Style > len(Visualize.MarkerStyleList) - 1: - Style = Style % len(Visualize.MarkerStyleList) - return Visualize.MarkerStyleList[Style] + return Array.getMarkerStyle(style) - def GroundSurface( + def plotGroundSurface( self, Sub, - fromxs: Optional[int] = None, - toxs: Optional[int] = None, + from_xs: Optional[int] = None, + to_xs: Optional[int] = None, floodplain: bool = False, - plotlateral: bool = False, - nxlabels: int = 10, - figsize: tuple = (20, 10), - LateralsColor: Union[str, tuple] = "red", - LaterlasLineWidth: int = 1, + plot_lateral: bool = False, + xlabels_number: int = 10, + fig_size: tuple = (20, 10), + laterals_color: Union[str, tuple] = "red", + laterals_line_width: int = 1, option: int = 1, size: int = 50, ): @@ -157,21 +113,21 @@ def GroundSurface( ---------- Sub : TYPE DESCRIPTION. - fromxs : TYPE, optional + from_xs : TYPE, optional DESCRIPTION. The default is ''. - toxs : TYPE, optional + to_xs : TYPE, optional DESCRIPTION. The default is ''. floodplain : TYPE, optional DESCRIPTION. The default is False. - plotlateral : TYPE, optional + plot_lateral : TYPE, optional DESCRIPTION. The default is False. - nxlabels: [int] + xlabels_number: [int] Default is 10 - figsize: [tuple] + fig_size: [tuple] Default is (20, 10) - LateralsColor: [str, tuple] + laterals_color: [str, tuple] Defaut is "red", - LaterlasLineWidth: [int] + laterals_line_width: [int] Default is 1. option: [int] Default is 1 @@ -182,73 +138,77 @@ def GroundSurface( ------- None. """ - GroundSurfacefig = plt.figure(70, figsize=figsize) + GroundSurfacefig = plt.figure(70, figsize=fig_size) gs = gridspec.GridSpec(nrows=2, ncols=6, figure=GroundSurfacefig) axGS = GroundSurfacefig.add_subplot(gs[0:2, 0:6]) - if not fromxs: - fromxs = Sub.xsname[0] + if not from_xs: + from_xs = Sub.xs_names[0] - if not toxs: - toxs = Sub.xsname[-1] + if not to_xs: + to_xs = Sub.xs_names[-1] # not the whole sub-basin - axGS.set_xticks(list(range(fromxs, toxs))) - axGS.set_xticklabels(list(range(fromxs, toxs))) + axGS.set_xticks(list(range(from_xs, to_xs))) + axGS.set_xticklabels(list(range(from_xs, to_xs))) - axGS.set_xlim(fromxs - 1, toxs + 1) + axGS.set_xlim(from_xs - 1, to_xs + 1) axGS.tick_params(labelsize=8) # plot dikes axGS.plot( - Sub.xsname, - Sub.crosssections["zl"], + Sub.xs_names, + Sub.cross_sections["zl"], "k--", dashes=(5, 1), linewidth=2, label="Left Dike", ) axGS.plot( - Sub.xsname, Sub.crosssections["zr"], "k.-", linewidth=2, label="Right Dike" + Sub.xs_names, + Sub.cross_sections["zr"], + "k.-", + linewidth=2, + label="Right Dike", ) if floodplain: fpl = ( - Sub.crosssections["gl"] - + Sub.crosssections["dbf"] - + Sub.crosssections["hl"] + Sub.cross_sections["gl"] + + Sub.cross_sections["dbf"] + + Sub.cross_sections["hl"] ) fpr = ( - Sub.crosssections["gl"] - + Sub.crosssections["dbf"] - + Sub.crosssections["hr"] + Sub.cross_sections["gl"] + + Sub.cross_sections["dbf"] + + Sub.cross_sections["hr"] ) - axGS.plot(Sub.xsname, fpl, "b-.", linewidth=2, label="Floodplain left") - axGS.plot(Sub.xsname, fpr, "r-.", linewidth=2, label="Floodplain right") + axGS.plot(Sub.xs_names, fpl, "b-.", linewidth=2, label="Floodplain left") + axGS.plot(Sub.xs_names, fpr, "r-.", linewidth=2, label="Floodplain right") - if plotlateral: - if isinstance(Sub.LateralsTable, list) and len(Sub.LateralsTable) > 0: + if plot_lateral: + if isinstance(Sub.laterals_table, list) and len(Sub.laterals_table) > 0: if option == 1: # plot location of laterals - for i in range(len(Sub.LateralsTable)): + for i in range(len(Sub.laterals_table)): 
axGS.vlines( - Sub.LateralsTable[i], + Sub.laterals_table[i], 0, - int(Sub.Result1D["q"].max()), - colors=LateralsColor, + int(Sub.results_1d["q"].max()), + colors=laterals_color, linestyles="dashed", - linewidth=LaterlasLineWidth, + linewidth=laterals_line_width, ) else: lat = pd.DataFrame() - lat["xsid"] = Sub.LateralsTable - lat = lat.merge(Sub.crosssections, on="xsid", how="left") + lat["xsid"] = Sub.laterals_table + lat = lat.merge(Sub.cross_sections, on="xsid", how="left") axGS.scatter( - Sub.LateralsTable, + Sub.laterals_table, lat["gl"].tolist(), - c=LateralsColor, - linewidth=LaterlasLineWidth, + c=laterals_color, + linewidth=laterals_line_width, zorder=10, s=size, ) @@ -256,47 +216,49 @@ def GroundSurface( print(" Please Read the Laterals data") maxelevel1 = max( - Sub.crosssections.loc[Sub.crosssections["xsid"] >= fromxs, "zr"][ - Sub.crosssections["xsid"] <= toxs + Sub.cross_sections.loc[Sub.cross_sections["xsid"] >= from_xs, "zr"][ + Sub.cross_sections["xsid"] <= to_xs ] ) maxelevel2 = max( - Sub.crosssections.loc[Sub.crosssections["xsid"] >= fromxs, "zl"][ - Sub.crosssections["xsid"] <= toxs + Sub.cross_sections.loc[Sub.cross_sections["xsid"] >= from_xs, "zl"][ + Sub.cross_sections["xsid"] <= to_xs ] ) maxlelv = max(maxelevel1, maxelevel2) - minlev = Sub.crosssections.loc[Sub.crosssections["xsid"] == toxs, "gl"].values + minlev = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == to_xs, "gl" + ].values axGS.set_ylim(minlev - 5, maxlelv + 5) # plot the bedlevel/baklevel if Sub.version == 1: axGS.plot( - Sub.xsname, - Sub.crosssections["gl"], + Sub.xs_names, + Sub.cross_sections["gl"], "k-", linewidth=5, label="Bankful level", ) else: axGS.plot( - Sub.xsname, - Sub.crosssections["gl"], + Sub.xs_names, + Sub.cross_sections["gl"], "k-", linewidth=5, label="Ground level", ) axGS.plot( - Sub.xsname, - Sub.crosssections["gl"] + Sub.crosssections["dbf"], + Sub.xs_names, + Sub.cross_sections["gl"] + Sub.cross_sections["dbf"], "k", linewidth=2, label="Bankful depth", ) - if nxlabels != "": + if xlabels_number != "": start, end = axGS.get_xlim() - label_list = [int(i) for i in np.linspace(start, end, nxlabels)] + label_list = [int(i) for i in np.linspace(start, end, xlabels_number)] axGS.xaxis.set_ticks(label_list) title = "Water surface Profile Simulation Subid = {0}".format(Sub.id) @@ -313,18 +275,18 @@ def WaterSurfaceProfile( start: Union[str, dt.datetime], end: Union[str, dt.datetime], fps: int = 100, - fromxs: Optional[int] = None, - toxs: Optional[int] = None, + from_xs: Optional[int] = None, + to_xs: Optional[int] = None, fmt: str = "%Y-%m-%d", - figsize: tuple = (20, 10), - textlocation: tuple = (1, 1), - LateralsColor: Union[int, str] = "#3D59AB", - LaterlasLineWidth: int = 1, - xaxislabelsize: int = 10, - yaxislabelsize: int = 10, - nxlabels: int = 10, - xticklabelsize: int = 8, - Lastsegment: bool = True, + fig_size: tuple = (20, 10), + text_location: tuple = (1, 1), + laterals_color: Union[int, str] = "#3D59AB", + laterals_line_width: int = 1, + x_axis_label_size: int = 10, + y_axis_label_size: int = 10, + xlabels_number: int = 10, + x_tick_label_size: int = 8, + last_river_reach: bool = True, floodplain: bool = True, repeat: bool = True, ) -> FuncAnimation: @@ -343,31 +305,31 @@ def WaterSurfaceProfile( fps : [integer], optional It is an optional integer value that represents the delay between each frame in milliseconds. Its default is 100. 
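The y-limit logic that recurs in these plotting hunks (highest dike crest inside the window, bed level at the downstream end) condenses into a small helper; a sketch assuming a cross_sections DataFrame with the "xsid", "zl", "zr" and "gl" columns used above:

import pandas as pd

def elevation_window(cross_sections: pd.DataFrame, from_xs: int, to_xs: int):
    """Return (ymin, ymax): bed level at the downstream cross section minus
    a margin, highest dike crest in the window plus a margin."""
    window = cross_sections[
        (cross_sections["xsid"] >= from_xs) & (cross_sections["xsid"] <= to_xs)
    ]
    ymax = max(window["zl"].max(), window["zr"].max())
    ymin = window.loc[window["xsid"] == to_xs, "gl"].values[0]
    return ymin - 5, ymax + 5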
- fromxs : [integer], optional + from_xs : [integer], optional number of cross sections to be displayed before the chosen cross section . The default is 10. - toxs : [integer], optional + to_xs : [integer], optional number of cross sections to be displayed after the chosen cross section . The default is 10. - xticklabelsize: [] + x_tick_label_size: [] - nxlabels:[] + xlabels_number:[] - yaxislabelsize: [] + y_axis_label_size: [] - LaterlasLineWidth: [] + laterals_line_width: [] - xaxislabelsize:[] + x_axis_label_size:[] - LateralsColor: [] + laterals_color: [] - textlocation: [] + text_location: [] fmt: [] - figsize: [] + fig_size: [] - Lastsegment: [bool] + last_river_reach: [bool] Default is True. floodplain: [bool] Default is True. @@ -381,15 +343,15 @@ def WaterSurfaceProfile( end = dt.datetime.strptime(end, fmt) msg = """The start date does not exist in the results, Results are between {0} and {1}""".format( - Sub.firstday, Sub.lastday + Sub.first_day, Sub.last_day ) - assert start in Sub.referenceindex_results, msg + assert start in Sub.reference_index_results, msg msg = """ The end date does not exist in the results, Results are between {0} and {1}""".format( - Sub.firstday, Sub.lastday + Sub.first_day, Sub.last_day ) - assert end in Sub.referenceindex_results, msg + assert end in Sub.reference_index_results, msg msg = """please read the boundary condition files using the 'ReadBoundaryConditions' method """ @@ -400,9 +362,9 @@ def WaterSurfaceProfile( assert start < end, msg if Sub.from_beginning == 1: - Period = Sub.daylist[ - np.where(Sub.referenceindex == start)[0][0] : np.where( - Sub.referenceindex == end + Period = Sub.days_list[ + np.where(Sub.reference_index == start)[0][0] : np.where( + Sub.reference_index == end )[0][0] + 1 ] @@ -413,61 +375,61 @@ def WaterSurfaceProfile( counter = [(i, j) for i in Period for j in hours] - fig = plt.figure(60, figsize=figsize) + fig = plt.figure(60, figsize=fig_size) gs = gridspec.GridSpec(nrows=2, ncols=6, figure=fig) ax1 = fig.add_subplot(gs[0, 2:6]) - ax1.set_ylim(0, int(Sub.Result1D["q"].max())) + ax1.set_ylim(0, int(Sub.results_1d["q"].max())) - if not fromxs: + if not from_xs: # xs = 0 # plot the whole sub-basin - fromxs = Sub.xsname[0] + from_xs = Sub.xs_names[0] else: # xs = 1 # not the whole sub-basin - if fromxs < Sub.xsname[0]: - fromxs = Sub.xsname[0] + if from_xs < Sub.xs_names[0]: + from_xs = Sub.xs_names[0] - if not toxs: - toxs = Sub.xsname[-1] + if not to_xs: + to_xs = Sub.xs_names[-1] else: - if toxs > Sub.xsname[-1]: - toxs = Sub.xsname[-1] + if to_xs > Sub.xs_names[-1]: + to_xs = Sub.xs_names[-1] - ax1.set_xlim(fromxs - 1, toxs + 1) - ax1.set_xticks(list(range(fromxs, toxs + 1))) - ax1.set_xticklabels(list(range(fromxs, toxs + 1))) + ax1.set_xlim(from_xs - 1, to_xs + 1) + ax1.set_xticks(list(range(from_xs, to_xs + 1))) + ax1.set_xticklabels(list(range(from_xs, to_xs + 1))) - ax1.tick_params(labelsize=xticklabelsize) - ax1.locator_params(axis="x", nbins=nxlabels) + ax1.tick_params(labelsize=x_tick_label_size) + ax1.locator_params(axis="x", nbins=xlabels_number) - ax1.set_xlabel("Cross section No", fontsize=xaxislabelsize) - ax1.set_ylabel("Discharge (m3/s)", fontsize=yaxislabelsize, labelpad=0.3) + ax1.set_xlabel("Cross section No", fontsize=x_axis_label_size) + ax1.set_ylabel("Discharge (m3/s)", fontsize=y_axis_label_size, labelpad=0.3) ax1.set_title("Reach-Basin" + " " + str(Sub.id), fontsize=15) ax1.legend(["Discharge"], fontsize=15) # plot location of laterals - for i in range(len(Sub.LateralsTable)): + for i in 
range(len(Sub.laterals_table)): ax1.vlines( - Sub.LateralsTable[i], + Sub.laterals_table[i], 0, - int(Sub.Result1D["q"].max()), - colors=LateralsColor, + int(Sub.results_1d["q"].max()), + colors=laterals_color, linestyles="dashed", - linewidth=LaterlasLineWidth, + linewidth=laterals_line_width, ) lat = pd.DataFrame() - lat["xsid"] = Sub.LateralsTable - lat = lat.merge(Sub.crosssections, on="xsid", how="left") + lat["xsid"] = Sub.laterals_table + lat = lat.merge(Sub.cross_sections, on="xsid", how="left") lim = ax1.get_ylim() - y = np.ones(len(Sub.LateralsTable), dtype=int) * (lim[1] - 50) + y = np.ones(len(Sub.laterals_table), dtype=int) * (lim[1] - 50) lat = ax1.scatter( - Sub.LateralsTable, + Sub.laterals_table, y, - c=LateralsColor, - linewidth=LaterlasLineWidth, + c=laterals_color, + linewidth=laterals_line_width, zorder=10, s=50, ) @@ -481,8 +443,8 @@ def WaterSurfaceProfile( ax2.set_xlim(1, 25) ax2.set_ylim(0, int(Sub.QBC.max().max()) + 1) - ax2.set_xlabel("Time", fontsize=yaxislabelsize) - ax2.set_ylabel("Q (m3/s)", fontsize=yaxislabelsize, labelpad=0.1) + ax2.set_xlabel("Time", fontsize=y_axis_label_size) + ax2.set_ylabel("Q (m3/s)", fontsize=y_axis_label_size, labelpad=0.1) ax2.set_title("BC - Q", fontsize=20) ax2.legend(["Q"], fontsize=15) @@ -495,8 +457,8 @@ def WaterSurfaceProfile( ax3.set_xlim(1, 25) ax3.set_ylim(float(Sub.HBC.min().min()), float(Sub.HBC.max().max())) - ax3.set_xlabel("Time", fontsize=yaxislabelsize) - ax3.set_ylabel("water level", fontsize=yaxislabelsize, labelpad=0.5) + ax3.set_xlabel("Time", fontsize=y_axis_label_size) + ax3.set_ylabel("water level", fontsize=y_axis_label_size, labelpad=0.5) ax3.set_title("BC - H", fontsize=20) ax3.legend(["WL"], fontsize=10) @@ -508,56 +470,62 @@ def WaterSurfaceProfile( ax4 = fig.add_subplot(gs[1, 0:6]) ymax1 = max( - Sub.crosssections.loc[Sub.crosssections["xsid"] >= fromxs, "zr"][ - Sub.crosssections["xsid"] <= toxs + Sub.cross_sections.loc[Sub.cross_sections["xsid"] >= from_xs, "zr"][ + Sub.cross_sections["xsid"] <= to_xs ] ) ymax2 = max( - Sub.crosssections.loc[Sub.crosssections["xsid"] >= fromxs, "zl"][ - Sub.crosssections["xsid"] <= toxs + Sub.cross_sections.loc[Sub.cross_sections["xsid"] >= from_xs, "zl"][ + Sub.cross_sections["xsid"] <= to_xs ] ) ymax = max(ymax1, ymax2) - minlev = Sub.crosssections.loc[Sub.crosssections["xsid"] == toxs, "gl"].values + minlev = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == to_xs, "gl" + ].values ax4.set_ylim(minlev - 5, ymax + 5) - ax4.set_xlim(fromxs - 1, toxs + 1) - ax4.set_xticks(list(range(fromxs, toxs + 1))) - ax4.set_xticklabels(list(range(fromxs, toxs + 1))) + ax4.set_xlim(from_xs - 1, to_xs + 1) + ax4.set_xticks(list(range(from_xs, to_xs + 1))) + ax4.set_xticklabels(list(range(from_xs, to_xs + 1))) - ax4.tick_params(labelsize=xticklabelsize) - ax4.locator_params(axis="x", nbins=nxlabels) + ax4.tick_params(labelsize=x_tick_label_size) + ax4.locator_params(axis="x", nbins=xlabels_number) ax4.plot( - Sub.xsname, - Sub.crosssections["zl"], + Sub.xs_names, + Sub.cross_sections["zl"], "k--", dashes=(5, 1), linewidth=2, label="Left Dike", ) ax4.plot( - Sub.xsname, Sub.crosssections["zr"], "k.-", linewidth=2, label="Right Dike" + Sub.xs_names, + Sub.cross_sections["zr"], + "k.-", + linewidth=2, + label="Right Dike", ) if Sub.version == 1: ax4.plot( - Sub.xsname, - Sub.crosssections["gl"], + Sub.xs_names, + Sub.cross_sections["gl"], "k-", linewidth=5, label="Bankful level", ) else: ax4.plot( - Sub.xsname, - Sub.crosssections["gl"], + Sub.xs_names, + 
Sub.cross_sections["gl"], "k-", linewidth=5, label="Ground level", ) ax4.plot( - Sub.xsname, - Sub.crosssections["gl"] + Sub.crosssections["dbf"], + Sub.xs_names, + Sub.cross_sections["gl"] + Sub.cross_sections["dbf"], "k", linewidth=2, label="Bankful depth", @@ -565,44 +533,44 @@ def WaterSurfaceProfile( if floodplain: fpl = ( - Sub.crosssections["gl"] - + Sub.crosssections["dbf"] - + Sub.crosssections["hl"] + Sub.cross_sections["gl"] + + Sub.cross_sections["dbf"] + + Sub.cross_sections["hl"] ) fpr = ( - Sub.crosssections["gl"] - + Sub.crosssections["dbf"] - + Sub.crosssections["hr"] + Sub.cross_sections["gl"] + + Sub.cross_sections["dbf"] + + Sub.cross_sections["hr"] ) - ax4.plot(Sub.xsname, fpl, "b-.", linewidth=2, label="Floodplain left") - ax4.plot(Sub.xsname, fpr, "r-.", linewidth=2, label="Floodplain right") + ax4.plot(Sub.xs_names, fpl, "b-.", linewidth=2, label="Floodplain left") + ax4.plot(Sub.xs_names, fpr, "r-.", linewidth=2, label="Floodplain right") ax4.set_title("Water surface Profile Simulation", fontsize=15) ax4.legend(fontsize=15) - ax4.set_xlabel("Profile", fontsize=yaxislabelsize) - ax4.set_ylabel("Elevation m", fontsize=yaxislabelsize) + ax4.set_xlabel("Profile", fontsize=y_axis_label_size) + ax4.set_ylabel("Elevation m", fontsize=y_axis_label_size) ax4.grid() # plot location of laterals - for i in range(len(Sub.LateralsTable)): - ymin = Sub.crosssections.loc[ - Sub.crosssections["xsid"] == Sub.LateralsTable[i], "gl" + for i in range(len(Sub.laterals_table)): + ymin = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == Sub.laterals_table[i], "gl" ].values[0] ax4.vlines( - Sub.LateralsTable[i], + Sub.laterals_table[i], ymin, ymax, - colors=LateralsColor, + colors=laterals_color, linestyles="dashed", - linewidth=LaterlasLineWidth, + linewidth=laterals_line_width, ) day_text = ax4.annotate( "", xy=( - fromxs + textlocation[0], - Sub.crosssections.loc[Sub.crosssections["xsid"] == toxs, "gl"].values - + textlocation[1], + from_xs + text_location[0], + Sub.cross_sections.loc[Sub.cross_sections["xsid"] == to_xs, "gl"].values + + text_location[1], ), fontsize=20, ) @@ -640,66 +608,66 @@ def init_q(): # animation function. 
this is called sequentially def animate_q(i): - x = Sub.xsname - y = Sub.Result1D.loc[Sub.Result1D["day"] == counter[i][0], "q"][ - Sub.Result1D["hour"] == counter[i][1] + x = Sub.xs_names + y = Sub.results_1d.loc[Sub.results_1d["day"] == counter[i][0], "q"][ + Sub.results_1d["hour"] == counter[i][1] ] # the Saintvenant subroutine writes the # results of the last xs in the next segment with the current # segment - if not Lastsegment: + if not last_river_reach: y = y.values[:-1] q_line.set_data(x, y) - day = Sub.referenceindex.loc[counter[i][0], "date"] + day = Sub.reference_index.loc[counter[i][0], "date"] - if len(Sub.LateralsTable) > 0: + if len(Sub.laterals_table) > 0: lat.set_sizes( - sizes=Sub.Laterals.loc[day, Sub.LateralsTable].values * 100 + sizes=Sub.Laterals.loc[day, Sub.laterals_table].values * 100 ) day_text.set_text("day = " + str(day + dt.timedelta(hours=counter[i][1]))) - y = Sub.Result1D.loc[Sub.Result1D["day"] == counter[i][0], "wl"][ - Sub.Result1D["hour"] == counter[i][1] + y = Sub.results_1d.loc[Sub.results_1d["day"] == counter[i][0], "wl"][ + Sub.results_1d["hour"] == counter[i][1] ] # the Saintvenant subroutine writes the results # of the last xs in the next segment with the current segment - if not Lastsegment: + if not last_river_reach: y = y.values[:-1] wl_line.set_data(x, y) y = ( - Sub.Result1D.loc[Sub.Result1D["day"] == counter[i][0], "h"][ - Sub.Result1D["hour"] == counter[i][1] + Sub.results_1d.loc[Sub.results_1d["day"] == counter[i][0], "h"][ + Sub.results_1d["hour"] == counter[i][1] ] * 2 ) # temporary as now the Saintvenant subroutine writes the results # of the last xs in the next segment with the current segment - if not Lastsegment: + if not last_river_reach: y = y.values[:-1] y = ( y - + Sub.crosssections.loc[ - Sub.crosssections.index[len(Sub.xsname) - 1], "gl" + + Sub.cross_sections.loc[ + Sub.cross_sections.index[len(Sub.xs_names) - 1], "gl" ] ) hLline.set_data(x, y) x = Sub.QBC.columns.values - y = Sub.QBC.loc[Sub.referenceindex.loc[counter[i][0], "date"]].values + y = Sub.QBC.loc[Sub.reference_index.loc[counter[i][0], "date"]].values bc_q_line.set_data(x, y) - y = Sub.HBC.loc[Sub.referenceindex.loc[counter[i][0], "date"]].values + y = Sub.HBC.loc[Sub.reference_index.loc[counter[i][0], "date"]].values bc_h_line.set_data(x, y) x = counter[i][1] - y = Sub.referenceindex.loc[counter[i][0], "date"] + y = Sub.reference_index.loc[counter[i][0], "date"] scatter1 = ax2.scatter(x, Sub.QBC[x][y], s=300) scatter2 = ax3.scatter(x, Sub.HBC[x][y], s=300) @@ -734,17 +702,17 @@ def WaterSurfaceProfile1Min( start: Union[str, dt.datetime], end: Union[str, dt.datetime], interval: float = 0.00002, - fromxs: Union[str, int] = "", - toxs: Union[str, int] = "", + from_xs: Union[str, int] = "", + to_xs: Union[str, int] = "", fmt: str = "%Y-%m-%d", figsize: tuple = (20, 10), - textlocation: tuple = (1, 1), - LateralsColor: Union[str, tuple] = "#3D59AB", - LaterlasLineWidth: int = 1, - xaxislabelsize: int = 10, - yaxislabelsize: int = 10, - nxlabels: int = 20, - xticklabelsize: int = 8, + text_location: tuple = (1, 1), + laterals_color: Union[str, tuple] = "#3D59AB", + laterals_line_width: int = 1, + x_axis_label_size: int = 10, + y_axis_label_size: int = 10, + xlabels_number: int = 20, + x_tick_label_size: int = 8, floodplain: bool = True, repeat: bool = True, ) -> FuncAnimation: @@ -762,9 +730,9 @@ def WaterSurfaceProfile1Min( DESCRIPTION. interval : TYPE, optional DESCRIPTION. The default is 0.00002. - fromxs : TYPE, optional + from_xs : TYPE, optional DESCRIPTION. 
The default is 10. - toxs : TYPE, optional + to_xs : TYPE, optional DESCRIPTION. The default is 10. floodplain: [bool] Default is True. @@ -772,19 +740,19 @@ def WaterSurfaceProfile1Min( Default is "%Y-%m-%d". figsize: [tuple] Default is (20, 10). - textlocation: [tuple] + text_location: [tuple] Default is (1, 1). - LateralsColor: [str] + laterals_color: [str] Default is "#3D59AB". - LaterlasLineWidth: [int] + laterals_line_width: [int] Default is 1. - xaxislabelsize: [int] + x_axis_label_size: [int] Default is 10. - yaxislabelsize: [int] + y_axis_label_size: [int] Default is 10. - nxlabels: [int] + xlabels_number: [int] Default is 20. - xticklabelsize: [int] + x_tick_label_size: [int] Default is 8. floodplain: bool Default is True. @@ -824,61 +792,61 @@ def WaterSurfaceProfile1Min( ax1 = fig2.add_subplot(gs[0, 2:6]) - if fromxs == "": - fromxs = Sub.xsname[0] - # toxs = Reach.xsname[-1] + if from_xs == "": + from_xs = Sub.xs_names[0] + # to_xs = Reach.xs_names[-1] else: - if fromxs < Sub.xsname[0]: - fromxs = Sub.xsname[0] + if from_xs < Sub.xs_names[0]: + from_xs = Sub.xs_names[0] - # if toxs > Reach.xsname[-1]: - # toxs = Reach.xsname[-1] + # if to_xs > Reach.xs_names[-1]: + # to_xs = Reach.xs_names[-1] - if toxs == "": - toxs = Sub.xsname[-1] + if to_xs == "": + to_xs = Sub.xs_names[-1] else: - if toxs > Sub.xsname[-1]: - toxs = Sub.xsname[-1] + if to_xs > Sub.xs_names[-1]: + to_xs = Sub.xs_names[-1] - ax1.set_xlim(fromxs - 1, toxs + 1) + ax1.set_xlim(from_xs - 1, to_xs + 1) - ax1.set_xticks(list(range(fromxs, toxs + 1))) - ax1.set_xticklabels(list(range(fromxs, toxs + 1))) + ax1.set_xticks(list(range(from_xs, to_xs + 1))) + ax1.set_xticklabels(list(range(from_xs, to_xs + 1))) - ax1.tick_params(labelsize=xticklabelsize) - ax1.locator_params(axis="x", nbins=nxlabels) + ax1.tick_params(labelsize=x_tick_label_size) + ax1.locator_params(axis="x", nbins=xlabels_number) - ax1.set_xlabel("Cross section No", fontsize=xaxislabelsize) - ax1.set_ylabel("Discharge (m3/s)", fontsize=yaxislabelsize, labelpad=0.5) + ax1.set_xlabel("Cross section No", fontsize=x_axis_label_size) + ax1.set_ylabel("Discharge (m3/s)", fontsize=y_axis_label_size, labelpad=0.5) ax1.set_title("Reach-Basin" + " " + str(Sub.id), fontsize=15) ax1.legend(["Discharge"], fontsize=15) ax1.set_ylim(0, int(Sub.q.max().max())) if Sub.version < 4: - # ax1.set_ylim(0, int(Reach.Result1D['q'].max())) + # ax1.set_ylim(0, int(Reach.results_1d['q'].max())) # plot location of laterals - for i in range(len(Sub.LateralsTable)): + for i in range(len(Sub.laterals_table)): ax1.vlines( - Sub.LateralsTable[i], + Sub.laterals_table[i], 0, int(int(Sub.q.max().max())), - colors=LateralsColor, + colors=laterals_color, linestyles="dashed", - linewidth=LaterlasLineWidth, + linewidth=laterals_line_width, ) lat = pd.DataFrame() - lat["xsid"] = Sub.LateralsTable - lat = lat.merge(Sub.crosssections, on="xsid", how="left") + lat["xsid"] = Sub.laterals_table + lat = lat.merge(Sub.cross_sections, on="xsid", how="left") lim = ax1.get_ylim() - y = np.ones(len(Sub.LateralsTable), dtype=int) * (lim[1] - 50) + y = np.ones(len(Sub.laterals_table), dtype=int) * (lim[1] - 50) lat = ax1.scatter( - Sub.LateralsTable, + Sub.laterals_table, y, - c=LateralsColor, - linewidth=LaterlasLineWidth, + c=laterals_color, + linewidth=laterals_line_width, zorder=10, s=50, ) @@ -893,12 +861,12 @@ def WaterSurfaceProfile1Min( ax2 = fig2.add_subplot(gs[0, 1:2]) ax2.set_xlim(1, nstep) if Sub.version < 4: - ax2.set_ylim(0, int(Sub.QBCmin.max().max())) + ax2.set_ylim(0, 
int(Sub.q_bc_1min.max().max())) else: ax2.set_ylim(0, int(Sub.USBC.max())) - ax2.set_xlabel("Time", fontsize=yaxislabelsize) - ax2.set_ylabel("Q (m3/s)", fontsize=yaxislabelsize, labelpad=0.1) + ax2.set_xlabel("Time", fontsize=y_axis_label_size) + ax2.set_ylabel("Q (m3/s)", fontsize=y_axis_label_size, labelpad=0.1) ax2.set_title("BC - Q", fontsize=20) ax2.legend(["Q"], fontsize=15) @@ -910,10 +878,12 @@ def WaterSurfaceProfile1Min( ax3 = fig2.add_subplot(gs[0, 0:1]) ax3.set_xlim(1, nstep) if Sub.version < 4: - ax3.set_ylim(float(Sub.HBCmin.min().min()), float(Sub.HBCmin.max().max())) + ax3.set_ylim( + float(Sub.h_bc_1min.min().min()), float(Sub.h_bc_1min.max().max()) + ) - ax3.set_xlabel("Time", fontsize=yaxislabelsize) - ax3.set_ylabel("water level", fontsize=yaxislabelsize, labelpad=0.5) + ax3.set_xlabel("Time", fontsize=y_axis_label_size) + ax3.set_ylabel("water level", fontsize=y_axis_label_size, labelpad=0.5) ax3.set_title("BC - H", fontsize=20) ax3.legend(["WL"], fontsize=10) @@ -926,55 +896,61 @@ def WaterSurfaceProfile1Min( ax4 = fig2.add_subplot(gs[1, 0:6]) ymax1 = max( - Sub.crosssections.loc[Sub.crosssections["xsid"] >= fromxs, "zr"][ - Sub.crosssections["xsid"] <= toxs + Sub.cross_sections.loc[Sub.cross_sections["xsid"] >= from_xs, "zr"][ + Sub.cross_sections["xsid"] <= to_xs ] ) ymax2 = max( - Sub.crosssections.loc[Sub.crosssections["xsid"] >= fromxs, "zl"][ - Sub.crosssections["xsid"] <= toxs + Sub.cross_sections.loc[Sub.cross_sections["xsid"] >= from_xs, "zl"][ + Sub.cross_sections["xsid"] <= to_xs ] ) ymax = max(ymax1, ymax2) - minlev = Sub.crosssections.loc[Sub.crosssections["xsid"] == toxs, "gl"].values + minlev = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == to_xs, "gl" + ].values ax4.set_ylim(minlev - 5, ymax + 5) - ax4.set_xlim(fromxs - 1, toxs + 1) - ax4.set_xticks(list(range(fromxs, toxs + 1))) + ax4.set_xlim(from_xs - 1, to_xs + 1) + ax4.set_xticks(list(range(from_xs, to_xs + 1))) - ax4.tick_params(labelsize=xaxislabelsize) - ax4.locator_params(axis="x", nbins=nxlabels) + ax4.tick_params(labelsize=x_axis_label_size) + ax4.locator_params(axis="x", nbins=xlabels_number) ax4.plot( - Sub.xsname, - Sub.crosssections["zl"], + Sub.xs_names, + Sub.cross_sections["zl"], "k--", dashes=(5, 1), linewidth=2, label="Left Dike", ) ax4.plot( - Sub.xsname, Sub.crosssections["zr"], "k.-", linewidth=2, label="Right Dike" + Sub.xs_names, + Sub.cross_sections["zr"], + "k.-", + linewidth=2, + label="Right Dike", ) if Sub.version == 1: ax4.plot( - Sub.xsname, - Sub.crosssections["gl"], + Sub.xs_names, + Sub.cross_sections["gl"], "k-", linewidth=5, label="Bankful level", ) else: ax4.plot( - Sub.xsname, - Sub.crosssections["gl"], + Sub.xs_names, + Sub.cross_sections["gl"], "k-", linewidth=5, label="Ground level", ) ax4.plot( - Sub.xsname, - Sub.crosssections["gl"] + Sub.crosssections["dbf"], + Sub.xs_names, + Sub.cross_sections["gl"] + Sub.cross_sections["dbf"], "k", linewidth=2, label="Bankful depth", @@ -982,17 +958,17 @@ def WaterSurfaceProfile1Min( if floodplain: fpl = ( - Sub.crosssections["gl"] - + Sub.crosssections["dbf"] - + Sub.crosssections["hl"] + Sub.cross_sections["gl"] + + Sub.cross_sections["dbf"] + + Sub.cross_sections["hl"] ) fpr = ( - Sub.crosssections["gl"] - + Sub.crosssections["dbf"] - + Sub.crosssections["hr"] + Sub.cross_sections["gl"] + + Sub.cross_sections["dbf"] + + Sub.cross_sections["hr"] ) - ax4.plot(Sub.xsname, fpl, "b-.", linewidth=2, label="Floodplain left") - ax4.plot(Sub.xsname, fpr, "r-.", linewidth=2, label="Floodplain right") + 
ax4.plot(Sub.xs_names, fpl, "b-.", linewidth=2, label="Floodplain left") + ax4.plot(Sub.xs_names, fpr, "r-.", linewidth=2, label="Floodplain right") ax4.set_title("Water surface Profile Simulation", fontsize=15) ax4.legend(fontsize=10) @@ -1001,25 +977,25 @@ def WaterSurfaceProfile1Min( ax4.grid() # plot location of laterals - for i in range(len(Sub.LateralsTable)): - ymin = Sub.crosssections.loc[ - Sub.crosssections["xsid"] == Sub.LateralsTable[i], "gl" + for i in range(len(Sub.laterals_table)): + ymin = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == Sub.laterals_table[i], "gl" ].values[0] ax4.vlines( - Sub.LateralsTable[i], + Sub.laterals_table[i], ymin, ymax, - colors=LateralsColor, + colors=laterals_color, linestyles="dashed", - linewidth=LaterlasLineWidth, + linewidth=laterals_line_width, ) day_text = ax4.annotate( "", xy=( - fromxs + textlocation[0], - Sub.crosssections.loc[Sub.crosssections["xsid"] == toxs, "gl"].values - + textlocation[1], + from_xs + text_location[0], + Sub.cross_sections.loc[Sub.cross_sections["xsid"] == to_xs, "gl"].values + + text_location[1], ), fontsize=20, ) @@ -1058,7 +1034,7 @@ def animate_min(i): day_text.set_text("Date = " + str(counter[i])) # discharge (ax1) - x = Sub.xsname + x = Sub.xs_names y = Sub.q[Sub.q.index == counter[i]].values[0] q_line.set_data(x, y) @@ -1068,24 +1044,24 @@ def animate_min(i): day = counter[i].floor(freq="D") - lat.set_sizes(sizes=Sub.Laterals.loc[day, Sub.LateralsTable].values * 100) + lat.set_sizes(sizes=Sub.Laterals.loc[day, Sub.laterals_table].values * 100) # BC Q (ax2) - x = Sub.QBCmin.columns.values + x = Sub.q_bc_1min.columns.values - y = Sub.QBCmin.loc[day].values + y = Sub.q_bc_1min.loc[day].values bc_q_line.set_data(x, y) # BC H (ax3) - y = Sub.HBCmin.loc[day].values + y = Sub.h_bc_1min.loc[day].values bc_h_line.set_data(x, y) # BC Q point (ax2) x = ((counter[i] - day).seconds / 60) + 1 - scatter1 = ax2.scatter(x, Sub.QBCmin[x][day], s=150) + scatter1 = ax2.scatter(x, Sub.q_bc_1min[x][day], s=150) # BC h point (ax3) - scatter2 = ax3.scatter(x, Sub.HBCmin[x][day], s=150) + scatter2 = ax3.scatter(x, Sub.h_bc_1min[x][day], s=150) return ( q_line, @@ -1117,14 +1093,14 @@ def river1d( end, interval=0.00002, xs=0, - xsbefore=10, - xsafter=10, + xs_before=10, + xs_after=10, fmt="%Y-%m-%d", - textlocation=2, - xaxislabelsize=15, - yaxislabelsize=15, - nxlabels=50, - plotbanhfuldepth=False, + text_location=2, + x_axis_label_size=15, + y_axis_label_size=15, + xlabels_number=50, + plot_bankfull_depth=False, ): """river1d. @@ -1142,9 +1118,9 @@ def river1d( DESCRIPTION. The default is 0.00002. xs : TYPE, optional DESCRIPTION. The default is 0. - xsbefore : TYPE, optional + xs_before : TYPE, optional DESCRIPTION. The default is 10. - xsafter : TYPE, optional + xs_after : TYPE, optional DESCRIPTION. The default is 10. 
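The animate_min updates above index the 1-minute boundary-condition tables by minute-of-day; the same indexing in plain pandas:

import pandas as pd

ts = pd.Timestamp("2010-01-05 07:30:00")
day = ts.floor(freq="D")                       # 2010-01-05 00:00:00
minute_col = int((ts - day).seconds / 60) + 1  # 451: 1-based minute of the day
# q_bc_1min[minute_col][day] would then pick the BC value for that minute,
# assuming q_bc_1min columns are minute numbers and its index holds days.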
Returns @@ -1163,17 +1139,17 @@ def river1d( " from {0} - and ends on {1} " ) - assert start in Sub.referenceindex_results, msg.format( - Sub.referenceindex_results[0], Sub.referenceindex_results[-1] + assert start in Sub.reference_index_results, msg.format( + Sub.reference_index_results[0], Sub.reference_index_results[-1] ) - assert end in Sub.referenceindex_results, msg.format( - Sub.referenceindex_results[0], Sub.referenceindex_results[-1] + assert end in Sub.reference_index_results, msg.format( + Sub.reference_index_results[0], Sub.reference_index_results[-1] ) - counter = Sub.referenceindex_results[ - np.where(Sub.referenceindex_results == start)[0][0] : np.where( - Sub.referenceindex_results == end + counter = Sub.reference_index_results[ + np.where(Sub.reference_index_results == start)[0][0] : np.where( + Sub.reference_index_results == end )[0][0] + 1 ] @@ -1188,12 +1164,12 @@ def river1d( ax1.set_ylim(0, int(Sub.usbc.max() + margin)) ax1.set_xlabel("Time", fontsize=15) if Sub.usbc.columns[0] == "q": - # ax1.set_ylabel('USBC - Q (m3/s)', fontsize=15) + # ax1.set_ylabel('USBC - Q (m3/s)', font_size=15) ax1.set_title("USBC - Q (m3/s)", fontsize=20) else: - # ax1.set_ylabel('USBC - H (m)', fontsize=15) + # ax1.set_ylabel('USBC - H (m)', font_size=15) ax1.set_title("USBC - H (m)", fontsize=20) - # ax1.legend(["Q"], fontsize=10) + # ax1.legend(["Q"], font_size=10) ax1.set_xlim(1, 25) (usbc_line,) = ax1.plot([], [], linewidth=5) # usbc_point = ax1.scatter([], [], s=150) @@ -1202,32 +1178,32 @@ def river1d( ax2 = fig2.add_subplot(gs[0, 1:5]) if xs == 0: # plot the whole sub-basin - ax2.set_xlim(Sub.xsname[0] - 1, Sub.xsname[-1] + 1) - ax2.set_xticks(Sub.xsname) - ax2.set_xticklabels(Sub.xsname) + ax2.set_xlim(Sub.xs_names[0] - 1, Sub.xs_names[-1] + 1) + ax2.set_xticks(Sub.xs_names) + ax2.set_xticklabels(Sub.xs_names) - FigureFirstXS = Sub.xsname[0] - FigureLastXS = Sub.xsname[-1] + FigureFirstXS = Sub.xs_names[0] + FigureLastXS = Sub.xs_names[-1] else: # not the whole sub-basin - FigureFirstXS = Sub.xsname[xs] - xsbefore - if FigureFirstXS < Sub.xsname[0]: - FigureFirstXS = Sub.xsname[0] + FigureFirstXS = Sub.xs_names[xs] - xs_before + if FigureFirstXS < Sub.xs_names[0]: + FigureFirstXS = Sub.xs_names[0] - FigureLastXS = Sub.xsname[xs] + xsafter - if FigureLastXS > Sub.xsname[-1]: - FigureLastXS = Sub.xsname[-1] + FigureLastXS = Sub.xs_names[xs] + xs_after + if FigureLastXS > Sub.xs_names[-1]: + FigureLastXS = Sub.xs_names[-1] ax2.set_xlim(FigureFirstXS, FigureLastXS) ax2.set_xticks(list(range(FigureFirstXS, FigureLastXS))) ax2.set_xticklabels(list(range(FigureFirstXS, FigureLastXS))) ax2.set_ylim(np.nanmin(Sub.q) - 10, int(np.nanmax(Sub.q)) + 10) - ax2.tick_params(labelsize=xaxislabelsize) - ax2.locator_params(axis="x", nbins=nxlabels) - ax2.set_xlabel("Cross section No", fontsize=xaxislabelsize) + ax2.tick_params(labelsize=x_axis_label_size) + ax2.locator_params(axis="x", nbins=xlabels_number) + ax2.set_xlabel("Cross section No", fontsize=x_axis_label_size) ax2.set_title("Discharge (m3/s)", fontsize=20) - # ax2.set_ylabel('Discharge (m3/s)', fontsize=yaxislabelsize, labelpad=0.5) + # ax2.set_ylabel('Discharge (m3/s)', font_size=y_axis_label_size, labelpad=0.5) ax2.legend(["Discharge"], fontsize=15) (q_line,) = ax2.plot([], [], linewidth=5) @@ -1242,13 +1218,13 @@ def river1d( ax3.set_xlabel("Time", fontsize=15) if Sub.dsbc.columns[0] == "q": - # ax3.set_ylabel('DSBC', fontsize=15, labelpad=0.5) + # ax3.set_ylabel('DSBC', font_size=15, labelpad=0.5) ax3.set_title("DSBC - Q (m3/s)", 
fontsize=20) else: - # ax3.set_ylabel('USBC', fontsize=15, labelpad=0.5) + # ax3.set_ylabel('USBC', font_size=15, labelpad=0.5) ax3.set_title("DSBC - H(m)", fontsize=20) - # ax3.legend(["WL"], fontsize=10) + # ax3.legend(["WL"], font_size=10) (dsbc_line,) = ax3.plot([], [], linewidth=5) # dsbc_point = ax3.scatter([], [], s=300) @@ -1258,42 +1234,42 @@ def river1d( ax4 = fig2.add_subplot(gs[1, 0:6]) if xs == 0: - ax4.set_xlim(Sub.xsname[0] - 1, Sub.xsname[-1] + 1) - ax4.set_xticks(Sub.xsname) - ymin = Sub.crosssections.loc[ - Sub.crosssections["xsid"] == FigureFirstXS, "bed level" + ax4.set_xlim(Sub.xs_names[0] - 1, Sub.xs_names[-1] + 1) + ax4.set_xticks(Sub.xs_names) + ymin = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == FigureFirstXS, "bed level" ].values.min() - ymax = Sub.crosssections.loc[ - Sub.crosssections["xsid"] == FigureFirstXS, "bed level" + ymax = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == FigureFirstXS, "bed level" ].values.max() ax4.set_ylim(ymin, ymax + np.nanmax(Sub.h) + 5) else: ax4.set_xlim(FigureFirstXS, FigureLastXS) ax4.set_xticks(list(range(FigureFirstXS, FigureLastXS))) ax4.set_ylim( - Sub.crosssections.loc[ - Sub.crosssections["xsid"] == FigureFirstXS, "bed level" + Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == FigureFirstXS, "bed level" ].values, - Sub.crosssections.loc[ - Sub.crosssections["xsid"] == FigureLastXS, "zr" + Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == FigureLastXS, "zr" ].values + 5, ) - ax4.tick_params(labelsize=xaxislabelsize) - ax4.locator_params(axis="x", nbins=nxlabels) + ax4.tick_params(labelsize=x_axis_label_size) + ax4.locator_params(axis="x", nbins=xlabels_number) ax4.plot( - Sub.xsname, - Sub.crosssections["bed level"], + Sub.xs_names, + Sub.cross_sections["bed level"], "k-", linewidth=5, label="Ground level", ) - if plotbanhfuldepth: + if plot_bankfull_depth: ax4.plot( - Sub.xsname, - Sub.crosssections["bed level"] + Sub.crosssections["depth"], + Sub.xs_names, + Sub.cross_sections["bed level"] + Sub.cross_sections["depth"], "k", linewidth=2, label="Bankful depth", @@ -1306,19 +1282,19 @@ def river1d( ax4.grid() if xs == 0: - textlocation = textlocation + Sub.xsname[0] + text_location = text_location + Sub.xs_names[0] day_text = ax4.annotate( " ", - xy=(textlocation, Sub.crosssections["bed level"].min() + 1), + xy=(text_location, Sub.cross_sections["bed level"].min() + 1), fontsize=20, ) else: day_text = ax4.annotate( " ", xy=( - FigureFirstXS + textlocation, - Sub.crosssections.loc[ - Sub.crosssections["xsid"] == FigureLastXS, "gl" + FigureFirstXS + text_location, + Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == FigureLastXS, "gl" ].values + 1, ), @@ -1346,13 +1322,13 @@ def animate_min(i): day_text.set_text("Date = " + str(counter[i])) # discharge (ax1) - x = Sub.xsname + x = Sub.xs_names - y = Sub.q[np.where(Sub.referenceindex_results == counter[i])[0][0], :] + y = Sub.q[np.where(Sub.reference_index_results == counter[i])[0][0], :] q_line.set_data(x, y) # water level (ax4) - y = Sub.wl[np.where(Sub.referenceindex_results == counter[i])[0][0], :] + y = Sub.wl[np.where(Sub.reference_index_results == counter[i])[0][0], :] wl_line.set_data(x, y) # USBC @@ -1378,7 +1354,7 @@ def animate_min(i): dsbc_line.set_data(x, y) # x = counter[i][1] - # y = Reach.referenceindex.loc[counter[i][0], 'date'] + # y = Reach.reference_index.loc[counter[i][0], 'date'] # ax2.scatter(x, Reach.QBC[x][y]) # # BC Q point (ax2) @@ -1407,22 +1383,22 @@ def animate_min(i): return anim - def CrossSections( + def 
plotCrossSections( self, Sub, - fromxs: Optional[int] = None, - toxs: Optional[int] = None, - xsrows: int = 3, - xscolumns: int = 3, + from_xs: Optional[int] = None, + to_xs: Optional[int] = None, + xs_rows: int = 3, + xs_columns: int = 3, bedlevel: bool = False, - titlesize: int = 15, - textsize: int = 15, - figsize: tuple = (18, 10), - linewidth: int = 6, - samescale: bool = False, - textspacing: List[tuple] = [(1, 1), (1, 2)], - plottingoption: int = 1, - plotannotation: bool = True, + title_size: int = 15, + text_size: int = 15, + fig_size: tuple = (18, 10), + line_width: int = 6, + same_scale: bool = False, + text_spacing: List[tuple] = [(1, 1), (1, 2)], + plotting_option: int = 1, + plot_annotation: bool = True, ): """CrossSections. @@ -1432,65 +1408,69 @@ def CrossSections( ---------- Sub : [Object] Reach-object created as a sub class from River object.. - fromxs : TYPE, optional + from_xs : TYPE, optional DESCRIPTION. The default is ''. - toxs : TYPE, optional + to_xs : TYPE, optional DESCRIPTION. The default is ''. - xsrows : TYPE, optional + xs_rows : TYPE, optional DESCRIPTION. The default is 3. - xscolumns : TYPE, optional + xs_columns : TYPE, optional DESCRIPTION. The default is 3. bedlevel : TYPE, optional DESCRIPTION. The default is False. - titlesize : TYPE, optional + title_size : TYPE, optional DESCRIPTION. The default is 15. - textsize : TYPE, optional + text_size : TYPE, optional DESCRIPTION. The default is 15. - figsize : TYPE, optional + fig_size : TYPE, optional DESCRIPTION. The default is (18, 10). - linewidth : TYPE, optional + line_width : TYPE, optional DESCRIPTION. The default is 6. - plottingoption : [integer] + plotting_option : [integer] 1 if you want to plot the whole cross-section, 2 to execlude the dikes(river bankfull area and floodplain will be plotted), 3 to plot only the bankfull area. - samescale: [bool] + same_scale: [bool] Default is False. - textspacing: [tuple] + text_spacing: [tuple] Default is [(1, 1), (1, 2)]. - plotannotation: [bool] + plot_annotation: [bool] Default is True. Returns ------- None. 
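The method body below builds a 16-column vertex table per cross section; columns 5 and 6 presumably hold bl + b (they are not shown in this hunk). A sketch of the eight x-coordinates derived from the left overbank width bl, channel width b, and right overbank width br:

def xs_x_vertices(bl: float, b: float, br: float) -> list:
    """Eight x-coordinates of the cross-section outline, left dike to right dike."""
    return [0, 0, bl, bl, bl + b, bl + b, bl + b + br, bl + b + br]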
""" - if not fromxs: + if not from_xs: startxs_ind = 0 else: - startxs_ind = Sub.xsname.index(fromxs) + startxs_ind = Sub.xs_names.index(from_xs) - if not toxs: + if not to_xs: endxs_ind = Sub.xsno - 1 else: - endxs_ind = Sub.xsname.index(toxs) + endxs_ind = Sub.xs_names.index(to_xs) names = list(range(1, 17)) XSS = pd.DataFrame( - columns=names, index=Sub.crosssections.loc[startxs_ind:endxs_ind, "xsid"] + columns=names, index=Sub.cross_sections.loc[startxs_ind:endxs_ind, "xsid"] ) # calculate the vertices of the cross sections for i in range(startxs_ind, endxs_ind + 1): ind = XSS.index[i - startxs_ind] - ind2 = Sub.crosssections.index[i] + ind2 = Sub.cross_sections.index[i] XSS[1].loc[XSS.index == ind] = 0 XSS[2].loc[XSS.index == ind] = 0 - bl = Sub.crosssections.loc[Sub.crosssections.index == ind2, "bl"].values[0] - b = Sub.crosssections.loc[Sub.crosssections.index == ind2, "b"].values[0] - br = Sub.crosssections.loc[Sub.crosssections.index == ind2, "br"].values[0] + bl = Sub.cross_sections.loc[Sub.cross_sections.index == ind2, "bl"].values[ + 0 + ] + b = Sub.cross_sections.loc[Sub.cross_sections.index == ind2, "b"].values[0] + br = Sub.cross_sections.loc[Sub.cross_sections.index == ind2, "br"].values[ + 0 + ] XSS[3].loc[XSS.index == ind] = bl XSS[4].loc[XSS.index == ind] = bl @@ -1499,27 +1479,37 @@ def CrossSections( XSS[7].loc[XSS.index == ind] = bl + b + br XSS[8].loc[XSS.index == ind] = bl + b + br - gl = Sub.crosssections.loc[Sub.crosssections.index == ind2, "gl"].values[0] + gl = Sub.cross_sections.loc[Sub.cross_sections.index == ind2, "gl"].values[ + 0 + ] if bedlevel: subtract = 0 else: subtract = gl - zl = Sub.crosssections.loc[Sub.crosssections.index == ind2, "zl"].values[0] - zr = Sub.crosssections.loc[Sub.crosssections.index == ind2, "zr"].values[0] + zl = Sub.cross_sections.loc[Sub.cross_sections.index == ind2, "zl"].values[ + 0 + ] + zr = Sub.cross_sections.loc[Sub.cross_sections.index == ind2, "zr"].values[ + 0 + ] - if "dbf" in Sub.crosssections.columns: - dbf = Sub.crosssections.loc[ - Sub.crosssections.index == ind2, "dbf" + if "dbf" in Sub.cross_sections.columns: + dbf = Sub.cross_sections.loc[ + Sub.cross_sections.index == ind2, "dbf" ].values[0] - hl = Sub.crosssections.loc[Sub.crosssections.index == ind2, "hl"].values[0] - hr = Sub.crosssections.loc[Sub.crosssections.index == ind2, "hr"].values[0] + hl = Sub.cross_sections.loc[Sub.cross_sections.index == ind2, "hl"].values[ + 0 + ] + hr = Sub.cross_sections.loc[Sub.cross_sections.index == ind2, "hr"].values[ + 0 + ] XSS[9].loc[XSS.index == ind] = zl - subtract - if "dbf" not in Sub.crosssections.columns: + if "dbf" not in Sub.cross_sections.columns: XSS[10].loc[XSS.index == ind] = gl + hl - subtract XSS[11].loc[XSS.index == ind] = gl - subtract XSS[14].loc[XSS.index == ind] = gl - subtract @@ -1537,13 +1527,13 @@ def CrossSections( # plot the cross sections xsplot = len(range(startxs_ind, endxs_ind + 1)) - figno = int(math.ceil(xsplot / (xscolumns * xsrows))) + figno = int(math.ceil(xsplot / (xs_columns * xs_rows))) ind2 = startxs_ind ind = XSS.index[ind2 - startxs_ind] for i in range(figno): - if samescale: + if same_scale: sharex = True sharey = True else: @@ -1551,19 +1541,19 @@ def CrossSections( sharey = False fig, ax_XS = plt.subplots( - ncols=xscolumns, - nrows=xsrows, - figsize=figsize, + ncols=xs_columns, + nrows=xs_rows, + figsize=fig_size, sharex=sharex, sharey=sharey, ) - # gs = gridspec.GridSpec(xsrows, xscolumns) + # gs = gridspec.GridSpec(xs_rows, xs_columns) - for j in range(xsrows): - for k in 
range(xscolumns): + for j in range(xs_rows): + for k in range(xs_columns): if ind2 <= endxs_ind: - XsId = Sub.crosssections.loc[ - Sub.crosssections.index[ind2], "xsid" + XsId = Sub.cross_sections.loc[ + Sub.cross_sections.index[ind2], "xsid" ] xcoord = ( XSS[names[0:8]].loc[XSS.index == ind].values.tolist()[0] @@ -1571,76 +1561,76 @@ def CrossSections( ycoord = ( XSS[names[8:16]].loc[XSS.index == ind].values.tolist()[0] ) - b = Sub.crosssections.loc[ - Sub.crosssections["xsid"] == ind, "b" + b = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == ind, "b" ].values[0] - bl = Sub.crosssections.loc[ - Sub.crosssections["xsid"] == ind, "bl" + bl = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == ind, "bl" ].values[0] - gl = Sub.crosssections.loc[ - Sub.crosssections["xsid"] == ind, "gl" + gl = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == ind, "gl" ].values[0] # ax_XS = fig.add_subplot(gs[j, k]) - if plottingoption == 1: - ax_XS[j, k].plot(xcoord, ycoord, linewidth=linewidth) - x = textspacing[0][0] - x1 = textspacing[1][0] - elif plottingoption == 2: + if plotting_option == 1: + ax_XS[j, k].plot(xcoord, ycoord, linewidth=line_width) + x = text_spacing[0][0] + x1 = text_spacing[1][0] + elif plotting_option == 2: ax_XS[j, k].plot( - xcoord[1:-1], ycoord[1:-1], linewidth=linewidth + xcoord[1:-1], ycoord[1:-1], linewidth=line_width ) - x = textspacing[0][0] + bl - x1 = textspacing[1][0] + bl + x = text_spacing[0][0] + bl + x1 = text_spacing[1][0] + bl else: ax_XS[j, k].plot( - xcoord[2:-2], ycoord[2:-2], linewidth=linewidth + xcoord[2:-2], ycoord[2:-2], linewidth=line_width ) - x = textspacing[0][0] + bl - x1 = textspacing[1][0] + bl + x = text_spacing[0][0] + bl + x1 = text_spacing[1][0] + bl ax_XS[j, k].title.set_text("xs ID = " + str(XsId)) - ax_XS[j, k].title.set_fontsize(titlesize) + ax_XS[j, k].title.set_fontsize(title_size) - if samescale: + if same_scale: # when sharex and sharey are true the labels # disappear so set thier visability to true ax_XS[j, k].xaxis.set_tick_params(labelbottom=True) ax_XS[j, k].yaxis.set_tick_params(labelbottom=True) - if plotannotation: + if plot_annotation: if Sub.version > 1: - dbf = Sub.crosssections.loc[ - Sub.crosssections["xsid"] == ind, "dbf" + dbf = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == ind, "dbf" ].values[0] - b = Sub.crosssections.loc[ - Sub.crosssections["xsid"] == ind, "b" + b = Sub.cross_sections.loc[ + Sub.cross_sections["xsid"] == ind, "b" ].values[0] if bedlevel: ax_XS[j, k].annotate( "dbf=" + str(round(dbf, 2)), - xy=(x, gl + textspacing[0][1]), - fontsize=textsize, + xy=(x, gl + text_spacing[0][1]), + fontsize=text_size, ) ax_XS[j, k].annotate( "b=" + str(round(b, 2)), - xy=(x1, gl + textspacing[1][1]), - fontsize=textsize, + xy=(x1, gl + text_spacing[1][1]), + fontsize=text_size, ) else: ax_XS[j, k].annotate( "dbf=" + str(round(dbf, 2)), - xy=(x, textspacing[0][1]), - fontsize=textsize, + xy=(x, text_spacing[0][1]), + fontsize=text_size, ) ax_XS[j, k].annotate( "b=" + str(round(b, 2)), - xy=(x1, textspacing[1][1]), - fontsize=textsize, + xy=(x1, text_spacing[1][1]), + fontsize=text_size, ) ind2 = ind2 + 1 @@ -1656,8 +1646,8 @@ def plot1minProfile( self, Sub, date: str, - xaxislabelsize: int = 10, - nxlabels: int = 50, + x_axis_label_size: int = 10, + xlabels_number: int = 50, fmt: str = "%Y-%m-%d", ): """Plot1minProfile. @@ -1670,9 +1660,9 @@ def plot1minProfile( DESCRIPTION. date : TYPE DESCRIPTION. - xaxislabelsize : TYPE, optional + x_axis_label_size : TYPE, optional DESCRIPTION. The default is 10. 
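Back in plotCrossSections, the plotting_option switch reduces to slicing the vertex arrays; a sketch (function name illustrative):

def select_outline(xcoord: list, ycoord: list, plotting_option: int):
    """1: whole section incl. dikes; 2: drop the dike vertices; 3: bankfull only."""
    if plotting_option == 1:
        return xcoord, ycoord
    elif plotting_option == 2:
        return xcoord[1:-1], ycoord[1:-1]
    return xcoord[2:-2], ycoord[2:-2]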
- nxlabels : TYPE, optional + xlabels_number : TYPE, optional DESCRIPTION. The default is 50. fmt : TYPE, optional DESCRIPTION. The default is "%Y-%m-%d". @@ -1691,14 +1681,14 @@ def plot1minProfile( ax1.set_ylabel("Discharge", fontsize=20) ax2.set_ylabel("Water level", fontsize=20) ax1.set_xlabel("Cross sections", fontsize=20) - ax1.set_xticks(Sub.xsname) - ax1.tick_params(labelsize=xaxislabelsize) - ax1.locator_params(axis="x", nbins=nxlabels) + ax1.set_xticks(Sub.xs_names) + ax1.tick_params(labelsize=x_axis_label_size) + ax1.locator_params(axis="x", nbins=xlabels_number) ax1.grid() @staticmethod - def saveProfileAnimation(Anim, Path="", fps=3, ffmpegPath=""): + def saveProfileAnimation(Anim, Path="", fps=3, ffmpeg_path=""): """SaveProfileAnimation. save video animation @@ -1712,7 +1702,7 @@ def saveProfileAnimation(Anim, Path="", fps=3, ffmpegPath=""): DESCRIPTION. The default is ''. fps : TYPE, optional DESCRIPTION. The default is 3. - ffmpegPath : TYPE, optional + ffmpeg_path : TYPE, optional DESCRIPTION. The default is ''. in order to save a video using matplotlib you have to download ffmpeg @@ -1731,11 +1721,13 @@ def saveProfileAnimation(Anim, Path="", fps=3, ffmpegPath=""): compitable with your operating system, and copy the content of the folder and paste it in the "c:/user/.matplotlib/ffmpeg-static/" """ - if ffmpegPath == "": - ffmpegPath = os.getenv("HOME") + "/.matplotlib/ffmpeg-static/bin/ffmpeg.exe" - assert os.path.exists(ffmpegPath), "{0}".format(message) + if ffmpeg_path == "": + ffmpeg_path = ( + os.getenv("HOME") + "/.matplotlib/ffmpeg-static/bin/ffmpeg.exe" + ) + assert os.path.exists(ffmpeg_path), "{0}".format(message) - mpl.rcParams["animation.ffmpeg_path"] = ffmpegPath + mpl.rcParams["animation.ffmpeg_path"] = ffmpeg_path Ext = Path.split(".")[1] if Ext == "gif": @@ -1832,11 +1824,11 @@ def Histogram(v1, v2, NoAxis=2, filter1=0.2, Save=False, pdf=True, **kwargs): Example ------- - Vis.Histogram(Val1, val2,2,figsize=(5.5,4.5), color1='#27408B', + Vis.Histogram(Val1, val2,2,fig_size=(5.5,4.5), color1='#27408B', xlabel = 'Inundation Depth (m)', ylabel = 'Frequency', legend_size = 15, - fontsize=15, labelsize = 15, Axisfontsize = 11, + font_size=15, label_size = 15, axis_font_size = 11, legend = ['RIM1.0', 'RIM2.0'], pdf = False, Save = False, - name = str(Event1.EventIndex.loc[EndInd,'id'])) + name = str(Event1.event_index.loc[EndInd,'id'])) """ # update the default options Fkeys = list(kwargs.keys()) @@ -1864,7 +1856,7 @@ def Histogram(v1, v2, NoAxis=2, filter1=0.2, Save=False, pdf=True, **kwargs): # if key == 'legend': # plt.legend(kwargs['legend']) # if key == 'legend size': - # plt.legend(kwargs['legend'],fontsize = int(kwargs['legend_size'])) + # plt.legend(kwargs['legend'],font_size = int(kwargs['legend_size'])) # if key == 'xlabel': # plt.xlabel(kwargs['xlabel']) # if key == 'ylabel': @@ -1875,7 +1867,7 @@ def Histogram(v1, v2, NoAxis=2, filter1=0.2, Save=False, pdf=True, **kwargs): # # # xlabel = kwargs['xlabel'] elif NoAxis == 2: - fig, ax1 = plt.subplots(figsize=Visualize.FigureDefaultOptions["figsize"]) + fig, ax1 = plt.subplots(figsize=Visualize.FigureDefaultOptions["fig_size"]) # n1= ax1.hist([v1,v2], bins=15, alpha = 0.7, color=[color1,color2]) n1 = ax1.hist( [v1, v2], @@ -1906,7 +1898,7 @@ def Histogram(v1, v2, NoAxis=2, filter1=0.2, Save=False, pdf=True, **kwargs): pdf_fitted1, "-.", color=Visualize.FigureDefaultOptions["color1"], - linewidth=Visualize.FigureDefaultOptions["linewidth"], + linewidth=Visualize.FigureDefaultOptions["line_width"], 
label="RIM1.0 pdf", ) ax2.plot( @@ -1914,7 +1906,7 @@ def Histogram(v1, v2, NoAxis=2, filter1=0.2, Save=False, pdf=True, **kwargs): pdf_fitted2, "-.", color=Visualize.FigureDefaultOptions["color2"], - linewidth=Visualize.FigureDefaultOptions["linewidth"], + linewidth=Visualize.FigureDefaultOptions["line_width"], label="RIM2.0 pdf", ) ax2.set_ylabel( @@ -1934,11 +1926,11 @@ def Histogram(v1, v2, NoAxis=2, filter1=0.2, Save=False, pdf=True, **kwargs): # # xlabel = kwargs['xlabel'] # n2 = ax2.hist(v2, bins=n1[1], alpha = 0.4, color=color2)#width=0.2, - # ax2.set_ylabel("Frequency", fontsize = 15) + # ax2.set_ylabel("Frequency", font_size = 15) # ax2.yaxis.label.set_color(color2) # ax2.tick_params(axis='y', color = color2) - # plt.title("Reach-Basin = " + str(Subid), fontsize = 15) + # plt.title("Reach-Basin = " + str(Subid), font_size = 15) # minall = min(min(n1[1]), min(n2[1])) # if minall < 0: @@ -1962,7 +1954,7 @@ def Histogram(v1, v2, NoAxis=2, filter1=0.2, Save=False, pdf=True, **kwargs): # ax1.set_xlabel(self.FigureOptions['xlabel']) # if key == 'ylabel': # ax1.set_ylabel(self.FigureOptions['ylabel']) - # if key == 'labelsize': + # if key == 'label_size': ax1.set_xlabel( Visualize.FigureDefaultOptions["xlabel"], fontsize=Visualize.FigureDefaultOptions["AxisLabelSize"], @@ -1971,12 +1963,12 @@ def Histogram(v1, v2, NoAxis=2, filter1=0.2, Save=False, pdf=True, **kwargs): Visualize.FigureDefaultOptions["ylabel"], fontsize=Visualize.FigureDefaultOptions["AxisLabelSize"], ) - # if key == 'fontsize': + # if key == 'font_size': plt.rcParams.update( - {"font.size": int(Visualize.FigureDefaultOptions["Axisfontsize"])} + {"font.size": int(Visualize.FigureDefaultOptions["axis_font_size"])} ) - # fig.legend(loc="upper right", bbox_to_anchor=(1,1), bbox_transform=ax1.transAxes,fontsize = 15) + # fig.legend(loc="upper right", bbox_to_anchor=(1,1), bbox_transform=ax1.transAxes,font_size = 15) plt.tight_layout() @@ -1993,17 +1985,13 @@ def ListAttributes(self): """ print("\n") print( - "Attributes List of: " - + repr(self.__dict__["name"]) - + " - " - + self.__class__.__name__ - + " Instance\n" + f"Attributes List of: {repr(self.__dict__['name'])} - {self.__class__.__name__} Instance\n" ) self_keys = list(self.__dict__.keys()) self_keys.sort() for key in self_keys: if key != "name": - print(str(key) + " : " + repr(self.__dict__[key])) + print(f"{key} : {repr(self.__dict__[key])}") print("\n") diff --git a/Hapi/rrm/calibration.py b/Hapi/rrm/calibration.py index 74380135..efeba201 100644 --- a/Hapi/rrm/calibration.py +++ b/Hapi/rrm/calibration.py @@ -206,17 +206,25 @@ def extractDischarge(self, Factor=None): # return error def runCalibration(self, SpatialVarFun, OptimizationArgs, printError=None): - """runCalibration. + """Run Calibration. - this function runs the calibration algorithm for the conceptual distributed - hydrological model + - This function runs the calibration algorithm for the conceptual distributed + hydrological model - Inputs: + Parameters ---------- - 1-ConceptualModel: + SpatialVarFun: [function] + + OptimizationArgs: [Dict] + + printError: [bool] + Default is None. + + Parameters that should be defined before running the function. 
+        ConceptualModel: [function]
+            conceptual model and it should contain a function called simulate
-        2-Basic_inputs:
+        Basic_inputs:
             1-p2: [List]
                 list of unoptimized parameters
                 p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and
                 24 for daily time step
@@ -227,27 +235,27 @@ def runCalibration(self, SpatialVarFun, OptimizationArgs, printError=None):
                 [Numeric] upper bound of the values of the parameters
             4-LB:
                 [Numeric] Lower bound of the values of the parameters
-        3-Q_obs:
+        Q_obs:
             [Numeric] Observed values of discharge
-        6-lumpedParNo:
+        lumpedParNo:
             [int] number of lumped parameters, you have to enter the value of
             the lumped parameter at the end of the list, default is 0 (no
             lumped parameters)
-        7-lumpedParPos:
+        lumpedParPos:
             [List] list of order or position of the lumped parameter among all
             the parameters of the lumped model (order starts from 0 to the
             length of the model parameters), default is [] (empty), the
             following order of parameters is used for the lumped HBV model used
             [ltt, utt, rfcf, sfcf, ttm, cfmax, cwh, cfr, fc, beta, e_corr, etf,
             lp, c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum]
-        8-objective_function:
+        objective_function:
             [function] objective function to calculate the performance of the
             model and to be used in the calibration
-        9-*args:
+        *args:
             other arguments needed on the objective function
-        Outputs:
-        ----------
+        Returns
+        -------
         st: [4D array]
             state variables
         q_out: [1D array]
@@ -264,7 +272,7 @@ def runCalibration(self, SpatialVarFun, OptimizationArgs, printError=None):
         >>> FlowDPath = "GIS/4000/fd4000.tif"
         >>> ParPath = "meteodata/4000/parameters.txt"
         >>> p2 = [1, 227.31]
-        >>> st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath,
+        >>> st, q_out, q_uz_routed = RunModel(PrecPath, Evap_Path, TempPath, DemPath,
         >>> FlowAccPath,FlowDPath,ParPath,p2)
         """
         # input dimensions
diff --git a/Hapi/rrm/distparameters.py b/Hapi/rrm/distparameters.py
index a14009f1..105265d9 100644
--- a/Hapi/rrm/distparameters.py
+++ b/Hapi/rrm/distparameters.py
@@ -15,7 +15,10 @@

 class DistParameters:
-    """Distripute parameter class is used to distribute the values of the parameter vector in the calibration process into the 3D array, considering if some of the parameters are lumped parameters, if you want to distribute the parameters in HRUs.
+    """Distribute.
+
+    parameter class is used to distribute the values of the parameter vector in the calibration
+    process into the 3D array, considering if some of the parameters are lumped parameters, if you want to distribute the parameters in HRUs.

     the method included are
     1- par3d
@@ -312,7 +315,10 @@ def par3d(self, par_g):  # , kub=1,klb=0.5,Maskingum=True

         # self.Par3d[self.celli[i],self.cellj[i],-2]= DistParameters.calculateK(self.Par3d[self.celli[i],self.cellj[i],-1],self.Par3d[self.celli[i],self.cellj[i],-2],kub,klb)

     def par3dLumped(self, par_g):  # , kub=1, klb=0.5, Maskingum = True
-        """par3dLumped method takes a list of parameters [saved as one column or generated as 1D list from optimization algorithm] and distribute them horizontally on number of cells given by a raster.
+        r"""par3dLumped method.
+
+        takes a list of parameters [saved as one column or generated as a 1D list from
+        the optimization algorithm] and distributes them horizontally over the number of cells given by a raster.

         Parameters
         ----------
@@ -328,21 +334,21 @@ def par3dLumped(self, par_g):  # , kub=1, klb=0.5, Maskingum = True
             if the routing function is muskingum. The default is False.
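Conceptually, par3dLumped broadcasts a single parameter vector over all raster cells (the real method also honours nodata cells, which this sketch ignores):

import numpy as np

no_parameters = 12
par_g = np.random.random(no_parameters)   # one lumped vector for the whole basin
rows, cols = 10, 8                        # illustrative raster shape
par_3d = np.tile(par_g, (rows, cols, 1))  # shape (rows, cols, no_parameters)
assert (par_3d[3, 4, :] == par_g).all()   # every cell carries the same vector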
Returns - ---------- + ------- par_3d: [3d array] 3D array of the parameters distributed horizontally on the cells - Example: - ---------- + Example + ------- EX1:Lumped parameters raster=gdal.Open("dem.tif") [fc, beta, etf, lp, c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum] - raster=gdal.Open(path+"soil_classes.tif") - no_parameters=12 - par_g=np.random.random(no_parameters) #no_elem*(no_parameters-no_lumped_par) + >>> raster = gdal.Open(f"{path}\soil_classes.tif") + >>> no_parameters = 12 + >>> par_g = np.random.random(no_parameters) #no_elem*(no_parameters-no_lumped_par) - tot_dist_par=DP.par3dLumped(par_g,raster,no_parameters,lumped_par_pos,kub=1,klb=0.5) + >>> tot_dist_par = DP.par3dLumped(par_g, raster, no_parameters, lumped_par_pos, kub=1, klb=0.5) """ # input data validation # data type @@ -478,7 +484,11 @@ def par2d_lumpedK1_lake(self, par_g, no_parameters_lake): # ,kub,klb # return self.Par3d, lake_par def HRU(self, par_g): # ,kub=1,klb=0.5 - """HRU method takes a list of parameters [saved as one column or generated as 1D list from optimization algorithm] and distribute them horizontally on number of cells given by a raster the input raster should be classified raster (by numbers) into class to be used to define the HRUs. + """HRU. + + method takes a list of parameters [saved as one column or generated as 1D list from optimization algorithm] + and distribute them horizontally on number of cells given by a raster the input raster should be classified + raster (by numbers) into class to be used to define the HRUs. Parameters ---------- @@ -507,7 +517,7 @@ def HRU(self, par_g): # ,kub=1,klb=0.5 default is 0.5 hour (30 min) Returns - ---------- + ------- par_3d: 3D array of the parameters distributed horizontally on the cells @@ -515,12 +525,11 @@ def HRU(self, par_g): # ,kub=1,klb=0.5 -------- EX1:HRU without lumped parameters [fc, beta, etf, lp, c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum] - raster = gdal.Open("soil_types.tif") - no_lumped_par=0 - lumped_par_pos=[] - par_g=np.random.random(no_elem*(no_parameters-no_lumped_par)) - - par_hru=HRU(par_g,raster,no_parameters,no_lumped_par,lumped_par_pos,kub=1,klb=0.5) + >>> raster = gdal.Open("soil_types.tif") + >>> no_lumped_par = 0 + >>> lumped_par_pos = [] + >>> par_g = np.random.random(no_elem*(no_parameters-no_lumped_par)) + >>> par_hru = HRU(par_g, raster, no_parameters, no_lumped_par, lumped_par_pos, kub=1,klb=0.5) EX2: HRU with one lumped parameters @@ -529,14 +538,14 @@ def HRU(self, par_g): # ,kub=1,klb=0.5 K1 is lumped so its value is inserted at the end and its order should be after K - raster = gdal.Open("soil_types.tif") - no_lumped_par=1 - lumped_par_pos=[6] - par_g=np.random.random(no_elem* (no_parameters-no_lumped_par)) - # insert the value of k1 at the end - par_g=np.append(par_g,0.005) + >>> raster = gdal.Open("soil_types.tif") + >>> no_lumped_par = 1 + >>> lumped_par_pos = [6] + >>> par_g = np.random.random(no_elem* (no_parameters-no_lumped_par)) + >>> # insert the value of k1 at the end + >>> par_g = np.append(par_g,0.005) - par_hru=HRU(par_g,raster,no_parameters,no_lumped_par,lumped_par_pos,kub=1,klb=0.5) + >>> par_hru = HRU(par_g, raster, no_parameters, no_lumped_par, lumped_par_pos, kub=1, klb=0.5) """ # input data validation # data type @@ -747,7 +756,10 @@ def ParametersNumber(self): self.ParametersNO = self.no_elem * self.no_parameters def saveParameters(self, Path): - """saveParameters. 
saveParameters method takes generated parameters by the calibration algorithm, distributed them with a given function and save them as a rasters.
+        """SaveParameters.
+
+        saveParameters takes the parameters generated by the calibration algorithm, distributes them with a given
+        function and saves them as rasters.

         Parameters
         ----------
@@ -779,7 +791,7 @@ def saveParameters(self, Path):
             Rasters for parameters of the distributed model

         Examples
-        ----------
+        --------
         >>> from Hapi.rrm.distparameters import DistParameters as DP
         >>> DemPath = "GIS/4000/dem4000.tif"
         >>> raster = gdal.Open(DemPath)
diff --git a/Hapi/rrm/distrrm.py b/Hapi/rrm/distrrm.py
index 49589a10..fa3b36a6 100644
--- a/Hapi/rrm/distrrm.py
+++ b/Hapi/rrm/distrrm.py
@@ -10,9 +10,13 @@

 class DistributedRRM:
-    """DistributedRRM class runs simulation in lumped form for each cell separetly and rout the discharge between the cells following the rivernetwork.
+    """DistributedRRM class.

-    Methods:
+    runs the simulation in lumped form for each cell separately and routes the discharge between the cells following
+    the river network.
+
+    Methods
+    -------
     1-RunLumpedRRM
     2-SpatialRouting
     3-DistMaxBas1
@@ -25,48 +29,53 @@ def __init__(self):

     @staticmethod
     def RunLumpedRRM(Model):
-        """RunLumpedRRM method runs the rainfall runoff lumped model (HBV, GR4,...) separately for each cell and return a time series of arrays.
+        """Run Lumped RRM.
+
+        - runs the rainfall runoff lumped model (HBV, GR4, ...) separately for each cell and returns a time series
+        of arrays.

-        Inputs:
+        Parameters
         ----------
-        1-ConceptualModel:
-            [function] conceptual model function
-        2-Raster:
-            [gdal.dataset] raster to get the spatial information (nodata cells)
+        Model:
+            ConceptualModel: [function]
+                conceptual model function
+            Raster: [gdal.dataset]
+                raster to get the spatial information (nodata cells)
                 raster input could be dem, flow accumulation or flow direction
                 raster of the catchment but the nodata value stored in the
                 raster should be far from the range of values that could
                 result from the calculation
-        3-sp_prec:
-            [numpy array] 3d array of the precipitation data, sp_prec should
+            sp_prec: [numpy array]
+                3d array of the precipitation data, sp_prec should
                 have the same 2d dimension of raster input
-        4-sp_et:
-            [numpy array] 3d array of the evapotranspiration data, sp_et should
+            sp_et: [numpy array]
+                3d array of the evapotranspiration data, sp_et should
                 have the same 2d dimension of raster input
-        5-sp_temp:
-            [numpy array] 3d array of the temperature data, sp_temp should
+            sp_temp: [numpy array]
+                3d array of the temperature data, sp_temp should
                 have the same 2d dimension of raster input
-        6-sp_pars:
-            [numpy array] number of 2d arrays of the catchment properties spatially
+            sp_pars: [numpy array]
+                number of 2d arrays of the catchment properties spatially
                 distributed in 2d and the third dimension is the number of
                 parameters, sp_pars should have the same 2d dimension of raster
                 input
-        7-p2:
-            [List] list of unoptimized parameters
+            p2: [List]
+                list of unoptimized parameters
                 p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and
                 24 for daily time step
                 p2[1] = catchment area in km2
-        8-init_st:
-            [list] initial state variables values [sp, sm, uz, lz, wc].
default=None - 9-ll_temp: - [numpy array] 3d array of the long term average temperature data - 10-q_init: - [float] initial discharge m3/s - Outputs: - ---------- - 1-statevariables : [numpy ndarray] - 4D array (rows,cols,time,states) states are [sp,wc,sm,uz,lv] - 2-qlz : [numpy ndarray] - 3D array of the lower zone discharge (lumped calculation for each cell separately) - 3-quz : [numpy ndarray] - 3D array of the upper zone discharge + init_st: [list] + initial state variables values [sp, sm, uz, lz, wc]. default=None + ll_temp: [numpy array] + 3d array of the long term average temperature data + q_init: [float] + initial discharge m3/s + + Returns + ------- + statevariables : [numpy ndarray] + 4D array (rows,cols,time,states) states are [sp,wc,sm,uz,lv] + qlz : [numpy ndarray] + 3D array of the lower zone discharge (lumped calculation for each cell separately) + quz : [numpy ndarray] + 3D array of the upper zone discharge """ Model.statevariables = np.zeros( [Model.rows, Model.cols, Model.TS, 5], dtype=np.float32 @@ -107,35 +116,38 @@ def RunLumpedRRM(Model): @staticmethod def SpatialRouting(Model): - """SpatialRouting method routes the discharge from cell to another following the flow direction input raster. + """Spatial Routing method. + + routes the discharge from cell to another following the flow direction input raster. - Inputs: + Parameters ---------- - 1-qlz: - [numpy ndarray] 3D array of the lower zone discharge - 2-quz: - [numpy ndarray] 3D array of the upper zone discharge - 3-flow_acc: - [gdal.dataset] flow accumulation raster file of the catchment (clipped to the catchment only) - 4-flow_direct: - [gdal.dataset] flow Direction raster file of the catchment (clipped to the catchment only) - 5-sp_pars: - [numpy ndarray] 3D array of the parameters - 6-p2: - [List] list of unoptimized parameters + Model: + qlz: [numpy ndarray] + 3D array of the lower zone discharge + quz: [numpy ndarray] + 3D array of the upper zone discharge + flow_acc: [gdal.dataset] + flow accumulation raster file of the catchment (clipped to the catchment only) + flow_direct: [gdal.dataset] + flow Direction raster file of the catchment (clipped to the catchment only) + sp_pars: [numpy ndarray] + 3D array of the parameters + p2: [List] + list of unoptimized parameters p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step p2[1] = catchment area in km2 - Outputs: - ---------- - 1-qout: - [numpy array] 1D timeseries of discharge at the outlet of the catchment - of unit m3/sec - 2-quz_routed: - [numpy ndarray] 3D array of the upper zone discharge accumulated and - routed at each time step - 3-qlz_translated: - [numpy ndarray] 3D array of the lower zone discharge translated at each time step + Returns + ------- + qout: [numpy array] + 1D timeseries of discharge at the outlet of the catchment + of unit m3/sec + quz_routed: [numpy ndarray] + 3D array of the upper zone discharge accumulated and + routed at each time step + qlz_translated: [numpy ndarray] + 3D array of the lower zone discharge translated at each time step """ # # routing lake discharge with DS cell k & x and adding to cell Q # q_lake=Routing.Muskingum_V(q_lake,q_lake[0],sp_pars[lakecell[0],lakecell[1],10],sp_pars[lakecell[0],lakecell[1],11],p2[0]) @@ -146,11 +158,11 @@ def SpatialRouting(Model): ### cells at the divider Model.quz_routed = np.zeros_like(Model.quz) - """ - lower zone discharge is going to be just translated without any attenuation - in order to be able to calculate total discharge (uz+lz) at internal points - 
in the catchment
-    """
+
+        # lower zone discharge is going to be just translated without any attenuation
+        # in order to be able to calculate total discharge (uz+lz) at internal points
+        # in the catchment
+
        Model.qlz_translated = np.zeros_like(Model.quz)
        #     Model.Qtot = np.zeros_like(Model.quz)
        # for all cell with 0 flow acc put the quz
@@ -279,7 +291,7 @@ def Dist_HBV2(
    ll_temp=None,
    q_0=None,
 ):
-    """original function."""
+    """Original Routing function."""
    n_steps = sp_prec.shape[2] + 1  # no of time steps = length of time series + 1
    # initialise vector of nans to fill states
diff --git a/Hapi/rrm/hbv.py b/Hapi/rrm/hbv.py
index ea9b1f6a..123a966d 100644
--- a/Hapi/rrm/hbv.py
+++ b/Hapi/rrm/hbv.py
@@ -280,7 +280,7 @@ def Soil(fc, beta, etf, temp, tm, e_corr, lp, c_flux, inf, ep, sm_old, uz_old):

 def Response(perc, alpha, k, k1, lz_old, uz_int_1):  # tfac,area,
-    """Response The response routine of the HBV-96 model.
+    r"""Response. The response routine of the HBV-96 model.

     The response routine is in charge of transforming the current values of
     upper and lower zone into discharge. This routine also controls the
@@ -368,7 +368,9 @@ def Routing(q, maxbas=1):

 def StepRun(p, v, St, snow=0):
-    """StepRun Makes the calculation of next step of discharge and states.
+    """Step run.
+
+    The function calculates the next step of discharge and states.

     Parameters
     ----------
diff --git a/Hapi/rrm/hbv_lake.py b/Hapi/rrm/hbv_lake.py
index 3f4af46c..b5a15900 100644
--- a/Hapi/rrm/hbv_lake.py
+++ b/Hapi/rrm/hbv_lake.py
@@ -229,8 +229,9 @@ def _soil(fc, beta, etf, temp, tm, e_corr, lp, tfac, c_flux, inf, ep, sm_old, uz

 def _response(tfac, perc, alpha, k, k1, area, lz_old, uz_int_1, qdr):
-    """Response. The response routine of the HBV-96 model.
+    r"""Response.

+    The response routine of the HBV-96 model.
     The response routine is in charge of transforming the current values of
     upper and lower zone into discharge. This routine also controls the
     recharge of the lower zone tank (baseflow). The transformation of units
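Before the hbvlumped.py hunks that follow: the module header rewritten below fixes a specific parameter-vector ordering, which is easy to get wrong when calibrating by hand. Here is a hedged sketch of assembling the no-snow, 10-parameter vector in that documented order; every numeric value is a placeholder, not a calibrated or recommended parameter:

```python
# Placeholder HBV-96 parameter vector in the order the hbvlumped docstring
# documents for a catchment without snow:
# [rfcf, fc, beta, etf, lp, c_flux, k, k1, alpha, perc]
par = [
    1.0,    # rfcf   - rainfall corrector factor
    250.0,  # fc     - field capacity
    2.0,    # beta   - effective-precipitation shape coefficient
    0.1,    # etf    - total potential evapotranspiration
    0.5,    # lp     - soil wilting point
    1.0,    # c_flux - capillary flux in the root zone
    0.05,   # k      - upper zone recession coefficient
    0.01,   # k1     - lower zone recession coefficient
    0.6,    # alpha  - response box parameter (upper zone runoff)
    2.0,    # perc   - percolation
]
p2 = [24, 1530.0]  # [tfac (24 = daily time step), catchment area in km2]
```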
diff --git a/Hapi/rrm/hbvlumped.py b/Hapi/rrm/hbvlumped.py
index 1ed96f59..d6d89268 100644
--- a/Hapi/rrm/hbvlumped.py
+++ b/Hapi/rrm/hbvlumped.py
@@ -17,33 +17,34 @@

 - Model equations are solved using explicit scheme

-    Inputs
-    ----------
-    precipitation : array_like [n]
-        Average precipitation [mm/h]
-    evapotranspitration : array_like [n]
-        Potential Evapotranspiration [mm/h]
-    temperature : array_like [n]
-        Average temperature [C]
-    init_st : array_like [5], optional
-        Initial model states, [sp, sm, uz, lz, wc]. If unspecified,
-        [0.0, 30.0, 30.0, 30.0, 0.0] mm
-    q_init : float, optional
-        Initial discharge value. If unspecified set to 10.0
-    ll_temp : array_like [n], optional
-        Long term average temptearature. If unspecified, calculated from temp.
-    p2 : array_like [2]
-        Problem parameter vector setup as:
-        [tfac, area]
-    par : array_like [10]
-        Parameter vector, set up as:
-        [fc, beta, e_corr, etf, lp, c_flux, k, k1, alpha, perc]
-    Returns
-    -------
-    q_sim : array_like [n]
-        Discharge for the n time steps of the precipitation vector [m3/s]
-    st : array_like [n, 5]
-        Model states for the complete time series [mm]
+Parameters
+----------
+precipitation : array_like [n]
+    Average precipitation [mm/h]
+evapotranspiration : array_like [n]
+    Potential Evapotranspiration [mm/h]
+temperature : array_like [n]
+    Average temperature [C]
+init_st : array_like [5], optional
+    Initial model states, [sp, sm, uz, lz, wc]. If unspecified,
+    [0.0, 30.0, 30.0, 30.0, 0.0] mm
+q_init : float, optional
+    Initial discharge value. If unspecified, set to 10.0
+ll_temp : array_like [n], optional
+    Long term average temperature. If unspecified, calculated from temp.
+p2 : array_like [2]
+    Problem parameter vector, set up as:
+    [tfac, area]
+par : array_like [10]
+    Parameter vector, set up as:
+    [fc, beta, e_corr, etf, lp, c_flux, k, k1, alpha, perc]
+
+Returns
+-------
+q_sim : array_like [n]
+    Discharge for the n time steps of the precipitation vector [m3/s]
+st : array_like [n, 5]
+    Model states for the complete time series [mm]

 - model structure uses 18 parameters if the catchment has snow
 [ltt, utt, rfcf, sfcf, ttm, cfmax, cwh, cfr, fc, beta, e_corr, etf, lp,
@@ -53,47 +54,47 @@

 [rfcf, fc, beta, etf, lp, c_flux, k, k1, alpha, perc]

-    Parameters
-    ----------
-    ltt : float
-        Lower temperature treshold [C]
-    utt : float
-        Upper temperature treshold [C]
-    rfcf : float
-        Rainfall corrector factor
-    sfcf : float
-        Snowfall corrector factor
-    ttm : float
-        Temperature treshold for Melting [C]
-    cfmax : float
-        Day degree factor
-    cwh : float
-        Capacity for water holding in snow pack
-    cfr : float
-        Refreezing factor
-    fc : float
-        Filed capacity
-    beta : float
-        Shape coefficient for effective precipitation separation
-    e_corr : float
-        Evapotranspiration corrector factor
-    etf : float
-        Total potential evapotranspiration
-    lp : float _soil
-        wilting point
-    c_flux : float
-        Capilar flux in the root zone
-    k : float
-        Upper zone recession coefficient
-        Upper zone response coefficient
-    k1 : float
-        Lower zone recession coefficient
-        Lowe zone response coefficient
-    alpha : float
-        Response box parameter
-        upper zone runoff coefficient
-    perc: float
-        percolation
+Parameters
+----------
+ltt : float
+    Lower temperature threshold [C]
+utt : float
+    Upper temperature threshold [C]
+rfcf : float
+    Rainfall corrector factor
+sfcf : float
+    Snowfall corrector factor
+ttm : float
+    Temperature threshold for melting [C]
+cfmax : float
+    Day degree factor
+cwh : float
+    Capacity for water holding in snow pack
+cfr : float
+    Refreezing factor
+fc : float
+    Field capacity
+beta : float
+    Shape coefficient for effective precipitation separation
+e_corr : float
+    Evapotranspiration corrector factor
+etf : float
+    Total potential evapotranspiration
+lp : float
+    Soil wilting point
+c_flux : float
+    Capillary flux in the root zone
+k : float
+    Upper zone recession coefficient
+    Upper zone response coefficient
+k1 : float
+    Lower zone recession coefficient
+    Lower zone response coefficient
+alpha : float
+    Response box parameter
+    upper zone runoff coefficient
+perc: float
+    percolation
 """
 import numpy as np
@@ -353,7 +354,7 @@ def Soil(fc, beta, etf, temp, tm, e_corr, lp, tfac, c_flux, inf, ep, sm_old, uz_

 def Response(tfac, perc, alpha, k, k1, area, lz_old, uz_int_1):
-    """Response The response routine of the HBV-96 model.
+    r"""Response. The response routine of the HBV-96 model.

     The response routine is in charge of transforming the current values of
     upper and lower zone into discharge. This routine also controls the
@@ -413,12 +414,13 @@ def Response(tfac, perc, alpha, k, k1, area, lz_old, uz_int_1):

 def Lake(temp, curve, tfac, rf, sf, q_new, lv_old, ltt, c_le, ep, lakeA):
-    # lower zone
-    # explicit representation of the lake where lake will be represented by a rating curve
-    """
+    """Lake subroutine.
+ + lower zone + explicit representation of the lake where lake will be represented by a rating curve + l_ea :lake evaporation c_le : lake _evaporation correction factor - """ # lake evaporation if temp >= ltt: @@ -475,6 +477,7 @@ def Step_run(p, p2, v, St, curve, lake_sim, snow=0): St : array_like [5] Previous model states setup as: [sp, sm, uz, lz, wc] + Returns ------- q_new : float diff --git a/Hapi/rrm/hbvold.py b/Hapi/rrm/hbvold.py index 2ba0ba1f..e64cbf28 100644 --- a/Hapi/rrm/hbvold.py +++ b/Hapi/rrm/hbvold.py @@ -57,8 +57,10 @@ def get_random_pars(): get_random_pars generates random parameter values - Returns: - array of random values between the upper and lower bounds. + Returns + ------- + array: [np.ndarray] + of random values between the upper and lower bounds. """ return np.random.uniform(P_LB, P_UB) @@ -197,17 +199,14 @@ def _snow(cfmax, tfac, temp, ttm, cfr, cwh, _rf, _sf, wc_old, sp_old): def _soil(fc, beta, etf, temp, tm, e_corr, lp, tfac, c_flux, inf, ep, sm_old, uz_old): - """ - ==== - Soil - ==== + """Soil. - Soil routine of the HBV-96 model. + Soil routine of the HBV-96 model. - The model checks for the amount of water that can infiltrate the soil, - coming from the liquid precipitation and the snow pack melting. A part of - the water will be stored as soil moisture, while other will become runoff, - and routed to the upper zone tank. + The model checks for the amount of water that can infiltrate the soil, + coming from the liquid precipitation and the snow pack melting. A part of + the water will be stored as soil moisture, while other will become runoff, + and routed to the upper zone tank. Parameters ---------- @@ -278,14 +277,14 @@ def _soil(fc, beta, etf, temp, tm, e_corr, lp, tfac, c_flux, inf, ep, sm_old, uz def _response(tfac, perc, alpha, k, k1, area, lz_old, uz_int_1): - """Response. + r"""Response. The response routine of the HBV-96 model. - The response routine is in charge of transforming the current values of - upper and lower zone into discharge. This routine also controls the - recharge of the lower zone tank (baseflow). The transformation of units - also occurs in this point. + The response routine is in charge of transforming the current values of + upper and lower zone into discharge. This routine also controls the + recharge of the lower zone tank (baseflow). The transformation of units + also occurs in this point. Parameters ---------- @@ -352,7 +351,7 @@ def _tf(maxbas): def _routing(q, maxbas=1): - """This function implements the transfer function using a triangular function.""" + """Routing function using a triangular function.""" assert maxbas >= 1, "Maxbas value has to be larger than 1" # Get integer part of maxbas # maxbas = int(maxbas) diff --git a/Hapi/rrm/inputs.py b/Hapi/rrm/inputs.py index 2aab4b16..e84c6d1e 100644 --- a/Hapi/rrm/inputs.py +++ b/Hapi/rrm/inputs.py @@ -17,6 +17,27 @@ import Hapi +ParamList = [ + "01_tt", + "02_rfcf", + "03_sfcf", + "04_cfmax", + "05_cwh", + "06_cfr", + "07_fc", + "08_beta", + "09_etf", + "10_lp", + "11_k0", + "12_k1", + "13_k2", + "14_uzl", + "15_perc", + "16_maxbas", + "17_K_muskingum", + "18_x_muskingum", +] + class Inputs: """Rainfall-runoff Inputs class. @@ -31,16 +52,44 @@ class Inputs: 3- extractParameters 4- createLumpedInputs 5- renameFiles - 6- changetext2time - 7- ReadExcelData 8- ListAttributes """ - def __init__(self): - """No parameters needed for instantiating the object.""" + def __init__(self, src): + """Rainfall Inputs. 
+
+        Parameters
+        ----------
+        src: [str]
+            path to the spatial information source raster to get the spatial information
+            (coordinate system, no of rows & columns); src should include the name of the raster
+            and the extension, like "data/dem.tif".
+        """
+        self.source_dem = src
        pass

-    def prepareInputs(src: str, input_folder: str, folder_name: str):
+    @staticmethod
+    def createTempFolder(file_name: str = "AllignedRasters"):
+        """Create a temporary folder for calculation.
+
+        creates a folder in the temporary directory for intermediate raster operations
+
+        Parameters
+        ----------
+        file_name: [str]
+            folder name
+        """
+        try:
+            os.makedirs(os.path.join(os.environ["TEMP"], file_name))
+        except WindowsError:
+            # if the folder cannot be created, delete the existing folder with the same name and create an empty one
+            shutil.rmtree(os.path.join(os.environ["TEMP"] + f"/{file_name}"))
+            os.makedirs(os.path.join(os.environ["TEMP"], file_name))
+
+        temp = os.environ["TEMP"] + f"/{file_name}/"
+        return temp
+
+    def prepareInputs(self, input_folder: str, folder_name: str):
        """prepareInputs.

        this function prepare downloaded raster data to have the same align and
@@ -49,10 +98,6 @@

        Parameters
        ----------
-        src: [str]
-            path to the spatial information source raster to get the spatial information
-            (coordinate system, no of rows & columns) A_path should include the name of the raster
-            and the extension like "data/dem.tif".
        input_folder: [str]
            path of the folder of the rasters you want to adjust their
            no of rows, columns and resolution (alignment) like raster A
@@ -63,14 +108,16 @@

        Example
        -------
        Ex1:
-            >>> dem_path="01GIS/inputs/4000/acc4000.tif"
-            >>> prec_in_path="02Precipitation/CHIRPS/Daily/"
-            >>> Inputs.prepareInputs(dem_path,prec_in_path,"prec")
+            >>> dem_path = "01GIS/inputs/4000/acc4000.tif"
+            >>> prec_in_path = "02Precipitation/CHIRPS/Daily/"
+            >>> In = Inputs(dem_path)
+            >>> In.prepareInputs(prec_in_path, "prec")
        Ex2:
            >>> dem_path="01GIS/inputs/4000/acc4000.tif"
            >>> outputpath="00inputs/meteodata/4000/"
            >>> evap_in_path="03Weather_Data/evap/"
-            >>> Inputs.prepareInputs(dem_path,evap_in_path,outputpath+"evap")
+            >>> In = Inputs(dem_path)
+            >>> In.prepareInputs(evap_in_path, f"{outputpath}/evap")
        """
        if not isinstance(folder_name, str):
            print("folder_name input should be string type")
@@ -90,7 +137,7 @@
            "- First alligned files will be created in a folder 'AllignedRasters' in the Temp folder in you "
            "environment variable"
        )
-        raster.matchDataAlignment(src, input_folder, temp)
+        raster.matchDataAlignment(self.source_dem, input_folder, temp)
        # create new folder in the current directory for alligned and nodatavalue matched cells
        try:
            os.makedirs(os.path.join(os.getcwd(), folder_name))
@@ -104,7 +151,7 @@
        print(
            "- Second matching NoDataValue from the DEM raster too all raster will be created in the outputpath"
        )
-        raster.cropAlignedFolder(temp, src, f"{folder_name}/")
+        raster.cropAlignedFolder(temp, self.source_dem, f"{folder_name}/")
        # delete the processing folder from temp
        shutil.rmtree(temp)

@@ -225,26 +272,6 @@ def extractParameters(self, gdf, scenario, as_raster=False, save_to=""):
        """
        ParametersPath = os.path.dirname(Hapi.__file__)
        ParametersPath = ParametersPath + "/parameters/" + scenario
-        ParamList = [
-            "01_tt",
-            "02_rfcf",
"03_sfcf", - "04_cfmax", - "05_cwh", - "06_cfr", - "07_fc", - "08_beta", - "09_etf", - "10_lp", - "11_k0", - "12_k1", - "13_k2", - "14_uzl", - "15_perc", - "16_maxbas", - "17_K_muskingum", - "18_x_muskingum", - ] if not as_raster: raster_obj = rasterio.open(f"{ParametersPath}/{ParamList[0]}.tif") @@ -374,69 +401,7 @@ def renameFiles( f"{path}/{df.loc[i, 'files']}", f"{path}/{df.loc[i, 'new_names']}" ) - @staticmethod - def changetext2time(string): - """changetext2time. - - this functions changes the date from a string to a date time - format - """ - time = dt.datetime( - int(string[:4]), - int(string[5:7]), - int(string[8:10]), - int(string[11:13]), - int(string[14:16]), - int(string[17:]), - ) - return time - - @staticmethod - def ReadExcelData(path, years, months): - """ReadExcelData. - - this function reads data listed in excel sheet with years and months are - listed as columns and days are listed in the first row - year month 1 2 3 4 5 6 7 8 9 .....................31 - 2012 1 5 6 2 6 8 6 9 7 4 3 ...................31 - 2012 2 9 8 7 6 3 2 1 5 5 9 ...................31 - - Parameters - ---------- - path: - [string] path of the excel file - years: - [list] list of the years you want to read - months: - [list] list of the months you you want to read - - Returns - ------- - List of the values in the excel file - - Examples - -------- - >>> years = [2009,2010,2011]#,2012,2013] - >>> months = [1,2,3,4,5,6,7,8,9,10,11,12] - >>> Q = Inputs.ReadExcelData("{path}/Discharge/Qout.xlsx", years, months) - """ - - Qout = pd.read_excel(path) - Q = [] - # years=[2009,2010,2011]#,2012,2013] - # months=[1,2,3,4,5,6,7,8,9,10,11,12] - for year in years: - for month in months: - row = Qout[Qout["year"] == year][Qout["month"] == month] - row = row.drop(["year", "month"], axis=1) - row = row.values.tolist()[0] - Q = Q + row - - Q = [Q[i] for i in range(len(Q)) if not np.isnan(Q[i])] - - return Q - - def ListAttributes(self): + def listAttributes(self): """Print Attributes List.""" logger.info("\n") logger.info( diff --git a/Hapi/rrm/routing.py b/Hapi/rrm/routing.py index 5d4dfb9e..55b82e34 100644 --- a/Hapi/rrm/routing.py +++ b/Hapi/rrm/routing.py @@ -17,6 +17,7 @@ class Routing: """Routing class contains routing method. Methods + ------- 1- Muskingum 2- Muskingum_V 3- TriangularRouting1 @@ -35,30 +36,29 @@ def __init__(self): def Muskingum(inflow, Qinitial, k, x, dt): """Muskingum. 
-        inputs:
+        Parameters
        ----------
-        1-inflow:
-            [numpy array] time series of inflow hydrograph
-        2-Qinitial:
-            [numeric] initial value for outflow
-        3-k:
-            [numeric] travelling time (hours)
-        4-x:
-            [numeric] surface nonlinearity coefficient (0,0.5)
-        5-dt:
-            [numeric] delta t
-
-        Outputs:
-        ----------
-        1-outflow:
-            [numpy array] time series of routed hydrograph
-
-        examples:
-        ----------
-        pars[10]=k
-        pars[11]=x
-        p2[0]=1 # hourly time step
-        q_routed = Routing.muskingum(q_uz,q_uz[0],pars[10],pars[11],p2[0])
+        inflow: [numpy array]
+            time series of inflow hydrograph
+        Qinitial: [numeric]
+            initial value for outflow
+        k: [numeric]
+            travelling time (hours)
+        x: [numeric]
+            surface nonlinearity coefficient (0,0.5)
+        dt: [numeric]
+            delta t
+
+        Returns
+        -------
+        outflow: [numpy array]
+            time series of routed hydrograph
+
+        Examples
+        --------
+        >>> q = [10.0, 50.0, 30.0, 20.0]  # discharge time series
+        >>> k, x = 2.0, 0.2  # travel time (hours) and weighting factor
+        >>> time_resolution = 1  # hourly time step
+        >>> q_routed = Routing.Muskingum(q, q[0], k, x, time_resolution)
        """
        c1 = (dt - 2 * k * x) / (2 * k * (1 - x) + dt)
        c2 = (dt + 2 * k * x) / (2 * k * (1 - x) + dt)
@@ -89,30 +89,29 @@ def Muskingum_V(

        Vectorized version of Muskingum

-        inputs:
-        ----------
-        1-inflow: [numpy array]
-            time series of inflow hydrograph
-        2-Qinitial: [numeric]
-            initial value for outflow
-        3-k: [numeric]
-            travelling time (hours)
-        4-x: [numeric]
-            surface nonlinearity coefficient (0,0.5)
-        5-dt: [numeric]
-            delta t
-
-        Outputs:
-        ----------
-        1-outflow:
-            [numpy array] time series of routed hydrograph
-
-        examples:
+        Parameters
        ----------
-        pars[10]=k
-        pars[11]=x
-        p2[0]=1 # hourly time step
-        q_routed = Routing.muskingum(q_uz,q_uz[0],pars[10],pars[11],p2[0])
+        inflow: [numpy array]
+            time series of inflow hydrograph
+        Qinitial: [numeric]
+            initial value for outflow
+        k: [numeric]
+            travelling time (hours)
+        x: [numeric]
+            surface nonlinearity coefficient (0,0.5)
+        dt: [numeric]
+            delta t
+
+        Returns
+        -------
+        outflow:
+            [numpy array] time series of routed hydrograph
+
+        Examples
+        --------
+        >>> q = [10.0, 50.0, 30.0, 20.0]  # discharge time series
+        >>> k, x = 2.0, 0.2  # travel time (hours) and weighting factor
+        >>> time_resolution = 1  # hourly time step
+        >>> q_routed = Routing.Muskingum_V(q, q[0], k, x, time_resolution)
        """
        c1 = (dt - 2 * k * x) / (2 * k * (1 - x) + dt)
        c2 = (dt + 2 * k * x) / (2 * k * (1 - x) + dt)
@@ -138,21 +137,21 @@ def Tf(maxbas):

        Transfer function weight generator in a shape of a triangle.

-        Inputs:
-        ----------
-        1-maxbas:
-            [integer] number of time steps that the triangular routing function
-            is going to divide the discharge into, based on the weights
-            generated from this function, min value is 1 and default value is 1
-
-        Outputs:
-        ----------
-        1-wi:
-            [numpy array] array of normalised weights
-
-        Example:
+        Parameters
        ----------
-        ws=Tf(5)
+        maxbas: [integer]
+            number of time steps that the triangular routing function
+            is going to divide the discharge into, based on the weights
+            generated from this function, min value is 1 and default value is 1
+
+        Returns
+        -------
+        wi: [numpy array]
+            array of normalised weights
+
+        Examples
+        --------
+        >>> ws = Routing.Tf(5)
        """
        wi = []
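The Muskingum docstrings above only spell out the first two coefficients, so the full recursion is worth a short sketch. The `c1`/`c2` definitions below mirror the lines visible in the source; `c3` is the standard third Muskingum coefficient (an assumption here, chosen so that c1 + c2 + c3 = 1), and the function is a hedged illustration rather than the Hapi implementation:

```python
import numpy as np

def muskingum_sketch(inflow, q_initial, k, x, dt):
    """Route an inflow hydrograph: O[t] = c1*I[t] + c2*I[t-1] + c3*O[t-1]."""
    denom = 2 * k * (1 - x) + dt
    c1 = (dt - 2 * k * x) / denom        # as in the source
    c2 = (dt + 2 * k * x) / denom        # as in the source
    c3 = (2 * k * (1 - x) - dt) / denom  # assumed standard closing coefficient
    outflow = np.zeros(len(inflow))
    outflow[0] = q_initial
    for t in range(1, len(inflow)):
        outflow[t] = c1 * inflow[t] + c2 * inflow[t - 1] + c3 * outflow[t - 1]
    return outflow

q_in = np.array([10.0, 50.0, 130.0, 90.0, 40.0, 15.0])
q_out = muskingum_sketch(q_in, q_in[0], k=2.0, x=0.2, dt=1.0)
```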
@@ -172,25 +171,28 @@
    @staticmethod
    def TriangularRouting2(q, maxbas=1):
-        """TriangularRouting implements the transfer function using a triangular function (considers only integer values of Maxbas parameter)
+        """Triangular Routing.

-        Inputs:
-        ----------
-        1-q:
-            [numpy array] time series of discharge hydrographs
-        2-maxbas:
-            [integer] number of time steps that the triangular routing function
-            is going to divide the discharge into, based on the weights
-            generated from this function, min value is 1 and default value is 1
-
-        Outputs:
-        ----------
-        1-q_r:
-            [numpy array] time series of routed hydrograph
+        The function implements the transfer function using a triangular function (considers only integer
+        values of the Maxbas parameter).

-        examples:
+        Parameters
        ----------
-        q_sim=TriangularRouting(np.array(q_sim), parameters[-1])
+        q: [numpy array]
+            time series of discharge hydrographs
+        maxbas: [integer]
+            number of time steps that the triangular routing function
+            is going to divide the discharge into, based on the weights
+            generated from this function, min value is 1 and default value is 1
+
+        Returns
+        -------
+        q_r: [numpy array]
+            time series of routed hydrograph
+
+        Examples
+        --------
+        >>> q_sim = Routing.TriangularRouting2(np.array(q_sim), parameters[-1])
        """
        # input data validation
        assert maxbas >= 1, "Maxbas value has to be larger than 1"
@@ -212,22 +214,22 @@ def TriangularRouting2(q, maxbas=1):

    @staticmethod
    def CalculateWeights(MAXBAS):
-        """CalculateMaxBas calculate the MAXBAS Weights based on a MAXBAX number The MAXBAS is a HBV parameter that controls the routing.
+        """Calculate Weights.

-        Inputs:
-        ----------
-        1-MAXBAS:
-            []
+        - calculates the MAXBAS weights based on a MAXBAS number. The MAXBAS is an HBV parameter that
+          controls the routing.
+        - it is important to mention that this function can obtain weights
+          not only for integer values but for decimal values as well.

-        Example:
+        Parameters
        ----------
-        maxbasW = CalculateMaxBas(5)
-        maxbasW =
-
-        0.0800    0.2400    0.3600    0.2400    0.0800
+        MAXBAS: [Numeric]

-        It is important to mention that this function allows to obtain weights
-        not only for interger values but from decimals values as well.
+        Examples
+        --------
+        >>> maxbasW = Routing.CalculateWeights(5)
+        >>> print(maxbasW)
+        0.0800    0.2400    0.3600    0.2400    0.0800
        """
        yant = 0
        Total = 0  # Just to verify how far from the unit is the result
@@ -295,10 +297,13 @@ def CalculateWeights(MAXBAS):

    @staticmethod
    def TriangularRouting1(Q, MAXBAS):
-        """TriangularRouting1 calculate the routing from a input hydrograph using the MAXBAS parameter from the HBV model (considers float values of Maxbas parameter).
+        """TriangularRouting1.

-        EXAMPLE:
-        ----------
+        calculates the routing of an input hydrograph using the MAXBAS parameter from the HBV
+        model (considers float values of the Maxbas parameter).
+
+        Examples
+        --------
        [Qout,maxbasW]=RoutingMAXBAS(Q,5);
        where:
            Qout = output hydrograph
            maxbasW = MAXBAS weight
            Q = input hydrograph
            5 = MAXBAS parameter value.
        """
-        # CALCULATE MAXBAS WEIGHTS
        maxbasW = Routing.CalculateWeights(MAXBAS)
diff --git a/Hapi/rrm/wrapper.py b/Hapi/rrm/wrapper.py
index 72f9aa01..25a98814 100644
--- a/Hapi/rrm/wrapper.py
+++ b/Hapi/rrm/wrapper.py
@@ -10,7 +10,6 @@ class Wrapper:
-
    """Wrapper.
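(A brief aside before the Wrapper docstring continues: the `CalculateWeights` example above, `0.0800 0.2400 0.3600 0.2400 0.0800` for MAXBAS = 5, can be reproduced by integrating a unit triangle over each time step. The sketch below is illustrative only and, unlike `CalculateWeights` itself, is limited to integer MAXBAS values:)

```python
import numpy as np

def triangular_weights(maxbas):
    """Normalised triangle weights for an integer MAXBAS (sketch only)."""
    # sample the triangle peaking at maxbas / 2, then integrate per time step
    edges = np.linspace(0, maxbas, maxbas * 100 + 1)
    tri = np.minimum(edges, maxbas - edges)
    steps = np.array([
        np.trapz(tri[i * 100:(i + 1) * 100 + 1], edges[i * 100:(i + 1) * 100 + 1])
        for i in range(maxbas)
    ])
    return steps / steps.sum()

def triangular_routing(q, maxbas):
    """Spread each discharge value over the next `maxbas` steps."""
    return np.convolve(q, triangular_weights(maxbas))[: len(q)]

print(triangular_weights(5))  # -> [0.08 0.24 0.36 0.24 0.08]
```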
The class connect different commponent together (lumped run of the distributed with the spatial routing) for Hapi @@ -26,7 +25,7 @@ class Wrapper: """ def __init__(self): - """wrapper object does not need any information to be created.""" + """Run the whole setup.""" pass @staticmethod @@ -37,58 +36,59 @@ def RRMModel(Model, ll_temp=None, q_0=None): 1- The distributed rainfall runoff: model runs separately for each cell 2- The Spatial routing scheme (routing is following river network) - Inputs: + Parameters ---------- - 1-DEM: - [gdal.dataset] DEM raster file of the catchment (clipped to the catchment only) - 2-flow_acc: - [gdal.dataset] flow accumulation raster file of the catchment (clipped to the catchment only) - 3-flow_direct: - [gdal.dataset] flow Direction raster file of the catchment (clipped to the catchment only) - 4-sp_prec: - [numpy array] 3d array of the precipitation data, sp_prec should + Model + DEM: [gdal.dataset] + DEM raster file of the catchment (clipped to the catchment only) + flow_acc: [gdal.dataset] + flow accumulation raster file of the catchment (clipped to the catchment only) + flow_direct: [gdal.dataset] + flow Direction raster file of the catchment (clipped to the catchment only) + sp_prec: [numpy array] + 3d array of the precipitation data, sp_prec should have the same 2d dimension of raster input - 5-sp_et: - [numpy array] 3d array of the evapotranspiration data, sp_et should + sp_et: [numpy array] + 3d array of the evapotranspiration data, sp_et should have the same 2d dimension of raster input - 6-sp_temp: - [numpy array] 3d array of the temperature data, sp_temp should + sp_temp: [numpy array] + 3d array of the temperature data, sp_temp should have the same 2d dimension of raster input - 7-sp_par: - [numpy array] number of 2d arrays of the catchment properties spatially + sp_par: [numpy array] + number of 2d arrays of the catchment properties spatially distributed in 2d and the third dimension is the number of parameters, sp_pars should have the same 2d dimension of raster input - 8-p2: - [List] list of unoptimized parameters + p2: [List] + list of unoptimized parameters p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step p2[1] = catchment area in km2 - 9-kub: - [float] upper bound of K value (traveling time in muskingum routing method) - 10-klb: - [float] Lower bound of K value (traveling time in muskingum routing method) - 11-init_st: - [list] initial state variables values [sp, sm, uz, lz, wc]. default=None - 12-ll_temp: - [numpy array] 3d array of the long term average temperature data - 13-q_0: - [float] initial discharge m3/s - - Outputs: - ---------- - 1-statevariables: - [numpy ndarray] 4D array (rows,cols,time,states) states are [sp,wc,sm,uz,lv] - 2-qlz: - [numpy ndarray] 3D array of the lower zone discharge - 3-quz: - [numpy ndarray] 3D array of the upper zone discharge - 4-qout: - [numpy array] 1D timeseries of discharge at the outlet of the catchment - of unit m3/sec - 5-quz_routed: - [numpy ndarray] 3D array of the upper zone discharge accumulated and - routed at each time step - 6-qlz_translated: - [numpy ndarray] 3D array of the lower zone discharge translated at each time step + kub: [float] + upper bound of K value (traveling time in muskingum routing method) + klb: [float] + Lower bound of K value (traveling time in muskingum routing method) + init_st: [list] + initial state variables values [sp, sm, uz, lz, wc]. 
default=None
+        ll_temp: [numpy array]
+            3d array of the long term average temperature data
+        q_0: [float]
+            initial discharge m3/s
+
+        Returns
+        -------
+        statevariables:
+            [numpy ndarray] 4D array (rows,cols,time,states) states are [sp,wc,sm,uz,lv]
+        qlz:
+            [numpy ndarray] 3D array of the lower zone discharge
+        quz:
+            [numpy ndarray] 3D array of the upper zone discharge
+        qout:
+            [numpy array] 1D timeseries of discharge at the outlet of the catchment
+            of unit m3/sec
+        quz_routed:
+            [numpy ndarray] 3D array of the upper zone discharge accumulated and
+            routed at each time step
+        qlz_translated:
+            [numpy ndarray] 3D array of the lower zone discharge translated at each time step
        """
        # run the rainfall runoff model separately
        distrrm.RunLumpedRRM(Model)
@@ -319,12 +319,12 @@ def Lumped(Model, Routing=0, RoutingFn=[]):
        q_lz: [numpy array]
            1d array of the calculated discharge.

-        examples
+        Examples
        --------
-        p2=[24, 1530]
-        #[sp,sm,uz,lz,wc]
-        init_st=[0,5,5,5,0]
-        snow=0
+        >>> p2 = [24, 1530]
+        >>> # [sp,sm,uz,lz,wc]
+        >>> init_st = [0, 5, 5, 5, 0]
+        >>> snow = 0
diff --git a/Hapi/run.py b/Hapi/run.py
index b665dfb7..7b02d2bd 100644
--- a/Hapi/run.py
+++ b/Hapi/run.py
@@ -19,12 +19,11 @@

 class Run(Catchment):
+    """Run the catchment model.

-    """Run.
-
-    Run sub-class validate the spatial data and hand it to the wrapper class, It is
-    a sub-class from the catchment class, so you need to create the Catchment
-    object first to run the model
+    Run sub-class validates the spatial data and hands it to the wrapper class. It is
+    a sub-class of the Catchment class, so you need to create the Catchment
+    object first to run the model.

     Methods
     -------
@@ -40,61 +39,25 @@ def __init__(self):
        pass

    def RunHapi(self):
-        """RunModel.
-
-        this function runs the conceptual distributed hydrological model
-
-        Inputs:
-        ----------
-            1-Paths:
-
-            4-FlowAccPath:
-
-            5-FlowDPath:
-                [String] path to the Flow Direction raster of the catchment (it should
-                include the raster name and extension)
-            7-ParPath:
-                [String] path to the Folder contains parameters rasters of the catchment
-            8-p2:
-                [List] list of unoptimized parameters
-                p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step
-                p2[1] = catchment area in km2
+        """Run Model.
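RunHapi validates the spatial inputs and hands them to the wrapper, which performs the two-step sequence documented above. A hedged sketch of that hand-off follows; the `DistributedRRM` import path and method names appear in this diff, but the exact call chain and the `model.qout` attribute are assumptions:

```python
from Hapi.rrm.distrrm import DistributedRRM as distrrm

def run_distributed(model):
    """Run a prepared Catchment-like object through both stages (sketch)."""
    distrrm.RunLumpedRRM(model)    # step 1: lumped RRM in every cell
    distrrm.SpatialRouting(model)  # step 2: route along the river network
    return model.qout              # assumed: outlet hydrograph set on the model
```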
- Outputs: - ---------- - 1-statevariables: [numpy attribute] - 4D array (rows,cols,time,states) states are [sp,wc,sm,uz,lv] - 2-qlz: [numpy attribute] - 3D array of the lower zone discharge - 3-quz: [numpy attribute] - 3D array of the upper zone discharge - 4-qout: [numpy attribute] - 1D timeseries of discharge at the outlet of the catchment - of unit m3/sec - 5-quz_routed: [numpy attribute] - 3D array of the upper zone discharge accumulated and - routed at each time step - 6-qlz_translated: [numpy attribute] - 3D array of the lower zone discharge translated at each time step - - Example: - ---------- - PrecPath = prec_path="meteodata/4000/calib/prec" - Evap_Path = evap_path="meteodata/4000/calib/evap" - TempPath = temp_path="meteodata/4000/calib/temp" - DemPath = "GIS/4000/dem4000.tif" - FlowAccPath = "GIS/4000/acc4000.tif" - FlowDPath = "GIS/4000/fd4000.tif" - ParPath = "meteodata/4000/parameters" - p2=[1, 227.31] - st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath, - FlowAccPath,FlowDPath,ParPath,p2) + Returns + ------- + statevariables: [numpy attribute] + 4D array (rows,cols,time,states) states are [sp,wc,sm,uz,lv] + qlz: [numpy attribute] + 3D array of the lower zone discharge + quz: [numpy attribute] + 3D array of the upper zone discharge + qout: [numpy attribute] + 1D timeseries of discharge at the outlet of the catchment + of unit m3/sec + quz_routed: [numpy attribute] + 3D array of the upper zone discharge accumulated and + routed at each time step + qlz_translated: [numpy attribute] + 3D array of the lower zone discharge translated at each time step """ - ### input data validation - # data type - # assert type(self.FlowAcc)==gdal.Dataset, "flow_acc should be read using gdal (gdal dataset please read it using gdal library) " - # assert type(self.FlowDir)==gdal.Dataset, "flow_direct should be read using gdal (gdal dataset please read it using gdal library) " - # input dimensions [fd_rows, fd_cols] = self.FlowDirArr.shape assert ( @@ -124,61 +87,10 @@ def RunHapi(self): print("Model Run has finished") def RunFloodModel(self): - """RunFloodModel. - - this function runs the conceptual distributed hydrological model - - Inputs: - ---------- - 1-Paths: - - 4-FlowAccPath: - - 5-FlowDPath: - [String] path to the Flow Direction raster of the catchment (it should - include the raster name and extension) - 7-ParPath: - [String] path to the Folder contains parameters rasters of the catchment - 8-p2: - [List] list of unoptimized parameters - p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step - p2[1] = catchment area in km2 + """Run flood model. 
- Outputs: - ---------- - 1-statevariables: [numpy attribute] - 4D array (rows,cols,time,states) states are [sp,wc,sm,uz,lv] - 2-qlz: [numpy attribute] - 3D array of the lower zone discharge - 3-quz: [numpy attribute] - 3D array of the upper zone discharge - 4-qout: [numpy attribute] - 1D timeseries of discharge at the outlet of the catchment - of unit m3/sec - 5-quz_routed: [numpy attribute] - 3D array of the upper zone discharge accumulated and - routed at each time step - 6-qlz_translated: [numpy attribute] - 3D array of the lower zone discharge translated at each time step - - Example: - ---------- - PrecPath = prec_path="meteodata/4000/calib/prec" - Evap_Path = evap_path="meteodata/4000/calib/evap" - TempPath = temp_path="meteodata/4000/calib/temp" - DemPath = "GIS/4000/dem4000.tif" - FlowAccPath = "GIS/4000/acc4000.tif" - FlowDPath = "GIS/4000/fd4000.tif" - ParPath = "meteodata/4000/parameters" - p2=[1, 227.31] - st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath, - FlowAccPath,FlowDPath,ParPath,p2) + - This function runs the conceptual distributed hydrological model """ - ### input data validation - # data type - # assert type(self.FlowAcc)==gdal.Dataset, "flow_acc should be read using gdal (gdal dataset please read it using gdal library) " - # assert type(self.FlowDir)==gdal.Dataset, "flow_direct should be read using gdal (gdal dataset please read it using gdal library) " - # input dimensions [fd_rows, fd_cols] = self.FlowDirArr.shape assert ( @@ -223,53 +135,18 @@ def RunFloodModel(self): print("1D model Run has finished") def runHAPIwithLake(self, Lake): - """RunDistwithLake. + """Run model with lake. - this function runs the conceptual distributed hydrological model + - This function runs the conceptual distributed hydrological model - Inputs: - ---------- - 1-Paths: - 1-PrecPath: - [String] path to the Folder contains precipitation rasters - 2-Evap_Path: - [String] path to the Folder contains Evapotranspiration rasters - 3-TempPath: - [String] path to the Folder contains Temperature rasters - 4-FlowAccPath: - [String] path to the Flow Accumulation raster of the catchment (it should - include the raster name and extension) - 5-FlowDPath: - [String] path to the Flow Direction raster of the catchment (it should - include the raster name and extension) - 7-ParPath: - [String] path to the Folder contains parameters rasters of the catchment - 8-p2: - [List] list of unoptimized parameters - p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step - p2[1] = catchment area in km2 - - Outputs: - ---------- - 1- st: - [4D array] state variables - 2- q_out: - [1D array] calculated Discharge at the outlet of the catchment - 3- q_uz: - [3D array] Distributed discharge for each cell - - Example: - ---------- - PrecPath = prec_path="meteodata/4000/calib/prec" - Evap_Path = evap_path="meteodata/4000/calib/evap" - TempPath = temp_path="meteodata/4000/calib/temp" - DemPath = "GIS/4000/dem4000.tif" - FlowAccPath = "GIS/4000/acc4000.tif" - FlowDPath = "GIS/4000/fd4000.tif" - ParPath = "meteodata/4000/parameters" - p2=[1, 227.31] - st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath, - FlowAccPath,FlowDPath,ParPath,p2) + Returns + ------- + st: [4D array] + state variables. + q_out: [1D array] + calculated Discharge at the outlet of the catchment. + q_uz: [3D array] + Distributed discharge for each cell. 
""" # input dimensions [fd_rows, fd_cols] = self.FlowDirArr.shape @@ -307,56 +184,19 @@ def runHAPIwithLake(self, Lake): print("Model Run has finished") def runFW1(self): - """RunDistwithLake. + """Run DistwithLake. - this function runs the conceptual distributed hydrological model + This function runs the conceptual distributed hydrological model - Inputs: - ---------- - 1-Paths: - 1-PrecPath: - [String] path to the Folder contains precipitation rasters - 2-Evap_Path: - [String] path to the Folder contains Evapotranspiration rasters - 3-TempPath: - [String] path to the Folder contains Temperature rasters - 4-FlowAccPath: - [String] path to the Flow Accumulation raster of the catchment (it should - include the raster name and extension) - 5-FlowDPath: - [String] path to the Flow Direction raster of the catchment (it should - include the raster name and extension) - 7-ParPath: - [String] path to the Folder contains parameters rasters of the catchment - 8-p2: - [List] list of unoptimized parameters - p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step - p2[1] = catchment area in km2 - - Outputs: - ---------- - 1- st: - [4D array] state variables - 2- q_out: - [1D array] calculated Discharge at the outlet of the catchment - 3- q_uz: - [3D array] Distributed discharge for each cell - - Example: - ---------- - PrecPath = prec_path="meteodata/4000/calib/prec" - Evap_Path = evap_path="meteodata/4000/calib/evap" - TempPath = temp_path="meteodata/4000/calib/temp" - DemPath = "GIS/4000/dem4000.tif" - FlowAccPath = "GIS/4000/acc4000.tif" - FlowDPath = "GIS/4000/fd4000.tif" - ParPath = "meteodata/4000/parameters" - p2=[1, 227.31] - st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath, - FlowAccPath,FlowDPath,ParPath,p2) + Returns + ------- + st: [4D array] + state variables + q_out: [1D array] + calculated Discharge at the outlet of the catchment + q_uz: [3D array] + Distributed discharge for each cell """ - # input data validation - # input dimensions assert ( np.shape(self.Prec)[0] == self.rows @@ -462,44 +302,46 @@ def runLumped( Route: int = 0, RoutingFn=None, ): - """runLumped. + """Run lumped model. - this function runs lumped conceptual model + - This function runs lumped conceptual model Parameters ---------- - ConceptualModel: [function] - conceptual model and it should contain a function called simulate - data: [numpy array] - meteorological data as array with the first column as precipitation - second as evapotranspiration, third as temperature and forth column as - long term average temperature - parameters: [numpy array] - conceptual model parameters as array - p2: [List] - list of unoptimized parameters - p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step - p2[1] = catchment area in km2 - init_st: [list] - initial state variables values [sp, sm, uz, lz, wc]. - Routing: [0 or 1] + Route: [0 or 1] to decide wether t route the generated discharge hydrograph or not RoutingFn: [function] function to route the dischrge hydrograph. - Returns - ---------- - 1- st: - [numpy array] 3d array of the 5 state variable data for each cell - 2- q_lz: - [numpy array] 1d array of the calculated discharge. 
+ Parameters that should be defined before calling the function + ConceptualModel: [function] + conceptual model and it should contain a function called simulate + data: [numpy array] + meteorological data as array with the first column as precipitation + second as evapotranspiration, third as temperature and forth column as + long term average temperature + parameters: [numpy array] + conceptual model parameters as array + p2: [List] + list of unoptimized parameters + p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step + p2[1] = catchment area in km2 + init_st: [list] + initial state variables values [sp, sm, uz, lz, wc]. - examples: - ---------- - p2=[24, 1530] - #[sp,sm,uz,lz,wc] - init_st=[0,5,5,5,0] - snow=0 + Returns + ------- + st: [numpy array] + 3d array of the 5 state variable data for each cell + q_lz: [numpy array] + 1d array of the calculated discharge. + + Examples + -------- + >>> p2 = [24, 1530] + >>> #[sp,sm,uz,lz,wc] + >>> init_st = [0,5,5,5,0] + >>> snow = 0 """ if RoutingFn is None: RoutingFn = [] diff --git a/Hapi/test.asc b/Hapi/test.asc deleted file mode 100644 index 1bf84bfc..00000000 --- a/Hapi/test.asc +++ /dev/null @@ -1,10 +0,0 @@ -NCOLS 5 -NROWS 3 -XLLCORNER 0.0 -YLLCORNER 0.0 -CELLSIZE 200.0 -NODATA_VALUE -99999 - -0.770195 0.937035 1.112300 0.937035 0.770195 -0.770195 0.937035 1.112300 0.937035 0.770195 -0.770195 0.937035 1.112300 0.937035 0.770195 diff --git a/Hapi/utils.py b/Hapi/utils.py deleted file mode 100644 index 60185663..00000000 --- a/Hapi/utils.py +++ /dev/null @@ -1,254 +0,0 @@ -import datetime -import os -import pickle -import sys - - -def save_obj(obj, saved_name): - """save_obj. - - this function is used to save any python object to your hard desk - - Parameters - ---------- - obj: [Any] - any python object - saved_name: [str] - name of the object - - Returns - ------- - the object will be saved to the given path/current working directory - with the given name - - Examples - -------- - >>> path = "path/to/your/disk" - >>> data={"key1":[1,2,3,5],"key2":[6,2,9,7]} - >>> save_obj(data, f'{path}/flow_acc_table') - """ - with open(saved_name + ".pkl", "wb") as f: - pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) - - -def load_obj(saved_name): - """load_obj. - - this function is used to save any python object to your hard desk - - Parameters - ---------- - 1-saved_name: - ['String'] name of the object - - Returns - ------- - the object will be loaded - - Examples - -------- - >>> path = r"c:\my_computer\files" - >>> load_obj(f'{path}/flow_acc_table') - """ - with open(saved_name + ".pkl", "rb") as f: - return pickle.load(f) - - -def dateformated(x): - """dateformated. - - this function converts the the date read from a list to a datetime format - - Parameters - ---------- - x: [list] - is a list of tuples of string date read from database - - Returns - ------- - list od dates as a datetime format YYYY-MM-DD HH:MM:SS - """ - x = [i[0] for i in x] - # - x1 = [] - for i in x: - if len(i) == 19: - x1.append( - datetime.datetime( - int(i[:4]), - int(i[5:7]), - int(i[8:10]), - int(i[11:13]), - int(i[14:16]), - int(i[17:18]), - ) - ) - # elif len(i)==13: - # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) - # else: - # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) - # del i,x - return x1 - - -def printWaitBar(i, total, prefix="", suffix="", decimals=1, length=100, fill=" "): - """This function will print a waitbar in the console. 
- - Parameters: - i: - Iteration number - total: - Total iterations - fronttext: - Name in front of bar - prefix: - Name after bar - suffix: - Decimals of percentage - length: - width of the waitbar - fill: - bar fill - """ - # Adjust when it is a linux computer - if os.name == "posix" and total == 0: - total = 0.0001 - - percent = ("{0:." + str(decimals) + "f}").format(100 * (i / float(total))) - filled = int(length * i // total) - bar = fill * filled + "-" * (length - filled) - - sys.stdout.write("\r%s |%s| %s%% %s" % (prefix, bar, percent, suffix)) - sys.stdout.flush() - - if i == total: - print() - - -def class_method_parse(initial_args): - """check and assign values of parameters to the self object. - - check values of a method and assign the valuse of the parameters to the self object (the method has self/cls - as first parameter) - - Parameters - ---------- - initial_args: [Dict] - dictionary contains all the parameters of the function, positional and key word parameters, each parameter is a - key(i.e 'name' in the below example), and the value is a dict that has at least a key called "type", - and a value that is an available data type in python, (i.e 'name' : {"type": str}), - - If the parameter has a default value, the dict has to have another key: value i.e "default": - - if there is no "default" key in the parameter dict, the default value will be taken None - >>> initial_args = { - >>> 'name' : {"type": str}, - >>> 'version' : {"default": 3, "type": int} - >>> } - - Returns - ------- - assign the valuse of the parameters to the self object - """ - - def apply_func(func): - """apply the function that the decorator preceded. - - Parameters - ---------- - func: [function] - the function that the decorator precedes - - Returns - ------- - returns the same outputs of the input function - """ - - def wrapper(*args, **kwargs): - - self = args[0] - # get wrong kwargs - wrong_kwargs = set(kwargs) - set(initial_args) - if len(wrong_kwargs) > 0: - print(initial_args) - raise KeyError(f"Invalid parameter {wrong_kwargs}") - - for key, val in initial_args.items(): - # if the parameter is given by user - if key in kwargs.keys(): - default = initial_args.get(key) - # check the type - key_type = default.get("type") - # make the type as a list - if not isinstance(key_type, list): - key_type = [key_type] - # get the given value - val = kwargs.get(key) - if type(val) in key_type: - # set the given value - setattr(self, key, val) - else: - raise TypeError( - f"The parameter {key} should be of type {key_type}" - ) - else: - # positional args - if "default" in val.keys(): - setattr(self, key, val.get("default")) - - res = func(*args, **kwargs) - return res - - return wrapper - - return apply_func - - -def class_attr_initialize(attributes): - """check and assign values of parameters to the self object. 
- - check values of a method and assign the valuse of the parameters to the self object - (the method has self/cls - as first parameter) - - Parameters - ---------- - initial_args: [Dict] - dictionary contains all the parameters of the function, positional and key word - parameters, each parameter is a key(i.e 'name' in the below example), and the - value is a dict that has at least a key called "type", and a value that is an - available data type in python, (i.e 'name' : {"type": str}), - - If the parameter has a default value, the dict has to have another - key: value i.e "default": - >>> initial_args = { - >>> 'name' : {"type": str}, - >>> 'version' : {"default": 3, "type": int} - >>> } - - Returns - ------- - assign the valuse of the parameters to the self object - """ - - def apply_func(func): - """apply the function that the decorator preceded. - - Parameters - ---------- - func: [function] - the function that the decorator precedes - - Returns - ------- - returns the same outputs of the input function - """ - - def wrapper(*args, **kwargs): - self = args[0] - # initialize attributes - for key, val in attributes.items(): - setattr(self, key, val) - - func(*args, **kwargs) - - return wrapper - - return apply_func diff --git a/README.md b/README.md index 848d3176..06105ba5 100644 --- a/README.md +++ b/README.md @@ -39,9 +39,9 @@ Current build status Current release info ==================== -| Name | Downloads | Version | Platforms | -| --- |------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| --- | --- | -| [![Conda Recipe](https://img.shields.io/badge/recipe-hapi-green.svg)](https://anaconda.org/conda-forge/hapi) | [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/hapi.svg)](https://anaconda.org/conda-forge/hapi) [![Downloads](https://pepy.tech/badge/hapi-nile)](https://pepy.tech/project/hapi-nile) [![Downloads](https://pepy.tech/badge/hapi-nile/month)](https://pepy.tech/project/hapi-nile) [![Downloads](https://pepy.tech/badge/hapi-nile/week)](https://pepy.tech/project/hapi-nile) ![PyPI - Downloads](https://img.shields.io/pypi/dd/hapi-nile?color=blue&style=flat-square) ![GitHub all releases](https://img.shields.io/github/downloads/MAfarrag/Hapi/total) ![GitHub release (latest by date)](https://img.shields.io/github/downloads/MAfarrag/hapi/1.2.3/total) | [![Conda Version](https://img.shields.io/conda/vn/conda-forge/hapi.svg)](https://anaconda.org/conda-forge/hapi) [![PyPI version](https://badge.fury.io/py/HAPI-Nile.svg)](https://badge.fury.io/py/HAPI-Nile) [![Anaconda-Server Badge](https://anaconda.org/conda-forge/hapi/badges/version.svg)](https://anaconda.org/conda-forge/hapi) | [![Conda Platforms](https://img.shields.io/conda/pn/conda-forge/hapi.svg)](https://anaconda.org/conda-forge/hapi) [![Join the chat at 
https://gitter.im/Hapi-Nile/Hapi](https://badges.gitter.im/Hapi-Nile/Hapi.svg)](https://gitter.im/Hapi-Nile/Hapi?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) | +| Name | Downloads | Version | Platforms | +| --- |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| --- | --- | +| [![Conda Recipe](https://img.shields.io/badge/recipe-hapi-green.svg)](https://anaconda.org/conda-forge/hapi) | [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/hapi.svg)](https://anaconda.org/conda-forge/hapi) [![Downloads](https://pepy.tech/badge/hapi-nile)](https://pepy.tech/project/hapi-nile) [![Downloads](https://pepy.tech/badge/hapi-nile/month)](https://pepy.tech/project/hapi-nile) [![Downloads](https://pepy.tech/badge/hapi-nile/week)](https://pepy.tech/project/hapi-nile) ![PyPI - Downloads](https://img.shields.io/pypi/dd/hapi-nile?color=blue&style=flat-square) ![GitHub all releases](https://img.shields.io/github/downloads/MAfarrag/Hapi/total) | [![Conda Version](https://img.shields.io/conda/vn/conda-forge/hapi.svg)](https://anaconda.org/conda-forge/hapi) [![PyPI version](https://badge.fury.io/py/HAPI-Nile.svg)](https://badge.fury.io/py/HAPI-Nile) [![Anaconda-Server Badge](https://anaconda.org/conda-forge/hapi/badges/version.svg)](https://anaconda.org/conda-forge/hapi) | [![Conda Platforms](https://img.shields.io/conda/pn/conda-forge/hapi.svg)](https://anaconda.org/conda-forge/hapi) [![Join the chat at https://gitter.im/Hapi-Nile/Hapi](https://badges.gitter.im/Hapi-Nile/Hapi.svg)](https://gitter.im/Hapi-Nile/Hapi?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) | ![Hapi](/docs/img/Hapi4.png) ![Hapi](/docs/img/name.png) @@ -126,7 +126,7 @@ pip install git+https://github.com/MAfarrag/HAPI ## pip to install the last release you can easly use pip ``` -pip install HAPI-Nile==1.5.0 +pip install HAPI-Nile==1.6.0 ``` Quick start diff --git a/conda-lock.yml b/conda-lock.yml index cdc8f6b5..eef75ca4 100644 --- a/conda-lock.yml +++ b/conda-lock.yml @@ -15,9 +15,9 @@ metadata: - url: conda-forge used_env_vars: [] content_hash: - linux-64: 7a379216574c61157eab2fe7c71ccc6364b7ed84f4fbeca14c82cbc65adaf4ef - osx-64: 2b6aaa623ef73c8c3aa83a6c7bf53c608bd8b92d96aff0e7b6cdd00adef9997e - win-64: 54e6db9f89492dcf03bd8ba6f6af8643b516fe3de94b10993a18ce823d04d361 + linux-64: d20a5c3089d3ddf0184b2a725d2e9ed89f75d01a985ea6b534478e424e8416c9 + osx-64: 3aa3de97bc7e9ca3b47dab7929e94995970115ea1a6392a5bbb97d679377b229 + win-64: e09de2fe6556541efe78f97210bc2a92b9808b881e5b72e04ea5875bb18a9405 platforms: - linux-64 - osx-64 @@ -96,14 +96,14 @@ package: - category: main dependencies: {} hash: - md5: 737be0d34c22d24432049ab7a3214de4 - sha256: 3e7f203e33ea497b6e468279cc5fdef7d556473c25e7466b35fd672940392469 + md5: 7aca3059a1729aa76c597603f10b0dd3 + sha256: f6cc89d887555912d6c61b295d398cff9ec982a3417d38025c45d5dd9b9e79cd manager: conda name: ld_impl_linux-64 optional: false 
platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.39-hcc3a1bd_1.conda - version: '2.39' + url: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h41732ed_0.conda + version: '2.40' - category: main dependencies: {} hash: @@ -273,6 +273,18 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/attr-2.5.1-h166bdaf_1.tar.bz2 version: 2.5.1 +- category: main + dependencies: + libgcc-ng: '>=12' + hash: + md5: 5590453a8d072c9c89bfa26fcf88d870 + sha256: ab9278ee301ac95d54198cb75e0dbe0b27589d39bed029a13f0361c266f3e284 + manager: conda + name: aws-c-common + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-common-0.8.5-h166bdaf_0.tar.bz2 + version: 0.8.5 - category: main dependencies: libgcc-ng: '>=9.3.0' @@ -375,6 +387,19 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/gettext-0.21.1-h27087fc_0.tar.bz2 version: 0.21.1 +- category: main + dependencies: + libgcc-ng: '>=7.5.0' + libstdcxx-ng: '>=7.5.0' + hash: + md5: cddaf2c63ea4a5901cf09524c490ecdc + sha256: a853c0cacf53cfc59e1bca8d6e5cdfe9f38fce836f08c2a69e35429c2a492e77 + manager: conda + name: gflags + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/gflags-2.2.2-he1b5a44_1004.tar.bz2 + version: 2.2.2 - category: main dependencies: libgcc-ng: '>=7.5.0' @@ -387,6 +412,19 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/giflib-5.2.1-h36c2ea0_2.tar.bz2 version: 5.2.1 +- category: main + dependencies: + libgcc-ng: '>=7.5.0' + libstdcxx-ng: '>=7.5.0' + hash: + md5: 8c54672728e8ec6aa6db90cf2806d220 + sha256: 65da967f3101b737b08222de6a6a14e20e480e7d523a5d1e19ace7b960b5d6b1 + manager: conda + name: graphite2 + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/graphite2-1.3.13-h58526e2_1001.tar.bz2 + version: 1.3.13 - category: main dependencies: libgcc-ng: '>=12' @@ -512,16 +550,16 @@ package: version: '20220623.0' - category: main dependencies: - libgcc-ng: '>=9.4.0' - libstdcxx-ng: '>=9.4.0' + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' hash: - md5: c77f5e4e418fa47d699d6afa54c5d444 - sha256: f7c8866b27c4b6e2b2b84aae544fab539dfbfe5420c2c16fb868e9440bdb001e + md5: 0f683578378cddb223e7fd24f785ab2a + sha256: 4df6a29b71264fb25462065e8cddcf5bca60776b1801974af8cbd26b7425fcda manager: conda name: libaec optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.0.6-h9c3ff4c_0.tar.bz2 + url: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.0.6-hcb278e6_1.conda version: 1.0.6 - category: main dependencies: @@ -565,14 +603,14 @@ package: dependencies: libgcc-ng: '>=12' hash: - md5: fc84a0446e4e4fb882e78d786cfb9734 - sha256: 6f7cbc9347964e7f9697bde98a8fb68e0ed926888b3116474b1224eaa92209dc + md5: 5cc781fd91968b11a8a7fdbee0982676 + sha256: f9983a8ea03531f2c14bce76c870ca325c0fddf0c4e872bff1f78bc52624179c manager: conda name: libdeflate optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.14-h166bdaf_0.tar.bz2 - version: '1.14' + url: https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.17-h0b41bf4_0.conda + version: '1.17' - category: main dependencies: libgcc-ng: '>=7.5.0' @@ -609,6 +647,18 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/libiconv-1.17-h166bdaf_0.tar.bz2 version: '1.17' +- category: main + dependencies: + libgcc-ng: '>=12' 
+ hash: + md5: b4f717df2d377410b462328bf0e8fb7d + sha256: 0d8b666ca4deabf948a76654df0fa1277145bed1c9e8a58e18a649c22c5f1c3e + manager: conda + name: libjpeg-turbo + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libjpeg-turbo-2.1.4-h166bdaf_0.tar.bz2 + version: 2.1.4 - category: main dependencies: libgcc-ng: '>=9.4.0' @@ -697,6 +747,18 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/libudev1-252-h166bdaf_0.tar.bz2 version: '252' +- category: main + dependencies: + libgcc-ng: '>=12' + hash: + md5: ede4266dc02e875fe1ea77b25dd43747 + sha256: 49082ee8d01339b225f7f8c60f32a2a2c05fe3b16f31b554b4fb2c1dea237d1c + manager: conda + name: libutf8proc + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libutf8proc-2.8.0-h166bdaf_0.tar.bz2 + version: 2.8.0 - category: main dependencies: libgcc-ng: '>=9.3.0' @@ -748,30 +810,30 @@ package: version: 1.0.3 - category: main dependencies: - libgcc-ng: '>=9.3.0' - libstdcxx-ng: '>=9.3.0' + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' hash: - md5: fbe97e8fa6f275d7c76a09e795adc3e6 - sha256: 56313fe4e602319682d4ea05c0ed3c5c45fc79884a5896f2cb7436b15d6987f9 + md5: 318b08df404f9c9be5712aaa5a6f0bb0 + sha256: 1b4c105a887f9b2041219d57036f72c4739ab9e9fe5a1486f094e58c76b31f5f manager: conda name: lz4-c optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.9.3-h9c3ff4c_1.tar.bz2 - version: 1.9.3 + url: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.9.4-hcb278e6_0.conda + version: 1.9.4 - category: main dependencies: libgcc-ng: '>=12' libstdcxx-ng: '>=12' hash: - md5: 0af513b75f78a701a152568a31303bdf - sha256: 3bb69d4435d1986d688d35ec3f25d174235a5bbe1730c8085aa7dbf8be4dede3 + md5: 08efb1e1813f1a151b7a945b972a049b + sha256: cc8cb2097e96d2420dd698951ab524b6c8268fa691d370020a0eae3e65197c04 manager: conda name: mpg123 optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/mpg123-1.31.1-h27087fc_0.tar.bz2 - version: 1.31.1 + url: https://conda.anaconda.org/conda-forge/linux-64/mpg123-1.31.2-hcb278e6_0.conda + version: 1.31.2 - category: main dependencies: libgcc-ng: '>=10.3.0' @@ -802,13 +864,13 @@ package: ca-certificates: '' libgcc-ng: '>=12' hash: - md5: 7adaac6ff98219bcb99b45e408b80f4e - sha256: d9143f6d10e7edaa8cbb03e510d60c54463f4538c01f30b0abff51def582d94e + md5: 45758f4ece9c8b7b5f99328bd5caae51 + sha256: 2fca71b8d95edc0e530f9512cdd9187407ad486868b7c247fc16cab1e4172ffa manager: conda name: openssl optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.0.7-h0b41bf4_1.conda + url: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.0.7-h0b41bf4_2.conda version: 3.0.7 - category: main dependencies: @@ -1008,6 +1070,73 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/zfp-0.5.5-h9c3ff4c_8.tar.bz2 version: 0.5.5 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + libgcc-ng: '>=12' + openssl: '>=3.0.7,<4.0a0' + hash: + md5: afc84c17eb855bfe13a20ee603230235 + sha256: e340b8a12faa184582106aa4913753fead960fecc3a670c1270de8877b049a14 + manager: conda + name: aws-c-cal + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-cal-0.5.20-hff2c3d7_3.tar.bz2 + version: 0.5.20 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + libgcc-ng: '>=12' + hash: + md5: d279191a7bbce623d5087e0b1883cfb1 + sha256: 
214a00d101b1b0be2f130b0774dd89982070c325d6b0106e0e7a808917606ef1 + manager: conda + name: aws-c-compression + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-compression-0.2.16-hf5f93bc_0.tar.bz2 + version: 0.2.16 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + libgcc-ng: '>=12' + hash: + md5: 772dcd299af4757edd9f4da140849cf2 + sha256: bc29212f1504eaa1101e8b231dfdfc7d8bb9a111fe2d9d30704d5f68684cc9a0 + manager: conda + name: aws-c-sdkutils + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-sdkutils-0.1.7-hf5f93bc_0.tar.bz2 + version: 0.1.7 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + libgcc-ng: '>=12' + hash: + md5: 4960e03c8b6447aebc484f5a3c340180 + sha256: 93f0676f8058e4c5124f924a0833c28b4a14bc17e0283c43bec12c44a333539f + manager: conda + name: aws-checksums + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-checksums-0.1.14-h6027aba_0.conda + version: 0.1.14 +- category: main + dependencies: + gflags: '>=2.2.2,<2.3.0a0' + libgcc-ng: '>=10.3.0' + libstdcxx-ng: '>=10.3.0' + hash: + md5: b31f3565cb84435407594e548a2fb7b2 + sha256: 888cbcfb67f6e3d88a4c4ab9d26c9a406f620c4101a35dc6d2dbadb95f2221d4 + manager: conda + name: glog + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/glog-0.6.0-h6f12383_0.tar.bz2 + version: 0.6.0 - category: main dependencies: alsa-lib: '>=1.2.8,<1.2.9.0a0' @@ -1131,18 +1260,18 @@ package: version: 1.4.2 - category: main dependencies: - gettext: '>=0.19.8.1,<1.0a0' - libgcc-ng: '>=10.3.0' - libstdcxx-ng: '>=10.3.0' + gettext: '>=0.21.1,<1.0a0' + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' hash: - md5: 839aeb24ab885a7b902247a6d943d02f - sha256: c07f9d38bd4e3ad13afa985b31262cc3f12a4ea158eb48c2afae7272099fb800 + md5: 27e745f6f2e4b757e95dd7225fbe6bdb + sha256: a2e3df80a5713b4143f7d276a9354d78f2b2927b22831dc24c3246a82674aaba manager: conda name: libgpg-error optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libgpg-error-1.45-hc0c96e0_0.tar.bz2 - version: '1.45' + url: https://conda.anaconda.org/conda-forge/linux-64/libgpg-error-1.46-h620e276_0.conda + version: '1.46' - category: main dependencies: libgcc-ng: '>=9.4.0' @@ -1177,17 +1306,17 @@ package: libev: '>=4.33,<4.34.0a0' libgcc-ng: '>=12' libstdcxx-ng: '>=12' - libzlib: '>=1.2.12,<1.3.0a0' - openssl: '>=3.0.5,<4.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + openssl: '>=3.0.7,<4.0a0' hash: - md5: 2b7dbfa6988a41f9d23ba6d4f0e1d74e - sha256: 66988eb178d6ffbad3de5e391dad49aaa298e1309ac197ab40996eac740fbfff + md5: dd682f0b6d65e75b2bc868fc8e93d87e + sha256: acb80dfd0b7be38c47101df812fc903374c8408daec127edb6f11a648a67c243 manager: conda name: libnghttp2 optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.47.0-hff17c54_1.tar.bz2 - version: 1.47.0 + url: https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.51.0-hff17c54_0.conda + version: 1.51.0 - category: main dependencies: libgcc-ng: '>=12' @@ -1320,16 +1449,16 @@ package: dependencies: libgcc-ng: '>=12' libstdcxx-ng: '>=12' - openssl: '>=3.0.5,<4.0a0' + openssl: '>=3.0.7,<4.0a0' hash: - md5: 6c531bc30d49ae75b9c7c7f65bd62e3c - sha256: 2da35b51279bb4839a040296ed56f43bcfc0bb065bba423429804f88a15eef5f + md5: 6a39818710235826181e104aada40c75 + sha256: d7da5c1cc47656394933146ab30f6f3433553e8265ea1a4254bce441ab678199 manager: conda name: mysql-common 
optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/mysql-common-8.0.31-h26416b9_0.tar.bz2 - version: 8.0.31 + url: https://conda.anaconda.org/conda-forge/linux-64/mysql-common-8.0.32-ha901b37_0.conda + version: 8.0.32 - category: main dependencies: bzip2: '>=1.0.8,<2.0a0' @@ -1371,6 +1500,19 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/readline-8.1.2-h0f457ee_0.tar.bz2 version: 8.1.2 +- category: main + dependencies: + libgcc-ng: '>=12' + openssl: '>=3.0.7,<4.0a0' + hash: + md5: c65b7438ec54390ecc040e06be712967 + sha256: 4da579be9011bd5285c94d6f4ad7dca2b5ccba9e0a7a1aeb4e81d34a43676e04 + manager: conda + name: s2n + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.3.31-h3358134_0.conda + version: 1.3.31 - category: main dependencies: libgcc-ng: '>=9.4.0' @@ -1415,16 +1557,31 @@ package: dependencies: libgcc-ng: '>=12' libstdcxx-ng: '>=12' - libzlib: '>=1.2.12,<1.3.0a0' + libzlib: '>=1.2.13,<1.3.0a0' hash: - md5: adcf0be7897e73e312bd24353b613f74 - sha256: c42d9ec413edd7e984b6cac676997105d0f106556a0f045961153b049b95b87c + md5: 6b63daed8feeca47be78f323e793d555 + sha256: fbe49a8c8df83c2eccb37c5863ad98baeb29796ec96f2c503783d7b89bf80c98 manager: conda name: zstd optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.2-h6239696_4.tar.bz2 + url: https://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.2-h3eb15da_6.conda version: 1.5.2 +- category: main + dependencies: + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + libgcc-ng: '>=12' + s2n: '>=1.3.31,<1.3.32.0a0' + hash: + md5: 8d7d0930dcb9373baef80a151c831462 + sha256: f5fd58992a1496c99a8ab5783ffdff2a4ed4a8f2cd38b32748691cf38164d858 + manager: conda + name: aws-c-io + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.13.12-h57ca295_1.conda + version: 0.13.12 - category: main dependencies: libgcc-ng: '>=12' @@ -1618,14 +1775,14 @@ package: libzlib: '>=1.2.13,<1.3.0a0' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: 201168ef66095bbd565e124ee2c56a20 - sha256: 68a90e26dd56024c1c723605fd50817faf890f18acf91f1d36da3f8403474b52 + md5: 70cbb0c2033665f2a7339bf0ec51a67f + sha256: 3fb9a9cfd2f5c79e8116c67f95d5a9b790ec66807ae0d8cebefc26fda9f836a7 manager: conda name: libllvm15 optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libllvm15-15.0.6-h63197d8_0.conda - version: 15.0.6 + url: https://conda.anaconda.org/conda-forge/linux-64/libllvm15-15.0.7-hadd5161_0.conda + version: 15.0.7 - category: main dependencies: lame: '>=3.100,<3.101.0a0' @@ -1637,19 +1794,35 @@ package: libvorbis: '>=1.3.7,<1.4.0a0' mpg123: '>=1.31.1,<1.32.0a0' hash: - md5: d7a07b1f5974bce4735112aaef0c1467 - sha256: dfeffd12d5dd987f54d68f8ae53b14d665cd96578485c8f83aef0233520d69f5 + md5: c648d19cd9c8625898d5d370414de7c7 + sha256: 52ab2460d626d1cc95092daa4f7191f84d4950aeb9925484135f96af6b6391d8 manager: conda name: libsndfile optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libsndfile-1.1.0-hcb278e6_1.conda - version: 1.1.0 + url: https://conda.anaconda.org/conda-forge/linux-64/libsndfile-1.2.0-hb75c966_0.conda + version: 1.2.0 +- category: main + dependencies: + libevent: '>=2.1.10,<2.1.11.0a0' + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + libzlib: '>=1.2.12,<1.3.0a0' + openssl: '>=3.0.5,<4.0a0' + hash: + md5: 0e169728f52de7bcf5ffdbbdd9075e1a + sha256: 
99cd473cc11c4c41184236caa6edc9540087717d59e4c363db563cc8daaae5ab + manager: conda + name: libthrift + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libthrift-0.16.0-he500d00_2.tar.bz2 + version: 0.16.0 - category: main dependencies: jpeg: '>=9e,<10a' lerc: '>=4.0.0,<5.0a0' - libdeflate: '>=1.14,<1.15.0a0' + libdeflate: '>=1.17,<1.18.0a0' libgcc-ng: '>=12' libstdcxx-ng: '>=12' libwebp-base: '>=1.2.4,<2.0a0' @@ -1657,13 +1830,13 @@ package: xz: '>=5.2.6,<6.0a0' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: a01611c54334d783847879ee40109657 - sha256: 7237772229da1058fae73ae6f04ad846551a44d7da602e9d328b81049d3219a2 + md5: 2e648a34072eb39d7c4fc2a9981c5f0c + sha256: e3e18d91fb282b61288d4fd2574dfa31f7ae90ef2737f96722fb6ad3257862ee manager: conda name: libtiff optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.5.0-h82bc61c_0.conda + url: https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.5.0-h6adf6a1_2.conda version: 4.5.0 - category: main dependencies: @@ -1683,19 +1856,19 @@ package: dependencies: libgcc-ng: '>=12' libstdcxx-ng: '>=12' - libzlib: '>=1.2.12,<1.3.0a0' - mysql-common: 8.0.31 h26416b9_0 - openssl: '>=3.0.5,<4.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + mysql-common: 8.0.32 ha901b37_0 + openssl: '>=3.0.7,<4.0a0' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: da9633eee814d4e910fe42643a356315 - sha256: a44bd116a4b0ee57532661d63bc1926ef13d4ec2bd00b57130467ca185d4647c + md5: b05d7ea8b76f1172d5fe4f30e03277ea + sha256: 903174761ce605d98410747e0072757da5278d57309148ef175af490aa791f38 manager: conda name: mysql-libs optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/mysql-libs-8.0.31-hbc51c84_0.tar.bz2 - version: 8.0.31 + url: https://conda.anaconda.org/conda-forge/linux-64/mysql-libs-8.0.32-hd7da12d_0.conda + version: 8.0.32 - category: main dependencies: libgcc-ng: '>=12' @@ -1712,6 +1885,24 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/nss-3.82-he02c5a1_0.conda version: '3.82' +- category: main + dependencies: + libgcc-ng: '>=12' + libprotobuf: '>=3.21.12,<3.22.0a0' + libstdcxx-ng: '>=12' + libzlib: '>=1.2.13,<1.3.0a0' + lz4-c: '>=1.9.3,<1.10.0a0' + snappy: '>=1.1.9,<2.0a0' + zstd: '>=1.5.2,<1.6.0a0' + hash: + md5: 4ff484c4195091f4d6c00a4387cdbe2c + sha256: f92613115db536e9b005619c11371b8d2e3ade2717d378f7ff6e2230cb359881 + manager: conda + name: orc + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/orc-1.8.2-hfdbbad2_0.conda + version: 1.8.2 - category: main dependencies: bzip2: '>=1.0.8,<2.0a0' @@ -1822,16 +2013,28 @@ package: version: 1.7.2 - category: main dependencies: - python: '>=3.6' + python: '>=3.7' hash: - md5: 466dc5c1b75c93180efbd81d99dc29b0 - sha256: f3d58687fb000acc5d5f773d6e633ffb382575895abbc8db3d9b8e3996b05d39 + md5: ae5f4ad87126c55ba3f690ef07f81d64 + sha256: fbf0288cae7c6e5005280436ff73c95a36c5a4c978ba50175cc8e3eb22abc5f9 manager: conda name: affine optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/affine-2.3.1-pyhd8ed1ab_0.tar.bz2 - version: 2.3.1 + url: https://conda.anaconda.org/conda-forge/noarch/affine-2.4.0-pyhd8ed1ab_0.conda + version: 2.4.0 +- category: main + dependencies: + python: '' + hash: + md5: 5f095bc6454094e96f146491fd03633b + sha256: ae9fb8f68281f84482f2c234379aa12405a9e365151d43af20b3ae1f17312111 + manager: conda + name: appdirs + optional: false + platform: linux-64 + url: 
https://conda.anaconda.org/conda-forge/noarch/appdirs-1.4.4-pyh9f0ad1d_0.tar.bz2 + version: 1.4.4 - category: main dependencies: python: '>=3.5' @@ -1844,6 +2047,37 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/noarch/attrs-22.2.0-pyh71513ae_0.conda version: 22.2.0 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-checksums: '>=0.1.14,<0.1.15.0a0' + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + hash: + md5: fbc61c88b31bfe95096c2579d76af254 + sha256: aeaf4e10ca8c54b757bb4c44b9b42c27474c3328061fbf22d3b5bc0ff108839a + manager: conda + name: aws-c-event-stream + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-event-stream-0.2.18-h57874a7_0.conda + version: 0.2.18 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-compression: '>=0.2.16,<0.2.17.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + libgcc-ng: '>=12' + hash: + md5: 5a56f1109eb6dae09e560dfa7bd3203c + sha256: c0f09b7390af004e27f3b8eeee8fcd0697a0225697113a39e50941ff16a87a6e + manager: conda + name: aws-c-http + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.7.0-h96ef541_0.conda + version: 0.7.0 - category: main dependencies: brotli-bin: 1.0.9 h166bdaf_8 @@ -1863,14 +2097,14 @@ package: dependencies: python: '>=3.7' hash: - md5: c6653a1ed0c4a48ace64ab68a0bf9b27 - sha256: ae9d26949fcf8130d899e6bc22ed8afab40adcee782d79e0d82e0799960785af + md5: fd006afc4115740d8d52887ee813f262 + sha256: d17f6b5ae744e64a337c9dbad21b8d501916eaf0e55564dc81c78c492783d73a manager: conda name: cachetools optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/cachetools-5.2.0-pyhd8ed1ab_0.tar.bz2 - version: 5.2.0 + url: https://conda.anaconda.org/conda-forge/noarch/cachetools-5.3.0-pyhd8ed1ab_0.conda + version: 5.3.0 - category: main dependencies: python: '>=3.7' @@ -1971,20 +2205,20 @@ package: version: 1.1.0 - category: main dependencies: - expat: '>=2.4.9,<3.0a0' + expat: '>=2.5.0,<3.0a0' freetype: '>=2.12.1,<3.0a0' libgcc-ng: '>=12' libuuid: '>=2.32.1,<3.0a0' libzlib: '>=1.2.13,<1.3.0a0' hash: - md5: 78415f0180a8d9c5bcc47889e00d5fb1 - sha256: 4594348401ccdb622b41692698f3701423e9a4e726b6b6efa818c3a1611b01f9 + md5: 0f69b688f52ff6da70bccb7ff7001d1d + sha256: 155d534c9037347ea7439a2c6da7c24ffec8e5dd278889b4c57274a1d91e0a83 manager: conda name: fontconfig optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/fontconfig-2.14.1-hc2a2eb6_0.tar.bz2 - version: 2.14.1 + url: https://conda.anaconda.org/conda-forge/linux-64/fontconfig-2.14.2-h14ed4e7_0.conda + version: 2.14.2 - category: main dependencies: libgcc-ng: '>=12' @@ -2015,14 +2249,14 @@ package: dependencies: python: '>=3.8' hash: - md5: a6966947ba28bbe60f9904653da7fed5 - sha256: 286667d325d52cd866a410da18da5660eb8bcde10dd6eae90403fa462152eff6 + md5: fec8329fc739090f26a7d7803db254f1 + sha256: b3d34bf4924cb80363c1ab57ac821393f118ffaa94f05368bf4044941163b65e manager: conda name: future optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/future-0.18.2-pyhd8ed1ab_6.tar.bz2 - version: 0.18.2 + url: https://conda.anaconda.org/conda-forge/noarch/future-0.18.3-pyhd8ed1ab_0.conda + version: 0.18.3 - category: main dependencies: python: '>=3.6' @@ -2064,16 +2298,28 @@ package: version: '3.4' - category: main dependencies: - python: '' + python: '>=3.7' hash: - md5: 39161f81cc5e5ca45b8226fbb06c6905 - 
sha256: 9423ded508ebda87dae21d7876134e406ffeb88e6059f3fe1a909d180c351959 + md5: f800d2da156d08e289b14e87e43c1ae5 + sha256: 38740c939b668b36a50ef455b077e8015b8c9cf89860d421b3fff86048f49666 manager: conda name: iniconfig optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/iniconfig-1.1.1-pyh9f0ad1d_0.tar.bz2 - version: 1.1.1 + url: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.0.0-pyhd8ed1ab_0.conda + version: 2.0.0 +- category: main + dependencies: + python: '>=3.7' + hash: + md5: 2cfa3e1cf3fb51bb9b17acc5b5e9ea11 + sha256: 95ac5f9ee95fd4e34dc051746fc86016d3d4f6abefed113e2ede049d59ec2991 + manager: conda + name: jmespath + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/jmespath-1.0.1-pyhd8ed1ab_0.tar.bz2 + version: 1.0.1 - category: main dependencies: libgcc-ng: '>=12' @@ -2106,18 +2352,18 @@ package: - category: main dependencies: libgcc-ng: '>=12' - libllvm15: '>=15.0.6,<15.1.0a0' + libllvm15: '>=15.0.7,<15.1.0a0' libstdcxx-ng: '>=12' libzlib: '>=1.2.13,<1.3.0a0' hash: - md5: 535dd0ca1dcb165b6a8ffa10d01945fe - sha256: 0a8130d1f13225389d791e14b92654b488c82a8c6ec5cddc7c606389f02d6737 + md5: dcfae510179c3de2e42b3a2276d059e0 + sha256: 71539a4d472adc39a52e3cbbf5d33c06f09fd63f0c8f718fd2fb1274e7511b57 manager: conda name: libclang13 optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libclang13-15.0.6-default_h3a83d3e_0.conda - version: 15.0.6 + url: https://conda.anaconda.org/conda-forge/linux-64/libclang13-15.0.7-default_h3e3d535_0.conda + version: 15.0.7 - category: main dependencies: krb5: '>=1.20.1,<1.21.0a0' @@ -2174,13 +2420,13 @@ package: libzlib: '>=1.2.13,<1.3.0a0' openssl: '>=3.0.7,<4.0a0' hash: - md5: 509f08b3789d9e7e9a72871491ae08e2 - sha256: 37432e98a75ae1c7cf8f90da84b7a74b49e4e0fcf961a1ae8a6ef6ab2bb12bc7 + md5: 9873ab80ec8fab4a2c26c7580e0d7f58 + sha256: 02a8c83a4422e751a4119db62374d49a61eb41809d055e0bf4cb84cd23ba736f manager: conda name: libpq optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libpq-15.1-hb675445_2.conda + url: https://conda.anaconda.org/conda-forge/linux-64/libpq-15.1-hb675445_3.conda version: '15.1' - category: main dependencies: @@ -2248,14 +2494,14 @@ package: python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: 2d7028ea2a77f909931e1a173d952261 - sha256: 05a217ff2eea3fb0bada54f4c7c8efe5eb35e1ad7d142d662b364c686ff80da6 + md5: a1f0db6709778b77b5903541eeac4032 + sha256: f62b2aeafe968472b20b6935fa7b2290d27ac38b65d98b2708c7cf0b689f9f19 manager: conda name: markupsafe optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-2.1.1-py310h5764c6d_2.tar.bz2 - version: 2.1.1 + url: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-2.1.2-py310h1fa729e_0.conda + version: 2.1.2 - category: main dependencies: libgcc-ng: '>=12' @@ -2286,14 +2532,14 @@ package: dependencies: python: '>=3.8' hash: - md5: bb45ff9deddb045331fd039949f39650 - sha256: a8e3531fdb6f9acfde885dd94c8639c020013215dab98ff4ed82db7aa745277a + md5: 88e40007414ea9a13f8df20fcffa87e2 + sha256: edd149a40ea746ce17c1b135c72a1646810e99071bedb7d808914cc31b3c8a5d manager: conda name: networkx optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/networkx-2.8.8-pyhd8ed1ab_0.tar.bz2 - version: 2.8.8 + url: https://conda.anaconda.org/conda-forge/noarch/networkx-3.0-pyhd8ed1ab_0.conda + version: '3.0' - category: main dependencies: libblas: '>=3.9.0,<4.0a0' 
@@ -2304,14 +2550,14 @@ package: python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: 3b114b1559def8bad228fec544ac1812 - sha256: c3b2dc03dbae88ae1337e37e672aa44008898395d3508839bf35323b54e71665 + md5: c532c5df0bef4d138b2b0bdde99ab53e + sha256: 975c6d4e680b1fc13fbd7a2f9c59f1f147b3ebe7a4688d3cfdbfe01a647d4345 manager: conda name: numpy optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/numpy-1.23.5-py310h53a5b5f_0.conda - version: 1.23.5 + url: https://conda.anaconda.org/conda-forge/linux-64/numpy-1.24.1-py310h8deb116_0.conda + version: 1.24.1 - category: main dependencies: libgcc-ng: '>=12' @@ -2332,14 +2578,14 @@ package: dependencies: python: '>=3.7' hash: - md5: 0e8e1bd93998978fc3125522266d12db - sha256: 163f26e55246c506a75551ca01f35c7d4d533aee6db5c4cf2d598ae253e956b8 + md5: 1ff2e3ca41f0ce16afec7190db28288b + sha256: 00288f5e5e841711e8b8fef1f1242c858d8ef99ccbe5d7e0df4789d5d8d40645 manager: conda name: packaging optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/packaging-22.0-pyhd8ed1ab_0.conda - version: '22.0' + url: https://conda.anaconda.org/conda-forge/noarch/packaging-23.0-pyhd8ed1ab_0.conda + version: '23.0' - category: main dependencies: python: '>=3.10,<3.11.0a0' @@ -2429,14 +2675,14 @@ package: dependencies: python: '>=3.6' hash: - md5: c8d7e34ca76d6ecc03b84bedfd99d689 - sha256: 000f38e7ce7f020e2ce4d5024d3ffa63fcd65077edfe2182862965835f560525 + md5: f59d49a7b464901cf714b9e7984d01a2 + sha256: 93cfc7a92099e26b0575a343da4a667b52371cc38e4dee4ee264dc041ef77bac manager: conda name: pytz optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/pytz-2022.7-pyhd8ed1ab_0.conda - version: '2022.7' + url: https://conda.anaconda.org/conda-forge/noarch/pytz-2022.7.1-pyhd8ed1ab_0.conda + version: 2022.7.1 - category: main dependencies: libgcc-ng: '>=12' @@ -2466,32 +2712,44 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/rtree-1.0.1-py310hbdcdc62_1.tar.bz2 version: 1.0.1 +- category: main + dependencies: + python: '>=3.9,<3.11' + hash: + md5: 97162d83d23113ffa938c83c91758d1b + sha256: 34b2dc2eee575a80a55dbeaa1580df9ccf0afec7baca99f8bb71da8ff158aa21 + manager: conda + name: serapeum_utils + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/serapeum_utils-0.1.1-pyhd8ed1ab_0.conda + version: 0.1.1 - category: main dependencies: python: '>=3.7' hash: - md5: 9600fc9524d3f821e6a6d58c52f5bf5a - sha256: ea9f7eee2648d8078391cf9f968d848b400349c784e761501fb32ae01d323acf + md5: 9467d520d1457018e055bbbfdf9b7567 + sha256: 053447c82243033e6fd5cacbf7c349552146b135730a87fd942ec517d2b22efb manager: conda name: setuptools optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/setuptools-65.6.3-pyhd8ed1ab_0.conda - version: 65.6.3 + url: https://conda.anaconda.org/conda-forge/noarch/setuptools-66.1.1-pyhd8ed1ab_0.conda + version: 66.1.1 - category: main dependencies: libgcc-ng: '>=12' python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: c2a13d5ee57e76c0f044eea42f23307d - sha256: 78e59125993609d5809cc8845fb0512d6b007a7f4047774419a6cbb29cd9cbf9 + md5: 88602f3bbbe025f8cf28d546f97e686e + sha256: 48661957c7ca71285743ee2717b88090513dc8245f2230ac5ce538893d468078 manager: conda name: simplejson optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.18.0-py310h5764c6d_0.tar.bz2 - version: 3.18.0 + url: 
https://conda.anaconda.org/conda-forge/linux-64/simplejson-3.18.1-py310h1fa729e_0.conda + version: 3.18.1 - category: main dependencies: python: '' @@ -2696,6 +2954,38 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/noarch/aiosignal-1.3.1-pyhd8ed1ab_0.tar.bz2 version: 1.3.1 +- category: main + dependencies: + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-c-sdkutils: '>=0.1.7,<0.1.8.0a0' + libgcc-ng: '>=12' + hash: + md5: 1dc0c00522a7251d739d46b03637fce5 + sha256: 1e915f54182e08172352050f5e744648e016b1ecfa5c011f753946a7996f594e + manager: conda + name: aws-c-auth + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.6.21-hd93a3ba_3.conda + version: 0.6.21 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + libgcc-ng: '>=12' + hash: + md5: a1b44dfb2c581a9d74c4fdf826d66d6a + sha256: 7dcf27a41800cfa9117c736fa645c0164c2218d2b82ca533cc16f599ca5c513f + manager: conda + name: aws-c-mqtt + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-mqtt-0.7.13-h0b5698f_12.conda + version: 0.7.13 - category: main dependencies: brotli: '>=1.0.9,<2.0a0' @@ -2807,14 +3097,14 @@ package: python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: c5b1699e390d30b680dd93a2b251062b - sha256: 0026b90f6d05663df26ad281616b27211e39ecfe9b3ccbcb551d54c35e98bcf1 + md5: 7bf9d8c765b6b04882c719509652c6d6 + sha256: 670b736e895ed1b37187e0cbc73fd528414076f370068975135db2420af8663d manager: conda name: contourpy optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.0.6-py310hbf28c38_0.tar.bz2 - version: 1.0.6 + url: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.0.7-py310hdf3cbec_0.conda + version: 1.0.7 - category: main dependencies: libgcc-ng: '>=12' @@ -2822,14 +3112,14 @@ package: python_abi: 3.10.* *_cp310 tomli: '' hash: - md5: a00880abc45b82b2cfee326cfc3cef81 - sha256: 5b6f1c301ac653b9c39e4f5854eb5046504f4edf1d5197829a5d453a3e594b2e + md5: da7c45dbe780f5e162011a3af44e5009 + sha256: 204cf3c26702647af4b3c63fe3a90e0f98705229b5975691f752faacca8e13c8 manager: conda name: coverage optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.0.1-py310h1fa729e_0.conda - version: 7.0.1 + url: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.1.0-py310h1fa729e_0.conda + version: 7.1.0 - category: main dependencies: krb5: '>=1.20.1,<1.21.0a0' @@ -2933,6 +3223,22 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/grpcio-1.51.1-py310hc32fa93_0.conda version: 1.51.1 +- category: main + dependencies: + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + numpy: '' + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 + hash: + md5: fb6caff786b93eba713a1f9068773835 + sha256: 6598cff9fa8df1ea919e9767f96280db08063b3e002f07c39bd8e163521b67d8 + manager: conda + name: h3-py + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/h3-py-3.7.4-py310hd8f1fbe_1.tar.bz2 + version: 3.7.4 - category: main dependencies: libaec: '>=1.0.6,<2.0a0' @@ -2993,20 +3299,39 @@ package: version: 1.2.0 - category: main dependencies: - libclang13: 15.0.6 default_h3a83d3e_0 + libclang13: 15.0.7 default_h3e3d535_0 libgcc-ng: '>=12' - libllvm15: '>=15.0.6,<15.1.0a0' + 
libllvm15: '>=15.0.7,<15.1.0a0' libstdcxx-ng: '>=12' libzlib: '>=1.2.13,<1.3.0a0' hash: - md5: 1b2cee49acc5b03c73ad0f68bfe04bb8 - sha256: 3bf9a2d398492a017f7be21d2fb22a824302773fd2b3a7d143617917146a1b64 + md5: 189f7f97245f594b7a9d8e2b9f311cf8 + sha256: e837bd39a07949e6f27d69bcc784ecc1e7c9f7c96c920c0a1f875f5c23b986b2 manager: conda name: libclang optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libclang-15.0.6-default_h2e3cab8_0.conda - version: 15.0.6 + url: https://conda.anaconda.org/conda-forge/linux-64/libclang-15.0.7-default_had23c3d_0.conda + version: 15.0.7 +- category: main + dependencies: + libabseil: 20220623.0 cxx17* + libcrc32c: '>=1.1.2,<1.2.0a0' + libcurl: '>=7.86.0,<8.0a0' + libgcc-ng: '>=12' + libgrpc: '>=1.51.1,<1.52.0a0' + libprotobuf: '>=3.21.10,<3.22.0a0' + libstdcxx-ng: '>=12' + openssl: '>=3.0.7,<4.0a0' + hash: + md5: af905d193c58a376621f09a21849d2c6 + sha256: 66919154dd7776c4c67a43e8113cef94f5bd17e254172fa5cd86735a5e2cbc80 + manager: conda + name: libgoogle-cloud + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libgoogle-cloud-2.5.0-h21dfe5b_1.conda + version: 2.5.0 - category: main dependencies: python: '' @@ -3038,9 +3363,9 @@ package: - category: main dependencies: freetype: '>=2.12.1,<3.0a0' - jpeg: '>=9e,<10a' lcms2: '>=2.14,<3.0a0' libgcc-ng: '>=12' + libjpeg-turbo: '>=2.1.4,<3.0a0' libtiff: '>=4.5.0,<4.6.0a0' libwebp-base: '>=1.2.4,<2.0a0' libxcb: '>=1.13,<1.14.0a0' @@ -3050,33 +3375,33 @@ package: python_abi: 3.10.* *_cp310 tk: '>=8.6.12,<8.7.0a0' hash: - md5: 303776988a91771e6b60b8291d153553 - sha256: 3830922e71c88c52e5d830a89679c17713447b5c1fc8cc989ed71919617b4618 + md5: 66366aceea767f174f4d0408f3a62812 + sha256: ab85b5203a2f052e2e70aaa83dd5aa0782758598ae9a06842ec1f65f03173202 manager: conda name: pillow optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/pillow-9.2.0-py310h023d228_4.conda - version: 9.2.0 + url: https://conda.anaconda.org/conda-forge/linux-64/pillow-9.4.0-py310h4927cde_0.conda + version: 9.4.0 - category: main dependencies: python: '>=3.7' setuptools: '' wheel: '' hash: - md5: da66f2851b9836d3a7c5190082a45f7d - sha256: 7a86b2427abbf5cf695da192ba1c03130115f157297e7bfde65f0a18a345a7bc + md5: 85b35999162ec95f9f999bac15279c02 + sha256: bbffec284bd0e154363e845121f43007e7e64c80412ff13be21909be907b697d manager: conda name: pip optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/pip-22.3.1-pyhd8ed1ab_0.tar.bz2 - version: 22.3.1 + url: https://conda.anaconda.org/conda-forge/noarch/pip-23.0-pyhd8ed1ab_0.conda + version: '23.0' - category: main dependencies: krb5: '>=1.20.1,<1.21.0a0' libgcc-ng: '>=12' - libpq: 15.1 hb675445_2 + libpq: 15.1 hb675445_3 libxml2: '>=2.10.3,<2.11.0a0' libzlib: '>=1.2.13,<1.3.0a0' openssl: '>=3.0.7,<4.0a0' @@ -3085,13 +3410,13 @@ package: tzdata: '' zlib: '' hash: - md5: 4d2f2c04f8bdb6a9cb0d6e8d90b1f907 - sha256: 6d24935e50d7af2ed7610e731217556a5862173d8d6f77f00c118df2952f1e54 + md5: 4f686d5d582a3e3b32a3cb010fa12103 + sha256: c44d0aec97d552bac0e0ad310fe55c6989d5ae9b6bec2ac448072ab6ed295511 manager: conda name: postgresql optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/postgresql-15.1-h3248436_2.conda + url: https://conda.anaconda.org/conda-forge/linux-64/postgresql-15.1-h3248436_3.conda version: '15.1' - category: main dependencies: @@ -3132,24 +3457,24 @@ package: alsa-lib: '>=1.2.8,<1.2.9.0a0' dbus: '>=1.13.6,<2.0a0' fftw: 
'>=3.3.10,<4.0a0' - gstreamer-orc: '>=0.4.32,<0.5.0a0' + gstreamer-orc: '>=0.4.33,<0.5.0a0' jack: '>=1.9.21,<1.10.0a0' libcap: '>=2.66,<2.67.0a0' libgcc-ng: '>=12' libglib: '>=2.74.1,<3.0a0' - libsndfile: '>=1.1.0,<1.2.0a0' - libsystemd0: '>=251' - libtool: '>=2.4.6,<3.0a0' - libudev1: '>=251' - openssl: '>=3.0.5,<4.0a0' + libsndfile: '>=1.2.0,<1.3.0a0' + libsystemd0: '>=252' + libtool: '>=2.4.7,<3.0a0' + libudev1: '>=252' + openssl: '>=3.0.7,<4.0a0' hash: - md5: e4b74b33e13dd146e7d8b5078fc9ad30 - sha256: d356aff10f0b63199fb5895c7c4fcdddc9b0a7a43be682d51bd5041418b9a845 + md5: dbfc2a8d63a43a11acf4c704e1ef9d0c + sha256: aa2aa5b5e2430a3c3d8b24574e5e270c47026740cb706e9be31df81b0627afa6 manager: conda name: pulseaudio optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/pulseaudio-16.1-h126f2b6_0.tar.bz2 + url: https://conda.anaconda.org/conda-forge/linux-64/pulseaudio-16.1-ha8d29e2_1.conda version: '16.1' - category: main dependencies: @@ -3189,14 +3514,14 @@ package: python: '>=3.8' tomli: '>=1.0.0' hash: - md5: ac82c7aebc282e6ac0450fca012ca78c - sha256: 854233dc2d0d64219b7e951ccf49c1f32332c6fc7085ecb62cc18bc1f4e791b0 + md5: f0be05afc9c9ab45e273c088e00c258b + sha256: d298dfe6c53555c9fb5662f5f936e621cddd3b0a7031789375b82a1ee3b3a96b manager: conda name: pytest optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.2.0-pyhd8ed1ab_2.tar.bz2 - version: 7.2.0 + url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.2.1-pyhd8ed1ab_0.conda + version: 7.2.1 - category: main dependencies: python: '>=3.6' @@ -3253,41 +3578,20 @@ package: version: '4.9' - category: main dependencies: - libblas: '>=3.9.0,<4.0a0' - libcblas: '>=3.9.0,<4.0a0' + geos: '>=3.11.1,<3.11.2.0a0' libgcc-ng: '>=12' - libgfortran-ng: '' - libgfortran5: '>=10.4.0' - liblapack: '>=3.9.0,<4.0a0' - libstdcxx-ng: '>=12' numpy: '>=1.21.6,<2.0a0' python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: 0582a434d03f6b06d5defbb142c96f4f - sha256: 3b25a8ccc8c4ebd91e540824dd5c36c6c9fa3758a69b8199d169b00fad86c8fb + md5: c4a3707d6a630facb6cf7ed8e0d37326 + sha256: 31f71ea8786ce6a0cd649ffa270feace395cc7a8a758413f11e69d03c36a8efe manager: conda - name: scipy + name: shapely optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.9.3-py310hdfbd76f_2.tar.bz2 - version: 1.9.3 -- category: main - dependencies: - geos: '>=3.11.1,<3.11.2.0a0' - libgcc-ng: '>=12' - numpy: '>=1.21.6,<2.0a0' - python: '>=3.10,<3.11.0a0' - python_abi: 3.10.* *_cp310 - hash: - md5: c4a3707d6a630facb6cf7ed8e0d37326 - sha256: 31f71ea8786ce6a0cd649ffa270feace395cc7a8a758413f11e69d03c36a8efe - manager: conda - name: shapely - optional: false - platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/shapely-1.8.5-py310h5b266fc_2.tar.bz2 - version: 1.8.5 + url: https://conda.anaconda.org/conda-forge/linux-64/shapely-1.8.5-py310h5b266fc_2.tar.bz2 + version: 1.8.5 - category: main dependencies: libgcc-ng: '>=12' @@ -3298,14 +3602,14 @@ package: python_abi: 3.10.* *_cp310 toml: '' hash: - md5: 765b39936044b542a69ec2d863f5b891 - sha256: 0dc2cae0ee51f1572c59c891934350d148a55be08ce27fdc2cb1dad679c984d5 + md5: e06043e228657084b380c2c361aab918 + sha256: c7e4d76c6d7a1d054fee6cfc8d4f97ffcc33648950d2931cd7ba8b5de6c2ca2e manager: conda name: sip optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/sip-6.7.5-py310hd8f1fbe_0.conda - version: 6.7.5 + url: 
https://conda.anaconda.org/conda-forge/linux-64/sip-6.7.6-py310heca2aa9_0.conda + version: 6.7.6 - category: main dependencies: numpy: '' @@ -3377,6 +3681,25 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/noarch/async-timeout-4.0.2-pyhd8ed1ab_0.tar.bz2 version: 4.0.2 +- category: main + dependencies: + aws-c-auth: '>=0.6.21,<0.6.22.0a0' + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-checksums: '>=0.1.14,<0.1.15.0a0' + libgcc-ng: '>=12' + openssl: '>=3.0.7,<4.0a0' + hash: + md5: 093c59c159c785ff538465cd4857d792 + sha256: 431032318d68cd13557db8236192f88ef76a8168840601adc050bf0299204655 + manager: conda + name: aws-c-s3 + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.2.3-h82cbbf9_0.conda + version: 0.2.3 - category: main dependencies: jinja2: '' @@ -3440,14 +3763,14 @@ package: python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: f999dcc21fe27ad97a8afcfa590daa14 - sha256: 64a31aa5153e977e58256b098044b61ef903885b5b8f7ed807d066d1ceed2244 + md5: af4b0c22dc4006ce3c095e840cb2efd7 + sha256: 11d745fc13a122aaba35984362411d0b65dc2b31552c8ff3c3bdf6c988dc288f manager: conda name: cryptography optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/cryptography-38.0.4-py310h600f1e7_0.conda - version: 38.0.4 + url: https://conda.anaconda.org/conda-forge/linux-64/cryptography-39.0.0-py310h34c0648_0.conda + version: 39.0.0 - category: main dependencies: cloudpickle: '>=1.1.1' @@ -3505,14 +3828,14 @@ package: protobuf: '>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5' python: '>=3.7' hash: - md5: 35947a7b1f5319de636d74ce38dcf131 - sha256: 88c2be80b3c4ca97f5259b6c6a814b730e6ab4d09c15dbbe60df779c3a7416f9 + md5: cbf8b4569c1d2a0a6077d34a2d38333e + sha256: 1b2a9ae4540e3056a7eaf126a4939360f521854c8a4aa04f10ed4c80da4edc7e manager: conda name: googleapis-common-protos optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/googleapis-common-protos-1.57.0-pyhd8ed1ab_3.conda - version: 1.57.0 + url: https://conda.anaconda.org/conda-forge/noarch/googleapis-common-protos-1.57.1-pyhd8ed1ab_0.conda + version: 1.57.1 - category: main dependencies: gettext: '>=0.21.1,<1.0a0' @@ -3529,6 +3852,24 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/gstreamer-1.21.3-h25f0c4b_1.conda version: 1.21.3 +- category: main + dependencies: + cairo: '>=1.16.0,<2.0a0' + freetype: '>=2.12.1,<3.0a0' + graphite2: '' + icu: '>=70.1,<71.0a0' + libgcc-ng: '>=12' + libglib: '>=2.74.1,<3.0a0' + libstdcxx-ng: '>=12' + hash: + md5: 448fe40d2fed88ccf4d9ded37cbb2b38 + sha256: f300fcb390253d6d63346ee71e56f82bc830783d1682ac933fe9ac86f39da942 + manager: conda + name: harfbuzz + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/harfbuzz-6.0.0-h8e241bc_0.conda + version: 6.0.0 - category: main dependencies: blosc: '>=1.21.0,<2.0a0' @@ -3679,14 +4020,14 @@ package: python_abi: 3.10.* *_cp310 tk: '>=8.6.12,<8.7.0a0' hash: - md5: da51ddb20c0f99d672eb756c3abf27e7 - sha256: 58b84a4607bfbc3375d8dc3ec83cf324ae1df0261f51b6ef65023b5a725b79f8 + md5: 08d6376a6da7844308927190f81382bb + sha256: a262a9f4498ddc7f146f31ae79fcffefe0c7b58a059cb02d0567fafb9e83a3c6 manager: conda name: matplotlib-base optional: false platform: linux-64 - url: 
https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.6.2-py310h8d5ebf3_0.tar.bz2 - version: 3.6.2 + url: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.6.3-py310he60537e_0.conda + version: 3.6.3 - category: main dependencies: click: '>=3.0' @@ -3725,29 +4066,14 @@ package: python_abi: 3.10.* *_cp310 pytz: '>=2020.1' hash: - md5: bc363997d22f3b058fb17f1e89d4c96f - sha256: 9fc01e4b920cee9ce2974a2d16dbacc5b86db3d12825fe8b25ff2adf4c80a362 + md5: 467244b0dbb7da40927ac6ee0e9491de + sha256: 289bdc902fb8c536d8fe1297fdc97bb17f01a9b19ad15d96fc8ba0dbe239a379 manager: conda name: pandas optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/pandas-1.5.2-py310h769672d_0.conda - version: 1.5.2 -- category: main - dependencies: - numpy: '>=1.4.0' - python: '>=3.6' - scipy: '' - six: '' - hash: - md5: 50ef6b29b1fb0768ca82c5aeb4fb2d96 - sha256: 9d232f9cda05ce1833a7e5b16db4486ddfb71318635047fb64de119d364e0259 - manager: conda - name: patsy - optional: false - platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/patsy-0.5.3-pyhd8ed1ab_0.tar.bz2 - version: 0.5.3 + url: https://conda.anaconda.org/conda-forge/linux-64/pandas-1.5.3-py310h9b08913_0.conda + version: 1.5.3 - category: main dependencies: boost-cpp: '>=1.78.0,<1.78.1.0a0' @@ -3804,13 +4130,13 @@ package: sip: '' toml: '' hash: - md5: 0d815f1b2258d3d4c17cc80fd01e0f36 - sha256: e9bb563d87ba527b31d94a1240ec9efb68e02fa3febe4b28700cbd76753f1f39 + md5: 3b1946b676534472ce65181dda0b9554 + sha256: 7b58a8ca0bd2ab65d2c77017b288a551522dc5fe07d5d2dfa5189cdbb71019e8 manager: conda name: pyqt5-sip optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/pyqt5-sip-12.11.0-py310hd8f1fbe_2.tar.bz2 + url: https://conda.anaconda.org/conda-forge/linux-64/pyqt5-sip-12.11.0-py310heca2aa9_3.conda version: 12.11.0 - category: main dependencies: @@ -3827,26 +4153,6 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/noarch/pytest-cov-4.0.0-pyhd8ed1ab_0.tar.bz2 version: 4.0.0 -- category: main - dependencies: - joblib: '>=1.1.1' - libcblas: '>=3.9.0,<4.0a0' - libgcc-ng: '>=12' - libstdcxx-ng: '>=12' - numpy: '>=1.21.6,<2.0a0' - python: '>=3.10,<3.11.0a0' - python_abi: 3.10.* *_cp310 - scipy: '' - threadpoolctl: '>=2.0.0' - hash: - md5: 5a08a1f004445ee9bf58261feb16cc18 - sha256: 00733203bf459b9ca33b8c19ba7a57fca460aa266a6d98a5b111e8b199ed896d - manager: conda - name: scikit-learn - optional: false - platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.2.0-py310h209a8ca_0.conda - version: 1.2.0 - category: main dependencies: bzip2: '>=1.0.8,<2.0a0' @@ -3859,14 +4165,14 @@ package: zlib: '' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: d7655c5bdd7a9b47a49d0029f72773da - sha256: a3df2f027f7f9ec604ab2e03facc69a1a167c20bd1ad91316102910fe771a310 + md5: 6d97164f19dbd27575ef1899b02dc1e0 + sha256: 406bf59089f80d553981a624c7c163b479f883f2f661a6aca3eb95aabebeb490 manager: conda name: tiledb optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/tiledb-2.13.0-hd532e3d_1.conda - version: 2.13.0 + url: https://conda.anaconda.org/conda-forge/linux-64/tiledb-2.13.2-hd532e3d_0.conda + version: 2.13.2 - category: main dependencies: aiosignal: '>=1.1.2' @@ -3890,26 +4196,26 @@ package: version: 3.8.3 - category: main dependencies: - geos: '>=3.11.1,<3.11.2.0a0' + aws-c-auth: '>=0.6.21,<0.6.22.0a0' + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-event-stream: 
'>=0.2.18,<0.2.19.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-c-mqtt: '>=0.7.13,<0.7.14.0a0' + aws-c-s3: '>=0.2.3,<0.2.4.0a0' + aws-checksums: '>=0.1.14,<0.1.15.0a0' libgcc-ng: '>=12' libstdcxx-ng: '>=12' - matplotlib-base: '>=3.1' - numpy: '>=1.21.6,<2.0a0' - pyproj: '>=3.0.0' - pyshp: '>=2.1' - python: '>=3.10,<3.11.0a0' - python_abi: 3.10.* *_cp310 - scipy: '>=0.10' - shapely: '>=1.6.4' hash: - md5: bd14eaad9bbf54b78e48ecb8b644fcf6 - sha256: aa4641f2410f01c947f4f2d1b493c4e10406cbc75d4d94ea6dd9b754c9b67bff + md5: 4522baa80e06a18425f5f0cab34a506f + sha256: 090cbdb895014dc729532ed4820a2b14bc38b8ca23e12680ec1ee95bab164ef3 manager: conda - name: cartopy + name: aws-crt-cpp optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/cartopy-0.21.1-py310hcb7e713_0.conda - version: 0.21.1 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-crt-cpp-0.18.16-hf80f573_10.conda + version: 0.18.16 - category: main dependencies: packaging: '' @@ -3931,14 +4237,14 @@ package: google-crc32c: '>=1.0,<2.0.0dev' python: '>=3.7' hash: - md5: d8e92214f92379047780fd31bc8b1f94 - sha256: ff44d8c49f39afbcd2840f446a262a28b3f6f232be97604b55439dfed9756e38 + md5: a0d4c902824b3188a61df18c1e8bbf5e + sha256: d997737f75ff1132374f791b267e8a4322652a6b172da885abfc1d4bff18e883 manager: conda name: google-resumable-media optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-resumable-media-2.4.0-pyhd8ed1ab_0.tar.bz2 - version: 2.4.0 + url: https://conda.anaconda.org/conda-forge/noarch/google-resumable-media-2.4.1-pyhd8ed1ab_0.conda + version: 2.4.1 - category: main dependencies: alsa-lib: '>=1.2.8,<1.2.9.0a0' @@ -3979,7 +4285,7 @@ package: kealib: '>=1.5.0,<1.6.0a0' lerc: '>=4.0.0,<5.0a0' libcurl: '>=7.87.0,<8.0a0' - libdeflate: '>=1.14,<1.15.0a0' + libdeflate: '>=1.17,<1.18.0a0' libgcc-ng: '>=12' libiconv: '>=1.17,<2.0a0' libkml: '>=1.3.0,<1.4.0a0' @@ -4001,36 +4307,19 @@ package: poppler: '>=22.12.0,<22.13.0a0' postgresql: '' proj: '>=9.1.0,<9.1.1.0a0' - tiledb: '>=2.13.0,<2.14.0a0' + tiledb: '>=2.13.2,<2.14.0a0' xerces-c: '>=3.2.4,<3.3.0a0' xz: '>=5.2.6,<6.0a0' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: e48802f38ae5d01141b5bc4c8259797d - sha256: c9de70b0f94839835b8f482964839f69875bba65bbd69a45769e004dc2aa66ba + md5: 0a5408aac806b97ed97ca718957eb4b0 + sha256: c4a9694125c1ed214570df2e75ed1e799fa2aff8e85b365170ecd822d07aca22 manager: conda name: libgdal optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libgdal-3.6.1-he31f7c0_2.conda - version: 3.6.1 -- category: main - dependencies: - networkx: '' - numpy: '>=1.3' - pandas: '>=1.0' - python: '>=3.5' - scikit-learn: '' - scipy: '>=1.0' - hash: - md5: 908bbfb54da154042c5cbda77b37a3d1 - sha256: 1435305fb0a127b3154e76c0836d44526eeb93e80bd37596128d7ad8fb196d97 - manager: conda - name: mapclassify - optional: false - platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/mapclassify-2.4.3-pyhd8ed1ab_0.tar.bz2 - version: 2.4.3 + url: https://conda.anaconda.org/conda-forge/linux-64/libgdal-3.6.2-h10cbb15_3.conda + version: 3.6.2 - category: main dependencies: cftime: '' @@ -4053,53 +4342,17 @@ package: version: 1.6.2 - category: main dependencies: - cryptography: '>=38.0.0,<39' + cryptography: '>=38.0.0,<40' python: '>=3.6' hash: - md5: fbfa0a180d48c800f922a10a114a8632 - sha256: 42f04dded77ac2597108378d62b121697d0e982aba7b20a462a7239030563628 + md5: d41957700e83bbb925928764cb7f8878 + sha256: 
adbf8951f22bfa950b9e24394df1ef1d2b2d7dfb194d91c7f42bc11900695785 manager: conda name: pyopenssl optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/pyopenssl-22.1.0-pyhd8ed1ab_0.tar.bz2 - version: 22.1.0 -- category: main - dependencies: - matplotlib-base: '>=3.1,!=3.6.1' - numpy: '>=1.17,!=1.24.0' - pandas: '>=0.25' - python: '>=3.7' - scipy: '>=1.3' - typing_extensions: '' - hash: - md5: cf88f3a1c11536bc3c10c14ad00ccc42 - sha256: 92e4368ff90873716c637f6bd957506cf39288f9230bd7c75ca13082f5c8d7a1 - manager: conda - name: seaborn-base - optional: false - platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/seaborn-base-0.12.2-pyhd8ed1ab_0.conda - version: 0.12.2 -- category: main - dependencies: - libgcc-ng: '>=12' - numpy: '>=1.21.6,<2.0a0' - packaging: '' - pandas: '>=1.0' - patsy: '>=0.5.2' - python: '>=3.10,<3.11.0a0' - python_abi: 3.10.* *_cp310 - scipy: '>=1.3' - hash: - md5: 521b762fdb5e47915251d460a8fc5814 - sha256: 73b95efe96864aa66d70005139e0bb51a4a2f9070cb062c650b4a848bf35d4a0 - manager: conda - name: statsmodels - optional: false - platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/statsmodels-0.13.5-py310hde88566_2.tar.bz2 - version: 0.13.5 + url: https://conda.anaconda.org/conda-forge/noarch/pyopenssl-23.0.0-pyhd8ed1ab_0.conda + version: 23.0.0 - category: main dependencies: imagecodecs: '>=2021.11.20' @@ -4114,25 +4367,44 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/noarch/tifffile-2022.3.25-pyhd8ed1ab_0.tar.bz2 version: 2022.3.25 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-event-stream: '>=0.2.18,<0.2.19.0a0' + aws-crt-cpp: '>=0.18.16,<0.18.17.0a0' + libcurl: '>=7.87.0,<8.0a0' + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + libzlib: '>=1.2.13,<1.3.0a0' + openssl: '>=3.0.7,<4.0a0' + hash: + md5: c6b7607cfe683e42a079c2140252cc0d + sha256: fe7abfa4acf2fd1c88321c1db6f49a37640b2eae4c3fad215c67c11f5606ca38 + manager: conda + name: aws-sdk-cpp + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/aws-sdk-cpp-1.10.57-ha834a50_1.conda + version: 1.10.57 - category: main dependencies: hdf5: '>=1.12.2,<1.12.3.0a0' libgcc-ng: '>=12' - libgdal: 3.6.1 he31f7c0_2 + libgdal: 3.6.2 h10cbb15_3 libstdcxx-ng: '>=12' numpy: '>=1.21.6,<2.0a0' openssl: '>=3.0.7,<4.0a0' python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: 6682a2c089232996ba981161cc2cc456 - sha256: ba755277d307460c1b0a8fcab3042ff22aba951eaff38a3a09a9edd60c318fe1 + md5: 8f09d00becd16e21c92f26a99b231aac + sha256: eb3824b695871188e6b3a11f0fe4b950ba79327a6e33b7d69dd23fa85d7b49d6 manager: conda name: gdal optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/gdal-3.6.1-py310hc1b7723_2.conda - version: 3.6.1 + url: https://conda.anaconda.org/conda-forge/linux-64/gdal-3.6.2-py310hc1b7723_3.conda + version: 3.6.2 - category: main dependencies: dbus: '>=1.13.6,<2.0a0' @@ -4178,13 +4450,14 @@ package: fontconfig: '>=2.14.1,<3.0a0' fonts-conda-ecosystem: '' freetype: '>=2.12.1,<3.0a0' - gst-plugins-base: '>=1.21.2,<1.22.0a0' - gstreamer: '>=1.21.2,<1.22.0a0' + gst-plugins-base: '>=1.21.3,<1.22.0a0' + gstreamer: '>=1.21.3,<1.22.0a0' + harfbuzz: '>=6.0.0,<7.0a0' icu: '>=70.1,<71.0a0' jpeg: '>=9e,<10a' krb5: '>=1.20.1,<1.21.0a0' - libclang: '>=15.0.6,<16.0a0' - libclang13: '>=15.0.6' + libclang: '>=15.0.7,<16.0a0' + libclang13: '>=15.0.7' libcups: '>=2.3.3,<2.4.0a0' libevent: '>=2.1.10,<2.1.11.0a0' libgcc-ng: '>=12' @@ 
-4197,7 +4470,7 @@ package: libxkbcommon: '>=1.0.3,<2.0a0' libxml2: '>=2.10.3,<2.11.0a0' libzlib: '>=1.2.13,<1.3.0a0' - mysql-libs: '>=8.0.31,<8.1.0a0' + mysql-libs: '>=8.0.32,<8.1.0a0' nspr: '>=4.35,<5.0a0' nss: '>=3.82,<4.0a0' openssl: '>=3.0.7,<4.0a0' @@ -4209,13 +4482,13 @@ package: xcb-util-wm: '' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: 9c23a5205b67f2a67b19c84bf1fd7f5e - sha256: e141203420e0caae6d35a2044fdea751a88534ce42179e6b0a1d8fbe8e1bf50c + md5: 050ed331c6b32c82b845907fd3494d3a + sha256: 8da3cbbd96f0215a512d8a970f565781a98bdf0441a88a980aab70a399239b60 manager: conda name: qt-main optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/qt-main-5.15.6-hf6cd601_5.conda + url: https://conda.anaconda.org/conda-forge/linux-64/qt-main-5.15.6-h602db52_6.conda version: 5.15.6 - category: main dependencies: @@ -4243,33 +4516,6 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/linux-64/rasterio-1.3.4-py310hfc14bbd_0.tar.bz2 version: 1.3.4 -- category: main - dependencies: - cloudpickle: '>=0.2.1' - cytoolz: '>=0.7.3' - dask-core: '>=1.0.0,!=2.17.0' - imageio: '>=2.3.0' - libgcc-ng: '>=9.4.0' - libstdcxx-ng: '>=9.4.0' - networkx: '>=2.2' - numpy: '>=1.19.5,<2.0a0' - packaging: '>=20.0' - pillow: '>=6.1.0,!=7.1.0,!=7.1.1,!=8.3.0' - python: '>=3.9,<3.10.0a0' - python_abi: 3.9.* *_cp39 - pywavelets: '>=1.1.1' - scipy: '>=1.4.1' - tifffile: '>=2019.7.26' - toolz: '>=0.7.3' - hash: - md5: 200ceba1a31bf22629232285a550506f - sha256: cc77118707bb403af90722435296a1aef480bd58e99a2924f4fdbdbc3b2b5c04 - manager: conda - name: scikit-image - optional: false - platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/scikit-image-0.19.2-py39hde0f152_0.tar.bz2 - version: 0.19.2 - category: main dependencies: brotlipy: '>=0.6.0' @@ -4280,14 +4526,29 @@ package: pysocks: '>=1.5.6,<2.0,!=1.5.7' python: <4.0 hash: - md5: 3078ef2359efd6ecadbc7e085c5e0592 - sha256: 992f2d6ca50c98f865a4f2e4bada23f950e39f33ff7c64614a31ee152ec4d5ae + md5: 01f33ad2e0aaf6b5ba4add50dad5ad29 + sha256: f2f09c44e47946ce631dbc9a8a79bb463ac0f4122aaafdbcc51f200a1e420ca6 manager: conda name: urllib3 optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/urllib3-1.26.13-pyhd8ed1ab_0.conda - version: 1.26.13 + url: https://conda.anaconda.org/conda-forge/noarch/urllib3-1.26.14-pyhd8ed1ab_0.conda + version: 1.26.14 +- category: main + dependencies: + jmespath: '>=0.7.1,<2.0.0' + python: '>=3.7' + python-dateutil: '>=2.1,<3.0.0' + urllib3: '>=1.25.4,<1.27' + hash: + md5: 766c1b31877841ebd4bbed9274ff81ed + sha256: b03b23c6bef007197788df421ed60d351bb80065c09fcb6c7bdf2884894db226 + manager: conda + name: botocore + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/botocore-1.29.60-pyhd8ed1ab_0.conda + version: 1.29.60 - category: main dependencies: attrs: '>=17' @@ -4296,7 +4557,7 @@ package: cligj: '>=0.5' gdal: '' libgcc-ng: '>=12' - libgdal: '>=3.6.0,<3.7.0a0' + libgdal: '>=3.6.2,<3.7.0a0' libstdcxx-ng: '>=12' munch: '' numpy: '>=1.21.6,<2.0a0' @@ -4306,31 +4567,66 @@ package: shapely: '' six: '>=1.7' hash: - md5: 4dbdf48d4712e8906595291f38423eff - sha256: 4b16bdce243474428561646cd74573060e281cf3594e2bdc019ce61d9d9abfae + md5: 3326f0545eb9290956a409a12b24c729 + sha256: 753ffe6918a69b2e2bed4491ba8f31c967a9a2f7556be8d9410f1d5a02f5345c manager: conda name: fiona optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/fiona-1.8.22-py310ha325b7b_5.conda - version: 1.8.22 + 
url: https://conda.anaconda.org/conda-forge/linux-64/fiona-1.9.0-py310ha325b7b_0.conda + version: 1.9.0 +- category: main + dependencies: + aws-crt-cpp: '>=0.18.16,<0.18.17.0a0' + aws-sdk-cpp: '>=1.10.57,<1.10.58.0a0' + bzip2: '>=1.0.8,<2.0a0' + c-ares: '>=1.18.1,<2.0a0' + gflags: '>=2.2.2,<2.3.0a0' + glog: '>=0.6.0,<0.7.0a0' + libabseil: 20220623.0 cxx17* + libbrotlicommon: '>=1.0.9,<1.1.0a0' + libbrotlidec: '>=1.0.9,<1.1.0a0' + libbrotlienc: '>=1.0.9,<1.1.0a0' + libgcc-ng: '>=12' + libgoogle-cloud: '>=2.5.0,<2.5.1.0a0' + libgrpc: '>=1.51.1,<1.52.0a0' + libprotobuf: '>=3.21.12,<3.22.0a0' + libstdcxx-ng: '>=12' + libthrift: '>=0.16.0,<0.16.1.0a0' + libutf8proc: '>=2.8.0,<3.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + lz4-c: '>=1.9.3,<1.10.0a0' + openssl: '>=3.0.7,<4.0a0' + orc: '>=1.8.2,<1.8.3.0a0' + re2: '>=2022.6.1,<2022.6.2.0a0' + snappy: '>=1.1.9,<2.0a0' + zstd: '>=1.5.2,<1.6.0a0' + hash: + md5: 7e1d313b26717dd8384c5901fcf46233 + sha256: b6205e3ea6ec1620dbab857840a165c4d9a7b033ec4f147333802872d51f7eea + manager: conda + name: libarrow + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-10.0.1-hf9c26a6_6_cpu.conda + version: 10.0.1 - category: main dependencies: libgcc-ng: '>=12' libstdcxx-ng: '>=12' - pyqt5-sip: 12.11.0 py310hd8f1fbe_2 + pyqt5-sip: 12.11.0 py310heca2aa9_3 python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 qt-main: '>=5.15.6,<5.16.0a0' - sip: '>=6.7.2,<6.8.0a0' + sip: '>=6.7.5,<6.8.0a0' hash: - md5: 1e2c49215b17e6cf06edf100c9869ebe - sha256: d1ed54ae94f509d73a88bd5f029f2f6d17eb4838cae5952990ff1a95412e9894 + md5: d049da3204bf5ecb54a852b622f2d7d2 + sha256: 9210571612b135979541c5c65d28eda82941b3d613f3c8c792971bdfb7b4383a manager: conda name: pyqt optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/pyqt-5.15.7-py310h29803b5_2.tar.bz2 + url: https://conda.anaconda.org/conda-forge/linux-64/pyqt-5.15.7-py310hab646b1_3.conda version: 5.15.7 - category: main dependencies: @@ -4359,14 +4655,26 @@ package: python: '>=3.7,<4.0' urllib3: '>=1.21.1,<1.27' hash: - md5: 089382ee0e2dc2eae33a04cc3c2bddb0 - sha256: b45d0da6774c8231ab4fef0427b3050e7c54c84dfe453143dd4010999c89e050 + md5: 11d178fc55199482ee48d6812ea83983 + sha256: 22c081b4cdd023a514400413f50efdf2c378f56f2a5ea9d65666aacf4696490a manager: conda name: requests optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/requests-2.28.1-pyhd8ed1ab_1.tar.bz2 - version: 2.28.1 + url: https://conda.anaconda.org/conda-forge/noarch/requests-2.28.2-pyhd8ed1ab_0.conda + version: 2.28.2 +- category: main + dependencies: + libarrow: 10.0.1 hf9c26a6_6_cpu + hash: + md5: de0024bfd356739f47a3b58715a2912c + sha256: 2f05c0ccdeb3dee4d5bf0d3601d97635c5111a685478d3dcd817d173de380ee6 + manager: conda + name: arrow-cpp + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/arrow-cpp-10.0.1-ha770c72_6_cpu.conda + version: 10.0.1 - category: main dependencies: geopy: '' @@ -4415,30 +4723,45 @@ package: rsa: '>=3.1.4,<5' six: '>=1.9.0' hash: - md5: ce0b3b567b3b8f7a3ef5bd43b2fd1a5e - sha256: 5525c0fe34e102d12f66fe96d2bac211cb42332e294718f72c15734a2b618dc4 + md5: 88944e8c28fbd7471213f8b23d40f001 + sha256: d9fbbaf18ca8dff81d004bad336a8cd04be717c8e41cc0ba49c4471f50db9472 manager: conda name: google-auth optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-auth-2.15.0-pyh1a96a4e_0.conda - version: 2.15.0 + url: 
https://conda.anaconda.org/conda-forge/noarch/google-auth-2.16.0-pyh1a96a4e_1.conda + version: 2.16.0 - category: main dependencies: - matplotlib-base: '>=3.6.2,<3.6.3.0a0' + matplotlib-base: '>=3.6.3,<3.6.4.0a0' pyqt: '>=5' python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 tornado: '>=5' hash: - md5: aa78d12708912cd34135e6694a046ba0 - sha256: cedfc0290412a912b230eb84a5f7cc3031e913731255540be180a4afc25d892e + md5: 2bc2b44c4c5602efb589dc625939e57d + sha256: 308bce36a59033a1e6ca6436b1adb3954fe8ebae72a73918e016c62a444b8512 manager: conda name: matplotlib optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-3.6.2-py310hff52083_0.tar.bz2 - version: 3.6.2 + url: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-3.6.3-py310hff52083_0.conda + version: 3.6.3 +- category: main + dependencies: + appdirs: '>=1.3.0' + packaging: '>=20.0' + python: '>=3.6' + requests: '>=2.19.0' + hash: + md5: 6429e1d1091c51f626b5dcfdd38bf429 + sha256: 1f0548105de86fb2eb6fbb8d3d6cc2004079b8442d232258108687d6cc91eb73 + manager: conda + name: pooch + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/pooch-1.6.0-pyhd8ed1ab_0.tar.bz2 + version: 1.6.0 - category: main dependencies: dbus: '>=1.13.6,<2.0a0' @@ -4496,38 +4819,48 @@ package: version: 0.17.0 - category: main dependencies: - matplotlib: '>=3.5.3' - numpy: 1.23.5.* - pip: '>=22.3.1' - python: '>=3.9,<3.12' + botocore: '>=1.12.36,<2.0a.0' + python: '>=3.7' hash: - md5: 2e7d2a8e819e60c8a4c5ddbb07a6873a - sha256: 624f6ccb286c5fe0d04c5e924ea298c48f3ce5b4394ceb635b1ff1ebfc7cae71 + md5: 900e74d8547fbea3af028937df28ed77 + sha256: 0e459ed32b00e96b62c2ab7e2dba0135c73fd980120fe1a7bd49901f2d50760f manager: conda - name: cleopatra + name: s3transfer optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/cleopatra-0.2.4-pyhd8ed1ab_0.conda - version: 0.2.4 + url: https://conda.anaconda.org/conda-forge/noarch/s3transfer-0.6.0-pyhd8ed1ab_0.tar.bz2 + version: 0.6.0 - category: main dependencies: - fiona: '' - folium: '' - geopandas-base: 0.12.2 pyha770c72_0 - mapclassify: '>=2.4.0' - matplotlib-base: '' - python: '>=3.8' - rtree: '' - xyzservices: '' + botocore: '>=1.29.60,<1.30.0' + jmespath: '>=0.7.1,<2.0.0' + python: '>=3.7' + s3transfer: '>=0.6.0,<0.7.0' hash: - md5: ee3b330f13297f5839d46e1ca3e57d56 - sha256: 51660094efee2a74b24ab535e03005a6ddedc9e160c0d573cfaf2724312d171c + md5: bdb3a067f03eded198b3fdf7c66ba8c3 + sha256: d81ae5f720229e0a81914ace6ea8e741eced99cb72d34d3742461496bec71e12 manager: conda - name: geopandas + name: boto3 optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/geopandas-0.12.2-pyhd8ed1ab_0.conda - version: 0.12.2 + url: https://conda.anaconda.org/conda-forge/noarch/boto3-1.26.60-pyhd8ed1ab_0.conda + version: 1.26.60 +- category: main + dependencies: + matplotlib: '>=3.6.2' + numpy: 1.24.1.* + pip: '>=22.3.1' + python: '>=3.9,<3.11' + serapeum_utils: '>=0.1.1' + hash: + md5: 32cd6422f2e5730a7b5cc9f8c41f968d + sha256: 0bbe8125ef7303883f96239af0973784337c4fc3fa901a052616f71e7b4a6756 + manager: conda + name: cleopatra + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/cleopatra-0.2.7-pyhd8ed1ab_0.conda + version: 0.2.7 - category: main dependencies: google-auth: '>=2.14.1,<3.0dev' @@ -4561,42 +4894,60 @@ package: version: 0.1.0 - category: main dependencies: - loguru: '>=0.6.0' - matplotlib: '>=3.5.3' - numpy: 1.23.5 - pandas: '>=1.4.4' - pip: 
'>=22.3.1' - python: '>=3.9,<3.15' - scikit-learn: '>=1.1.1' - scipy: '>=1.9.0' + arrow-cpp: '>=0.11.0' hash: - md5: 5f4118cfa89158b5b58bac3ac5495cba - sha256: 755f6d603c37c3dfcb48bf1379e81178474fdeec94eabc73af5825f96e8a3e4e + md5: 79a5f78c42817594ae016a7896521a97 + sha256: 15e50657515b791734ba045da5135377404ca37c518b2066b9c6451c65cd732e manager: conda - name: statista + name: parquet-cpp optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/statista-0.1.7-pyhd8ed1ab_0.conda - version: 0.1.7 + url: https://conda.anaconda.org/conda-forge/noarch/parquet-cpp-1.5.1-2.tar.bz2 + version: 1.5.1 - category: main dependencies: - cartopy: '' - contextily: '>=1.0.0' - geopandas: '>=0.9.0' - mapclassify: '>=2.1' - matplotlib-base: '>=3.1.2' - pandas: '' - python: '>=3.7' - seaborn-base: '' + libblas: '>=3.9.0,<4.0a0' + libcblas: '>=3.9.0,<4.0a0' + libgcc-ng: '>=12' + libgfortran-ng: '' + libgfortran5: '>=11.3.0' + liblapack: '>=3.9.0,<4.0a0' + libstdcxx-ng: '>=12' + numpy: '>=1.21.6,<2.0a0' + pooch: '' + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 hash: - md5: 672d8f0139e74078dcdb6956d5e5e671 - sha256: dae0c82a86de293a0d6dbb97082f34d6a12fb442005667271a85eba9f9cfc040 + md5: ef72eeddf5316330730b11907c6c07d8 + sha256: 6183ada76df21014cb7aefdc3f7c850b4324bfccf77b0268dde09801f35dc349 manager: conda - name: geoplot + name: scipy optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/geoplot-0.5.1-pyhd8ed1ab_0.tar.bz2 - version: 0.5.1 + url: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.10.0-py310h8deb116_0.conda + version: 1.10.0 +- category: main + dependencies: + geos: '>=3.11.1,<3.11.2.0a0' + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + matplotlib-base: '>=3.1' + numpy: '>=1.21.6,<2.0a0' + pyproj: '>=3.0.0' + pyshp: '>=2.1' + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 + scipy: '>=0.10' + shapely: '>=1.6.4' + hash: + md5: bd14eaad9bbf54b78e48ecb8b644fcf6 + sha256: aa4641f2410f01c947f4f2d1b493c4e10406cbc75d4d94ea6dd9b754c9b67bff + manager: conda + name: cartopy + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/cartopy-0.21.1-py310hcb7e713_0.conda + version: 0.21.1 - category: main dependencies: google-api-core: '>=1.31.5,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0' @@ -4606,14 +4957,14 @@ package: python: '>=3.7' uritemplate: '>=3.0.1,<5' hash: - md5: 04241ec803212136585c4e7738de8543 - sha256: 59d5c1e9afce9be9042900e10ffa804bbe68fb1331fed2ace5d15ce461f83b87 + md5: 2fba68326c4a5f7308ae42725253d015 + sha256: 07590faf8e2b3939ff11026d71fc48c909b99995c00eea59f2de5734ee127773 manager: conda name: google-api-python-client optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-api-python-client-2.70.0-pyhd8ed1ab_0.conda - version: 2.70.0 + url: https://conda.anaconda.org/conda-forge/noarch/google-api-python-client-2.74.0-pyhd8ed1ab_0.conda + version: 2.74.0 - category: main dependencies: google-api-core: '>=1.31.6,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0' @@ -4631,67 +4982,102 @@ package: version: 2.3.2 - category: main dependencies: - affine: '>=2.3.1' - gdal: '>=3.5.3' - geopandas: '>=0.12.2' - geopy: '>=2.2.0' - loguru: '>=0.6.0' - netcdf4: '>=1.6.1' - numpy: 1.23.5 - pandas: '>=1.4.4' - pip: '>=22.3.1' - pyproj: '>=3.4.0' - python: '>=3.9,<3.11' - rasterio: '>=1.3.0' - requests: '>=2.28.1' - rtree: '>=1.0.0' - shapely: '>=1.8.4,<2' + numpy: '>=1.4.0' + python: '>=3.6' + scipy: '' + six: '' hash: - md5: 
4f85b9d893e953d3f330a705bb64dc75 - sha256: e878e39e30761723e4b5f4c66fc250c00026e2ed62823f89cb72e3edc375fdeb + md5: 50ef6b29b1fb0768ca82c5aeb4fb2d96 + sha256: 9d232f9cda05ce1833a7e5b16db4486ddfb71318635047fb64de119d364e0259 manager: conda - name: pyramids + name: patsy optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/pyramids-0.2.11-pyhd8ed1ab_0.conda - version: 0.2.11 + url: https://conda.anaconda.org/conda-forge/noarch/patsy-0.5.3-pyhd8ed1ab_0.tar.bz2 + version: 0.5.3 - category: main dependencies: - cleopatra: '>=0.2.4' - gdal: '>=3.5.3' - geopandas: '>=0.12.2' - geoplot: '>=0.5.1' - loguru: '>=0.6.0' - numpy: 1.23.5 - pip: '>=22.3.1' - pyramids: 0.2.11 - python: '>=3.9,<3.11' + gflags: '>=2.2.2,<2.3.0a0' + libarrow: 10.0.1 hf9c26a6_6_cpu + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + numpy: '>=1.21.6,<2.0a0' + parquet-cpp: 1.5.1.* + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 hash: - md5: 6b63a9d1e41e7fb8a9181435b9cb89c5 - sha256: 50b88c8f546f9f9d860ca09316a89d6d5d33d2e815f9f8175c07eaa6dfeb8ea0 + md5: 98b3f78b80c518e404671610cdae8e53 + sha256: ad5a015da3d2e3acd1da64a22a9e0fc4b9638cb661922c77652f163a32751451 manager: conda - name: digitalearth + name: pyarrow optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/digitalearth-0.1.10-pyhd8ed1ab_0.conda - version: 0.1.10 + url: https://conda.anaconda.org/conda-forge/linux-64/pyarrow-10.0.1-py310h633f555_6_cpu.conda + version: 10.0.1 - category: main dependencies: - loguru: '>=0.6.0' - numpy: 1.23.5 - pip: '>=22.3.1' - pyramids: '>=0.2.11' - python: '>=3.9,<3.11' - statista: '>=0.1.7' + cloudpickle: '>=0.2.1' + cytoolz: '>=0.7.3' + dask-core: '>=1.0.0,!=2.17.0' + imageio: '>=2.3.0' + libgcc-ng: '>=9.4.0' + libstdcxx-ng: '>=9.4.0' + networkx: '>=2.2' + numpy: '>=1.19.5,<2.0a0' + packaging: '>=20.0' + pillow: '>=6.1.0,!=7.1.0,!=7.1.1,!=8.3.0' + python: '>=3.9,<3.10.0a0' + python_abi: 3.9.* *_cp39 + pywavelets: '>=1.1.1' + scipy: '>=1.4.1' + tifffile: '>=2019.7.26' + toolz: '>=0.7.3' hash: - md5: 679b3be1963e704ad7e5a4d45c248c91 - sha256: 6e4f2d622558a179e0c9e0d9ff3faf606ed19e4b22976ef3fec2d869d314c3ed + md5: 200ceba1a31bf22629232285a550506f + sha256: cc77118707bb403af90722435296a1aef480bd58e99a2924f4fdbdbc3b2b5c04 manager: conda - name: geostatista + name: scikit-image optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/geostatista-0.1.5-pyhd8ed1ab_0.conda - version: 0.1.5 + url: https://conda.anaconda.org/conda-forge/linux-64/scikit-image-0.19.2-py39hde0f152_0.tar.bz2 + version: 0.19.2 +- category: main + dependencies: + joblib: '>=1.1.1' + libcblas: '>=3.9.0,<4.0a0' + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + numpy: '>=1.21.6,<2.0a0' + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 + scipy: '' + threadpoolctl: '>=2.0.0' + hash: + md5: 3ffef54f2577d392e8d8790b3815ced6 + sha256: 140138aaabce1c65f8f1039bca553673d0dd0cd468a6cb06cc5c6415824189ce + manager: conda + name: scikit-learn + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.2.1-py310h209a8ca_0.conda + version: 1.2.1 +- category: main + dependencies: + matplotlib-base: '>=3.1,!=3.6.1' + numpy: '>=1.17,!=1.24.0' + pandas: '>=0.25' + python: '>=3.7' + scipy: '>=1.3' + typing_extensions: '' + hash: + md5: cf88f3a1c11536bc3c10c14ad00ccc42 + sha256: 92e4368ff90873716c637f6bd957506cf39288f9230bd7c75ca13082f5c8d7a1 + manager: conda + name: seaborn-base + optional: false + platform: linux-64 + url: 
https://conda.anaconda.org/conda-forge/noarch/seaborn-base-0.12.2-pyhd8ed1ab_0.conda + version: 0.12.2 - category: main dependencies: google-api-core: '>=1.31.5,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0' @@ -4710,6 +5096,61 @@ package: platform: linux-64 url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-storage-2.7.0-pyh1a96a4e_0.conda version: 2.7.0 +- category: main + dependencies: + networkx: '' + numpy: '>=1.3' + pandas: '>=1.0' + python: '>=3.6' + scikit-learn: '' + scipy: '>=1.0' + hash: + md5: db1aeaff6e248db425e049feffded7a9 + sha256: 78aadbd9953976678b6e3298ac26a63cf9390a8794db3ff71f3fe5b6d13a35ca + manager: conda + name: mapclassify + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/mapclassify-2.5.0-pyhd8ed1ab_1.conda + version: 2.5.0 +- category: main + dependencies: + loguru: '>=0.6.0' + matplotlib: '>=3.6.3' + numpy: 1.24.1 + pandas: '>=1.5.3' + pip: '>=22.3.1' + python: '>=3.9,<3.15' + scikit-learn: '>=1.2.1' + scipy: '>=1.9.0' + hash: + md5: 75342a0eb483db01f3b0e9dd3c607815 + sha256: dc9cffa56c55a453d56b4fc1b2f057dde4a0f7dd06c4e8376de75b23e2e5cc5e + manager: conda + name: statista + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/statista-0.1.8-pyhd8ed1ab_0.conda + version: 0.1.8 +- category: main + dependencies: + libgcc-ng: '>=12' + numpy: '>=1.21.6,<2.0a0' + packaging: '' + pandas: '>=1.0' + patsy: '>=0.5.2' + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 + scipy: '>=1.3' + hash: + md5: 521b762fdb5e47915251d460a8fc5814 + sha256: 73b95efe96864aa66d70005139e0bb51a4a2f9070cb062c650b4a848bf35d4a0 + manager: conda + name: statsmodels + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/statsmodels-0.13.5-py310hde88566_2.tar.bz2 + version: 0.13.5 - category: main dependencies: future: '' @@ -4723,37 +5164,145 @@ package: setuptools: '' six: '' hash: - md5: 30e3d2c755cf9c0c0483f01ab25c7e59 - sha256: d498cf74bfa54861cc9a45b812d3321d0f482a62025c7abf69df914c94a3c3a9 + md5: 08b32b1b1369cff511af1592403bb2af + sha256: 9590c61b4613c52412cb16dcff9a3eca6cc2be685eb45fbba49803f6c552df30 manager: conda name: earthengine-api optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/earthengine-api-0.1.334-pyhd8ed1ab_1.conda - version: 0.1.334 + url: https://conda.anaconda.org/conda-forge/noarch/earthengine-api-0.1.338-pyhd8ed1ab_0.conda + version: 0.1.338 - category: main dependencies: + fiona: '' + folium: '' + geopandas-base: 0.12.2 pyha770c72_0 + mapclassify: '>=2.4.0' + matplotlib-base: '' + python: '>=3.8' + rtree: '' + xyzservices: '' + hash: + md5: ee3b330f13297f5839d46e1ca3e57d56 + sha256: 51660094efee2a74b24ab535e03005a6ddedc9e160c0d573cfaf2724312d171c + manager: conda + name: geopandas + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/geopandas-0.12.2-pyhd8ed1ab_0.conda + version: 0.12.2 +- category: main + dependencies: + cartopy: '' + contextily: '>=1.0.0' + geopandas: '>=0.9.0' + mapclassify: '>=2.1' + matplotlib-base: '>=3.1.2' + pandas: '' + python: '>=3.7' + seaborn-base: '' + hash: + md5: 672d8f0139e74078dcdb6956d5e5e671 + sha256: dae0c82a86de293a0d6dbb97082f34d6a12fb442005667271a85eba9f9cfc040 + manager: conda + name: geoplot + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/geoplot-0.5.1-pyhd8ed1ab_0.tar.bz2 + version: 0.5.1 +- category: main + dependencies: + affine: '>=2.3.1' + gdal: '>=3.5.3' + geopandas: 
'>=0.12.2' + geopy: '>=2.2.0' + h3-py: '>=3.7.4' + loguru: '>=0.6.0' + netcdf4: '>=1.6.1' + numpy: 1.24.1 + pandas: '>=1.4.4' + pip: '>=22.3.1' + pyarrow: '>=10.0.1' + pyproj: '>=3.4.0' + python: '>=3.9,<3.11' + pyyaml: '>=6.0' + rasterio: '>=1.3.0' + requests: '>=2.28.1' + rtree: '>=1.0.0' + shapely: '>=1.8.4,<2' + hash: + md5: e91e293e82bd2fc7da52944256d6d4e6 + sha256: 947c0bf9ec7c366299c57f8b4cfcdc1320168bd6bfc6ce15921a9f11af821c21 + manager: conda + name: pyramids + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/pyramids-0.3.2-pyhd8ed1ab_0.conda + version: 0.3.2 +- category: main + dependencies: + cleopatra: '>=0.2.7' + gdal: '>=3.5.3' + geopandas: '>=0.12.2' + geoplot: '>=0.5.1' + loguru: '>=0.6.0' + numpy: '>=1.24.1' + pip: '>=22.3.1' + pyramids: '>=0.3.2' + python: '>=3.9,<3.11' + hash: + md5: bf10a9e624caf50d57e643a73f91a70c + sha256: 8fe74e096a93b016a5dff3e852ac3079d74a6f7e2d21bf4b05a347e9bcbe61fa + manager: conda + name: digitalearth + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/digitalearth-0.1.11-pyhd8ed1ab_0.conda + version: 0.1.11 +- category: main + dependencies: + boto3: '>=1.26.50' earthengine-api: '>=0.1.324' ecmwf-api-client: '>=1.6.3' gdal: '>=3.5.3' joblib: '>=1.2.0' loguru: '>=0.6.0' netcdf4: '>=1.6.1' - numpy: 1.23.5 + numpy: 1.24.1 pandas: '>=1.4.4' + pathlib: '>=1.0.1' pip: '>=22.3.1' - pyramids: '>=0.2.11' + pyramids: '>=0.3.2' python: '>=3.9,<3.11' + pyyaml: '>=6.0' requests: '>=2.28.1' + serapeum_utils: '>=0.1.1' hash: - md5: 51d9c69a0c90519b0dda07828319af81 - sha256: 3c4cd924212bac400b9bfafa36ed729240a47eafc01b2c9d251d37012552e3ef + md5: 65d70fa30a34f9cbdc9dd130017310af + sha256: d75791533e3ea957c049ff3be8aae020003730d12216d149b6f086d2da66d8db manager: conda name: earth2observe optional: false platform: linux-64 - url: https://conda.anaconda.org/conda-forge/noarch/earth2observe-0.1.7-pyhd8ed1ab_0.conda - version: 0.1.7 + url: https://conda.anaconda.org/conda-forge/noarch/earth2observe-0.2.2-pyhd8ed1ab_0.conda + version: 0.2.2 +- category: main + dependencies: + loguru: '>=0.6.0' + numpy: '>=1.24.1' + pip: '>=22.3.1' + pyramids: '>=0.3.2' + python: '>=3.9,<3.11' + statista: '>=0.1.8' + hash: + md5: 7e2419cadcd4ecd1b77855ff3b47634c + sha256: a7ea64da036da19c224d8e61efdf862ae91b932501dad7297d4a364b3329c80a + manager: conda + name: geostatista + optional: false + platform: linux-64 + url: https://conda.anaconda.org/conda-forge/noarch/geostatista-0.1.6-pyhd8ed1ab_0.conda + version: 0.1.6 - category: main dependencies: {} hash: @@ -4862,6 +5411,17 @@ package: source: null url: https://files.pythonhosted.org/packages/20/49/b3e0edec68d81846f519c602ac38af9db86e1e71275528b3e814ae236063/pytest_cov-3.0.0-py3-none-any.whl version: 3.0.0 +- category: main + dependencies: {} + hash: + md5: f483bfa9390dadb210466f43723b8e83 + sha256: eb2acab7ac7c586221d5807b59f6273ecc1928c5785bcc044928791f7ec22b3a + manager: conda + name: aws-c-common + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-common-0.8.5-hb7f2c08_0.tar.bz2 + version: 0.8.5 - category: main dependencies: {} hash: @@ -5030,14 +5590,14 @@ package: - category: main dependencies: {} hash: - md5: ce2a6075114c9b64ad8cace52492feee - sha256: 0153de9987fa6e8dd5be45920470d579af433d4560bfd77318a72b3fd75fb6dc + md5: e3894420cf8b6abbf6c4d3d9742fbb4a + sha256: b322e190fd6fe631e1f4836ef99cbfb8352c03c30b51cb5baa216f7c9124d82e manager: conda name: libdeflate optional: false platform: osx-64 - url: 
https://conda.anaconda.org/conda-forge/osx-64/libdeflate-1.14-hb7f2c08_0.tar.bz2 - version: '1.14' + url: https://conda.anaconda.org/conda-forge/osx-64/libdeflate-1.17-hac1461d_0.conda + version: '1.17' - category: main dependencies: {} hash: @@ -5071,6 +5631,28 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/libiconv-1.17-hac89ed1_0.tar.bz2 version: '1.17' +- category: main + dependencies: {} + hash: + md5: a8adc43e4b09be9c2ddbf89900956db2 + sha256: a4c086d223fc82160cee1667d0a2a7da8b65f78f5bcda444cb5627c2232c83e3 + manager: conda + name: libjpeg-turbo + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/libjpeg-turbo-2.1.4-hb7f2c08_0.tar.bz2 + version: 2.1.4 +- category: main + dependencies: {} + hash: + md5: db98dc3e58cbc11583180609c429c17d + sha256: 55a7f96b2802e94def207fdfe92bc52c24d705d139bb6cdb3d936cbe85e1c505 + manager: conda + name: libutf8proc + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/libutf8proc-2.8.0-hb7f2c08_0.tar.bz2 + version: 2.8.0 - category: main dependencies: {} hash: @@ -5096,14 +5678,14 @@ package: - category: main dependencies: {} hash: - md5: 3a900993deb973d86d98361ceef49ab3 - sha256: d8b65c6bd88a6508bc0ed3c03edfdf4c05d55024b2bf43dd5f61bec69f3bb87e + md5: 3faa9933dff6e96333b5ca5274674b63 + sha256: cc1586b43b757890b7d1cd24e1582345a36c40acd6cb6f9d9affb91de3c62015 manager: conda name: llvm-openmp optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/llvm-openmp-15.0.6-h61d9ccf_0.conda - version: 15.0.6 + url: https://conda.anaconda.org/conda-forge/osx-64/llvm-openmp-15.0.7-h61d9ccf_0.conda + version: 15.0.7 - category: main dependencies: {} hash: @@ -5237,6 +5819,42 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/aom-3.3.0-h96cf925_1.tar.bz2 version: 3.3.0 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + hash: + md5: 7f22a82665a85c7f88a2c3af7250fb3d + sha256: 74c87279711d109e74859d647d9210d6f70b20c5e40cd5d23e8063d39304c468 + manager: conda + name: aws-c-compression + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-compression-0.2.16-hfb1f584_0.tar.bz2 + version: 0.2.16 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + hash: + md5: 075dbb151abb7abc909da48163637429 + sha256: c7010e55eb77b92ae065b88803731feff0ac802ac1b1c6da983378269672cc9c + manager: conda + name: aws-c-sdkutils + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-sdkutils-0.1.7-hfb1f584_0.tar.bz2 + version: 0.1.7 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + hash: + md5: 49b541ba797b2f415b9b67c65996b180 + sha256: cbfd371cc16b85958b6c002269aa1ec5ce279d14a708f8124bd4bacb8a51299a + manager: conda + name: aws-checksums + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-checksums-0.1.14-hfb1f584_0.conda + version: 0.1.14 - category: main dependencies: libcxx: '>=11.1.0' @@ -5300,6 +5918,18 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/gettext-0.21.1-h8a4c099_0.tar.bz2 version: 0.21.1 +- category: main + dependencies: + libcxx: '>=10.0.1' + hash: + md5: 3f59cc77a929537e42120faf104e0d16 + sha256: 39540f879057ae529cad131644af111a8c3c48b384ec6212de6a5381e0863948 + manager: conda + name: gflags + optional: false + platform: osx-64 + url: 
https://conda.anaconda.org/conda-forge/osx-64/gflags-2.2.2-hb1e8313_1004.tar.bz2 + version: 2.2.2 - category: main dependencies: libcxx: '>=12.0.1' @@ -5338,15 +5968,15 @@ package: version: '20220623.0' - category: main dependencies: - libcxx: '>=11.1.0' + libcxx: '>=14.0.6' hash: - md5: 0a49b696f11ed805ee4690479cc5e950 - sha256: 5c45ae356d10b6b78a9985e19d4cbd0e71cc76d1b43028f32737ef313ed525de + md5: 7c0f82f435ab4c48d65dc9b28db2ad9e + sha256: 38d32f4c7efddc204e53f43cd910122d3e6a997de1a3cd15f263217b225a9cdf manager: conda name: libaec optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libaec-1.0.6-he49afe7_0.tar.bz2 + url: https://conda.anaconda.org/conda-forge/osx-64/libaec-1.0.6-hf0c8a7f_1.conda version: 1.0.6 - category: main dependencies: @@ -5485,16 +6115,16 @@ package: version: 1.0.3 - category: main dependencies: - libcxx: '>=11.1.0' + libcxx: '>=14.0.6' hash: - md5: 05c08241b66631c00ca4f9e0b75320bc - sha256: 627c435c511e789ed04e0e2077fdfc645117474c4d1c4a7c0d31241936632cd4 + md5: aa04f7143228308662696ac24023f991 + sha256: 39aa0c01696e4e202bf5e337413de09dfeec061d89acd5f28e9968b4e93c3f48 manager: conda name: lz4-c optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/lz4-c-1.9.3-he49afe7_1.tar.bz2 - version: 1.9.3 + url: https://conda.anaconda.org/conda-forge/osx-64/lz4-c-1.9.4-hf0c8a7f_0.conda + version: 1.9.4 - category: main dependencies: libcxx: '>=14.0.6' @@ -5511,13 +6141,13 @@ package: dependencies: ca-certificates: '' hash: - md5: 7a3fb6d40e0aa5dbb5b4ef54462f00a8 - sha256: 3eb19686ae870daae035582cb93253a6435f71baf537addced41be449b4daf67 + md5: c187e9853d83eb1e59af3652f0063722 + sha256: 1bcc5ef3105f554f22509e352d41b892c77827a6601e2fd7da3e77f637e29fa7 manager: conda name: openssl optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/openssl-3.0.7-hfd90126_1.conda + url: https://conda.anaconda.org/conda-forge/osx-64/openssl-3.0.7-hfd90126_2.conda version: 3.0.7 - category: main dependencies: @@ -5619,17 +6249,30 @@ package: version: 1.2.13 - category: main dependencies: - libcxx: '>=13.0.1' - libzlib: '>=1.2.12,<1.3.0a0' + libcxx: '>=14.0.6' + libzlib: '>=1.2.13,<1.3.0a0' hash: - md5: 0b446e84f3ccf085e590dc1f73eebe3f - sha256: acf19719a0a4b7534532166f84346709fdb8ccf960bc6c19ac3b437177e95dde + md5: 40a188783d3c425bdccc9ae9104acbb8 + sha256: f845dafb0b488703ce81e25b6f27ed909ee9061b730c172e6b084fcf7156231f manager: conda name: zstd optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/zstd-1.5.2-hfa58983_4.tar.bz2 + url: https://conda.anaconda.org/conda-forge/osx-64/zstd-1.5.2-hbc0c0cd_6.conda version: 1.5.2 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + openssl: '>=3.0.7,<4.0a0' + hash: + md5: 06a3c718456f6823754797e3d67832db + sha256: 83dd6ae704fc3e4a8cf9f462513dd21f2342f4e95890f8a4aa483ce4bb781df1 + manager: conda + name: aws-c-cal + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-cal-0.5.20-hdc31e88_3.tar.bz2 + version: 0.5.20 - category: main dependencies: libcxx: '>=14.0.6' @@ -5715,6 +6358,19 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/freetype-2.12.1-h3f81eb7_1.conda version: 2.12.1 +- category: main + dependencies: + gflags: '>=2.2.2,<2.3.0a0' + libcxx: '>=12.0.1' + hash: + md5: 69eb97ca709a136c53fdca1f2fd33ddf + sha256: fdb38560094fb4a952346dc72a79b3cb09e23e4d0cae9ba4f524e6e88203d3c8 + manager: conda + name: glog + optional: false + 
platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/glog-0.6.0-h8ac2a54_0.tar.bz2 + version: 0.6.0 - category: main dependencies: jpeg: '>=9e,<10a' @@ -5756,6 +6412,18 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/libavif-0.9.3-h5eb16cf_1.tar.bz2 version: 0.9.3 +- category: main + dependencies: + openssl: '>=3.0.0,<4.0a0' + hash: + md5: ed607da8c2602aeddb4ae723819fcdcc + sha256: 49c6b6082152a8ac7f161b1d5a4d9d4d51961b5ca29c9c6798fba832d9eab1f1 + manager: conda + name: libevent + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/libevent-2.1.10-h7d65743_4.tar.bz2 + version: 2.1.10 - category: main dependencies: libgfortran5: '' @@ -5807,19 +6475,19 @@ package: - category: main dependencies: c-ares: '>=1.18.1,<2.0a0' - libcxx: '>=13.0.1' + libcxx: '>=14.0.6' libev: '>=4.33,<4.34.0a0' - libzlib: '>=1.2.12,<1.3.0a0' - openssl: '>=3.0.5,<4.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + openssl: '>=3.0.7,<4.0a0' hash: - md5: 19d5ae4be3e4b3cfa5696f3667e8c631 - sha256: 9e14d62e4462e6be28bcaa266f69e96ead43f4d7ef566e9cd460dbc9ae999daf + md5: 910e7f012beecf5d845a993feb9259a9 + sha256: 6400779ed8bfee36c35a66d52ec4e374651b07fb0e1cd1d0d1b9ef39f86a21c0 manager: conda name: libnghttp2 optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libnghttp2-1.47.0-h5aae05b_1.tar.bz2 - version: 1.47.0 + url: https://conda.anaconda.org/conda-forge/osx-64/libnghttp2-1.51.0-he2ab024_0.conda + version: 1.51.0 - category: main dependencies: geos: '>=3.11.1,<3.11.2.0a0' @@ -5851,19 +6519,19 @@ package: jpeg: '>=9e,<10a' lerc: '>=4.0.0,<5.0a0' libcxx: '>=14.0.6' - libdeflate: '>=1.14,<1.15.0a0' + libdeflate: '>=1.17,<1.18.0a0' libwebp-base: '>=1.2.4,<2.0a0' libzlib: '>=1.2.13,<1.3.0a0' xz: '>=5.2.6,<6.0a0' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: f32f9708c8d6f13e20a524c6da9a881e - sha256: 6659b6b71e79976e1bdd4b730a92425954e88cad6a184e274d6523d34d85f10e + md5: 35f714269a801f7c3cb522aacd3c0e69 + sha256: 03d00d6a3b1e569e9a8da66a9ad75a29c9c676dc7de6c16771abbb961abded2c manager: conda name: libtiff optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libtiff-4.5.0-h6268bbc_0.conda + url: https://conda.anaconda.org/conda-forge/osx-64/libtiff-4.5.0-hee9004a_2.conda version: 4.5.0 - category: main dependencies: @@ -5894,6 +6562,23 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/libzip-1.9.2-h6db710c_1.tar.bz2 version: 1.9.2 +- category: main + dependencies: + libcxx: '>=14.0.6' + libprotobuf: '>=3.21.12,<3.22.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + lz4-c: '>=1.9.3,<1.10.0a0' + snappy: '>=1.1.9,<2.0a0' + zstd: '>=1.5.2,<1.6.0a0' + hash: + md5: 14eec253b02641e03dafc35dc995b836 + sha256: 2661a403998128432c901bfbd7387e5e392c9c569f84802db66c01b46a9ee96e + manager: conda + name: orc + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/orc-1.8.2-ha9d861c_0.conda + version: 1.8.2 - category: main dependencies: bzip2: '>=1.0.8,<2.0a0' @@ -5932,16 +6617,28 @@ package: version: 3.40.0 - category: main dependencies: - python: '>=3.6' + python: '>=3.7' hash: - md5: 466dc5c1b75c93180efbd81d99dc29b0 - sha256: f3d58687fb000acc5d5f773d6e633ffb382575895abbc8db3d9b8e3996b05d39 + md5: ae5f4ad87126c55ba3f690ef07f81d64 + sha256: fbf0288cae7c6e5005280436ff73c95a36c5a4c978ba50175cc8e3eb22abc5f9 manager: conda name: affine optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/affine-2.3.1-pyhd8ed1ab_0.tar.bz2 - 
version: 2.3.1 + url: https://conda.anaconda.org/conda-forge/noarch/affine-2.4.0-pyhd8ed1ab_0.conda + version: 2.4.0 +- category: main + dependencies: + python: '' + hash: + md5: 5f095bc6454094e96f146491fd03633b + sha256: ae9fb8f68281f84482f2c234379aa12405a9e365151d43af20b3ae1f17312111 + manager: conda + name: appdirs + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/appdirs-1.4.4-pyh9f0ad1d_0.tar.bz2 + version: 1.4.4 - category: main dependencies: python: '>=3.5' @@ -5954,6 +6651,19 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/noarch/attrs-22.2.0-pyh71513ae_0.conda version: 22.2.0 +- category: main + dependencies: + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + hash: + md5: 8281dfc21312c8bf2e32b74a4886e3c3 + sha256: e2c8b5081bce12120fdca34852e7037479f46da8519b852e313f3967902df384 + manager: conda + name: aws-c-io + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-io-0.13.12-hb3bb6ef_1.conda + version: 0.13.12 - category: main dependencies: brotli-bin: 1.0.9 hb7f2c08_8 @@ -5972,14 +6682,14 @@ package: dependencies: python: '>=3.7' hash: - md5: c6653a1ed0c4a48ace64ab68a0bf9b27 - sha256: ae9d26949fcf8130d899e6bc22ed8afab40adcee782d79e0d82e0799960785af + md5: fd006afc4115740d8d52887ee813f262 + sha256: d17f6b5ae744e64a337c9dbad21b8d501916eaf0e55564dc81c78c492783d73a manager: conda name: cachetools optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/cachetools-5.2.0-pyhd8ed1ab_0.tar.bz2 - version: 5.2.0 + url: https://conda.anaconda.org/conda-forge/noarch/cachetools-5.3.0-pyhd8ed1ab_0.conda + version: 5.3.0 - category: main dependencies: python: '>=3.7' @@ -6066,18 +6776,18 @@ package: version: 1.1.0 - category: main dependencies: - expat: '>=2.4.9,<3.0a0' + expat: '>=2.5.0,<3.0a0' freetype: '>=2.12.1,<3.0a0' libzlib: '>=1.2.13,<1.3.0a0' hash: - md5: 68c42c630dcf96518bbd9f6525861e06 - sha256: 76df655e7f4885b9f964906d9b0aad5ecd5ae9779b0ca63e875483428c38a528 + md5: 86cc5867dfbee4178118392bae4a3c89 + sha256: f63e6d1d6aef8ba6de4fc54d3d7898a153479888d40ffdf2e4cfad6f92679d34 manager: conda name: fontconfig optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/fontconfig-2.14.1-h5bb23bf_0.tar.bz2 - version: 2.14.1 + url: https://conda.anaconda.org/conda-forge/osx-64/fontconfig-2.14.2-h5bb23bf_0.conda + version: 2.14.2 - category: main dependencies: python: '>=3.10,<3.11.0a0' @@ -6107,14 +6817,14 @@ package: dependencies: python: '>=3.8' hash: - md5: a6966947ba28bbe60f9904653da7fed5 - sha256: 286667d325d52cd866a410da18da5660eb8bcde10dd6eae90403fa462152eff6 + md5: fec8329fc739090f26a7d7803db254f1 + sha256: b3d34bf4924cb80363c1ab57ac821393f118ffaa94f05368bf4044941163b65e manager: conda name: future optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/future-0.18.2-pyhd8ed1ab_6.tar.bz2 - version: 0.18.2 + url: https://conda.anaconda.org/conda-forge/noarch/future-0.18.3-pyhd8ed1ab_0.conda + version: 0.18.3 - category: main dependencies: python: '>=3.6' @@ -6141,16 +6851,28 @@ package: version: '3.4' - category: main dependencies: - python: '' + python: '>=3.7' hash: - md5: 39161f81cc5e5ca45b8226fbb06c6905 - sha256: 9423ded508ebda87dae21d7876134e406ffeb88e6059f3fe1a909d180c351959 + md5: f800d2da156d08e289b14e87e43c1ae5 + sha256: 38740c939b668b36a50ef455b077e8015b8c9cf89860d421b3fff86048f49666 manager: conda name: iniconfig optional: false platform: osx-64 - url: 
https://conda.anaconda.org/conda-forge/noarch/iniconfig-1.1.1-pyh9f0ad1d_0.tar.bz2 - version: 1.1.1 + url: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.0.0-pyhd8ed1ab_0.conda + version: 2.0.0 +- category: main + dependencies: + python: '>=3.7' + hash: + md5: 2cfa3e1cf3fb51bb9b17acc5b5e9ea11 + sha256: 95ac5f9ee95fd4e34dc051746fc86016d3d4f6abefed113e2ede049d59ec2991 + manager: conda + name: jmespath + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/jmespath-1.0.1-pyhd8ed1ab_0.tar.bz2 + version: 1.0.1 - category: main dependencies: libcxx: '>=14.0.4' @@ -6230,14 +6952,29 @@ package: libzlib: '>=1.2.13,<1.3.0a0' openssl: '>=3.0.7,<4.0a0' hash: - md5: 9607f99f5d54d0bf8b2168d000a1ffa8 - sha256: 07f86d8962975563ff54a42279a728dab94cbc89ceddccede6c73c91c65690cc + md5: 60c31c2382d8e2334b1903a5e47bd85e + sha256: 482c06384d8f1817e7893be0a24c93bd9a114c8af7724add768a922647756636 manager: conda name: libpq optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libpq-15.1-h3640bf0_2.conda + url: https://conda.anaconda.org/conda-forge/osx-64/libpq-15.1-h3640bf0_3.conda version: '15.1' +- category: main + dependencies: + libcxx: '>=14.0.4' + libevent: '>=2.1.10,<2.1.11.0a0' + libzlib: '>=1.2.12,<1.3.0a0' + openssl: '>=3.0.5,<4.0a0' + hash: + md5: 7fb3e8028392bea6290fb0800312dfe9 + sha256: 726de207a1d98bc5afbdaae97a3f4ab58a228114563cdf83cf8936326b9fbbb5 + manager: conda + name: libthrift + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/libthrift-0.16.0-h16802d8_2.tar.bz2 + version: 0.16.0 - category: main dependencies: giflib: '>=5.2.1,<5.3.0a0' @@ -6284,14 +7021,14 @@ package: python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: 232c20719e4290fa284ae1e9a4661dfa - sha256: 45aa996770bb49fdd6bc68e24b301932c2bb592522e3798f0c80e0206c352b8b + md5: a230aa9172440ace9a1b33a74f7b6fbd + sha256: d71c23929c40f9fb1ed3dc03005ce720e091fd92559d649358f77a55b987f948 manager: conda name: markupsafe optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/markupsafe-2.1.1-py310h90acd4f_2.tar.bz2 - version: 2.1.1 + url: https://conda.anaconda.org/conda-forge/osx-64/markupsafe-2.1.2-py310h90acd4f_0.conda + version: 2.1.2 - category: main dependencies: python: '>=3.10,<3.11.0a0' @@ -6321,14 +7058,14 @@ package: dependencies: python: '>=3.8' hash: - md5: bb45ff9deddb045331fd039949f39650 - sha256: a8e3531fdb6f9acfde885dd94c8639c020013215dab98ff4ed82db7aa745277a + md5: 88e40007414ea9a13f8df20fcffa87e2 + sha256: edd149a40ea746ce17c1b135c72a1646810e99071bedb7d808914cc31b3c8a5d manager: conda name: networkx optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/networkx-2.8.8-pyhd8ed1ab_0.tar.bz2 - version: 2.8.8 + url: https://conda.anaconda.org/conda-forge/noarch/networkx-3.0-pyhd8ed1ab_0.conda + version: '3.0' - category: main dependencies: libcxx: '>=13.0.1' @@ -6363,14 +7100,14 @@ package: dependencies: python: '>=3.7' hash: - md5: 0e8e1bd93998978fc3125522266d12db - sha256: 163f26e55246c506a75551ca01f35c7d4d533aee6db5c4cf2d598ae253e956b8 + md5: 1ff2e3ca41f0ce16afec7190db28288b + sha256: 00288f5e5e841711e8b8fef1f1242c858d8ef99ccbe5d7e0df4789d5d8d40645 manager: conda name: packaging optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/packaging-22.0-pyhd8ed1ab_0.conda - version: '22.0' + url: https://conda.anaconda.org/conda-forge/noarch/packaging-23.0-pyhd8ed1ab_0.conda + version: '23.0' - category: main 
dependencies: python: '>=3.10,<3.11.0a0' @@ -6448,14 +7185,14 @@ package: dependencies: python: '>=3.6' hash: - md5: c8d7e34ca76d6ecc03b84bedfd99d689 - sha256: 000f38e7ce7f020e2ce4d5024d3ffa63fcd65077edfe2182862965835f560525 + md5: f59d49a7b464901cf714b9e7984d01a2 + sha256: 93cfc7a92099e26b0575a343da4a667b52371cc38e4dee4ee264dc041ef77bac manager: conda name: pytz optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/pytz-2022.7-pyhd8ed1ab_0.conda - version: '2022.7' + url: https://conda.anaconda.org/conda-forge/noarch/pytz-2022.7.1-pyhd8ed1ab_0.conda + version: 2022.7.1 - category: main dependencies: python: '>=3.10,<3.11.0a0' @@ -6484,31 +7221,43 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/rtree-1.0.1-py310had9ce37_1.tar.bz2 version: 1.0.1 +- category: main + dependencies: + python: '>=3.9,<3.11' + hash: + md5: 97162d83d23113ffa938c83c91758d1b + sha256: 34b2dc2eee575a80a55dbeaa1580df9ccf0afec7baca99f8bb71da8ff158aa21 + manager: conda + name: serapeum_utils + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/serapeum_utils-0.1.1-pyhd8ed1ab_0.conda + version: 0.1.1 - category: main dependencies: python: '>=3.7' hash: - md5: 9600fc9524d3f821e6a6d58c52f5bf5a - sha256: ea9f7eee2648d8078391cf9f968d848b400349c784e761501fb32ae01d323acf + md5: 9467d520d1457018e055bbbfdf9b7567 + sha256: 053447c82243033e6fd5cacbf7c349552146b135730a87fd942ec517d2b22efb manager: conda name: setuptools optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/setuptools-65.6.3-pyhd8ed1ab_0.conda - version: 65.6.3 + url: https://conda.anaconda.org/conda-forge/noarch/setuptools-66.1.1-pyhd8ed1ab_0.conda + version: 66.1.1 - category: main dependencies: python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: f82f109ba2962177560d530cbd89da6d - sha256: 0bbf610a1fab786769ad282f193886bbd83faf63c4ee96037957e5d5e9e72e22 + md5: 638662025a3d3afb4dfde29a38eed011 + sha256: 404ce5ebe8276a779a4a6d9a24f438698a5c08d8b7274c53a07f95d8ac043b3a manager: conda name: simplejson optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/simplejson-3.18.0-py310h90acd4f_0.tar.bz2 - version: 3.18.0 + url: https://conda.anaconda.org/conda-forge/osx-64/simplejson-3.18.1-py310h90acd4f_0.conda + version: 3.18.1 - category: main dependencies: python: '' @@ -6669,6 +7418,35 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/noarch/aiosignal-1.3.1-pyhd8ed1ab_0.tar.bz2 version: 1.3.1 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-checksums: '>=0.1.14,<0.1.15.0a0' + libcxx: '>=14.0.6' + hash: + md5: b395205084711d0d4166c52fd2fe9179 + sha256: 04afcf15b322eba583c1aebb0d787a329cf2b507bb1de80510874b326a15486d + manager: conda + name: aws-c-event-stream + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-event-stream-0.2.18-h38cbd52_0.conda + version: 0.2.18 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-compression: '>=0.2.16,<0.2.17.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + hash: + md5: 44470918a303b221cad457e30cf51ec8 + sha256: c73fb162927206e5f26cb676063760b3d6236cd23667bc97f26ab0a3755a7602 + manager: conda + name: aws-c-http + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-http-0.7.0-h4f27208_0.conda + version: 0.7.0 - category: main dependencies: brotli: '>=1.0.9,<2.0a0' @@ 
-6753,14 +7531,14 @@ package: python_abi: 3.10.* *_cp310 tomli: '' hash: - md5: 36133e1893086dd56ded7da7f6b06089 - sha256: 8d287bfe95277b9212605385ccf295bc9de01af2c14e920860decf80fe5b479e + md5: 0499188bb1d22c9c5dc5d2b70c09c8ed + sha256: 15cd3b10205de193c98100f9a484f2233369cf3d75cbeedd34c9ca9e4a506acc manager: conda name: coverage optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/coverage-7.0.1-py310h90acd4f_0.conda - version: 7.0.1 + url: https://conda.anaconda.org/conda-forge/osx-64/coverage-7.1.0-py310h90acd4f_0.conda + version: 7.1.0 - category: main dependencies: krb5: '>=1.20.1,<1.21.0a0' @@ -6912,6 +7690,24 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/libblas-3.9.0-16_osx64_openblas.tar.bz2 version: 3.9.0 +- category: main + dependencies: + libabseil: 20220623.0 cxx17* + libcrc32c: '>=1.1.2,<1.2.0a0' + libcurl: '>=7.86.0,<8.0a0' + libcxx: '>=14.0.6' + libgrpc: '>=1.51.1,<1.52.0a0' + libprotobuf: '>=3.21.10,<3.22.0a0' + openssl: '>=3.0.7,<4.0a0' + hash: + md5: bc12b19b8bddec2202c734c2e5cb3d87 + sha256: a1db3dc6ef6d4eb22f39f137238cd2a39bc395b026abff7d2db8912ff32a19de + manager: conda + name: libgoogle-cloud + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/libgoogle-cloud-2.5.0-hb5e37a9_1.conda + version: 2.5.0 - category: main dependencies: python: '' @@ -6943,8 +7739,8 @@ package: - category: main dependencies: freetype: '>=2.12.1,<3.0a0' - jpeg: '>=9e,<10a' lcms2: '>=2.14,<3.0a0' + libjpeg-turbo: '>=2.1.4,<3.0a0' libtiff: '>=4.5.0,<4.6.0a0' libwebp-base: '>=1.2.4,<2.0a0' libxcb: '>=1.13,<1.14.0a0' @@ -6954,32 +7750,32 @@ package: python_abi: 3.10.* *_cp310 tk: '>=8.6.12,<8.7.0a0' hash: - md5: d39d65742de1b5b9ae63506d62f30bde - sha256: c661a6681865f0b482fa5ddec5ceb34157d25f6d5f252e8c24719ebc3d05cc2c + md5: f8c80978311bcc1dbf75229280b7556f + sha256: d5ba77adcded9344e888f751327fb85e99a66f94197a498522ad8514b3c172f0 manager: conda name: pillow optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/pillow-9.2.0-py310h306a057_4.conda - version: 9.2.0 + url: https://conda.anaconda.org/conda-forge/osx-64/pillow-9.4.0-py310hab5364c_0.conda + version: 9.4.0 - category: main dependencies: python: '>=3.7' setuptools: '' wheel: '' hash: - md5: da66f2851b9836d3a7c5190082a45f7d - sha256: 7a86b2427abbf5cf695da192ba1c03130115f157297e7bfde65f0a18a345a7bc + md5: 85b35999162ec95f9f999bac15279c02 + sha256: bbffec284bd0e154363e845121f43007e7e64c80412ff13be21909be907b697d manager: conda name: pip optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/pip-22.3.1-pyhd8ed1ab_0.tar.bz2 - version: 22.3.1 + url: https://conda.anaconda.org/conda-forge/noarch/pip-23.0-pyhd8ed1ab_0.conda + version: '23.0' - category: main dependencies: krb5: '>=1.20.1,<1.21.0a0' - libpq: 15.1 h3640bf0_2 + libpq: 15.1 h3640bf0_3 libxml2: '>=2.10.3,<2.11.0a0' libzlib: '>=1.2.13,<1.3.0a0' openssl: '>=3.0.7,<4.0a0' @@ -6988,13 +7784,13 @@ package: tzdata: '' zlib: '' hash: - md5: 92e70337e0fdfa816d2a958f307220d9 - sha256: 1e5932a4df047eeadcd0945cfec7586984fb251f3c8f11199df59f5bddbac305 + md5: ac3e012d910a8d9688a6a82aaf2be8ce + sha256: 838e89e47cd33175236b1cc2f3d3104aac9d14553f69812b6d4593febb8319fe manager: conda name: postgresql optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/postgresql-15.1-hbea33b9_2.conda + url: https://conda.anaconda.org/conda-forge/osx-64/postgresql-15.1-hbea33b9_3.conda version: '15.1' - category: main 
dependencies: @@ -7066,14 +7862,14 @@ package: python: '>=3.8' tomli: '>=1.0.0' hash: - md5: ac82c7aebc282e6ac0450fca012ca78c - sha256: 854233dc2d0d64219b7e951ccf49c1f32332c6fc7085ecb62cc18bc1f4e791b0 + md5: f0be05afc9c9ab45e273c088e00c258b + sha256: d298dfe6c53555c9fb5662f5f936e621cddd3b0a7031789375b82a1ee3b3a96b manager: conda name: pytest optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.2.0-pyhd8ed1ab_2.tar.bz2 - version: 7.2.0 + url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.2.1-pyhd8ed1ab_0.conda + version: 7.2.1 - category: main dependencies: python: '>=3.6' @@ -7167,6 +7963,36 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/noarch/async-timeout-4.0.2-pyhd8ed1ab_0.tar.bz2 version: 4.0.2 +- category: main + dependencies: + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-c-sdkutils: '>=0.1.7,<0.1.8.0a0' + hash: + md5: 523fe8df9dbd067b41bfa6c328958e27 + sha256: 570012f24d637fcf81007245cc2df4eb7ea43c26d6457d4d938f00d336974c1e + manager: conda + name: aws-c-auth + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-auth-0.6.21-h268417e_3.conda + version: 0.6.21 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + hash: + md5: 98d1ac78f976f5906e94694012d00803 + sha256: abe5a05090b7c4e98b37e40cfa2ed816bd339795232c7fd2aa78c53e097eb48a + manager: conda + name: aws-c-mqtt + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-mqtt-0.7.13-h32e11d9_12.conda + version: 0.7.13 - category: main dependencies: jinja2: '' @@ -7228,14 +8054,14 @@ package: python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: 7f0791ebd6b9c996fbd16ed1368b7eee - sha256: 16cfba388e76b1b22b31a0f9ec864f5d4e19902f72eb65b29e2eebb2a1bdc9db + md5: fa9ffedc2323bfec787a09e798bb93b7 + sha256: f3dfda5cf3abcdd94d3d7a3b2a88ee4c5a7c22999d00f57f875d8182086b7b8d manager: conda name: cryptography optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/cryptography-38.0.4-py310hdd0c95c_0.conda - version: 38.0.4 + url: https://conda.anaconda.org/conda-forge/osx-64/cryptography-39.0.0-py310hdd0c95c_0.conda + version: 39.0.0 - category: main dependencies: cloudpickle: '>=1.1.1' @@ -7291,14 +8117,14 @@ package: protobuf: '>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5' python: '>=3.7' hash: - md5: 35947a7b1f5319de636d74ce38dcf131 - sha256: 88c2be80b3c4ca97f5259b6c6a814b730e6ab4d09c15dbbe60df779c3a7416f9 + md5: cbf8b4569c1d2a0a6077d34a2d38333e + sha256: 1b2a9ae4540e3056a7eaf126a4939360f521854c8a4aa04f10ed4c80da4edc7e manager: conda name: googleapis-common-protos optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/googleapis-common-protos-1.57.0-pyhd8ed1ab_3.conda - version: 1.57.0 + url: https://conda.anaconda.org/conda-forge/noarch/googleapis-common-protos-1.57.1-pyhd8ed1ab_0.conda + version: 1.57.1 - category: main dependencies: hdf5: '>=1.12.2,<1.12.3.0a0' @@ -7476,14 +8302,14 @@ package: zlib: '' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: 8518a3ed8dfc2ff49bc5eced01d697b0 - sha256: f852dadc0a128ca7f9e629aa95287c247c8886ada566965a04c2c1ea672676c4 + md5: a10738d4788cf6b0b0d9bff2e324b942 + sha256: 0165e3597571c80b5d50af7917a048ffe70e7419cd91caf4bf69999de5a0e01d manager: conda name: 
tiledb optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/tiledb-2.13.0-h8b9cbf0_1.conda - version: 2.13.0 + url: https://conda.anaconda.org/conda-forge/osx-64/tiledb-2.13.2-h8b9cbf0_0.conda + version: 2.13.2 - category: main dependencies: aiosignal: '>=1.1.2' @@ -7504,19 +8330,36 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/aiohttp-3.8.3-py310h90acd4f_1.tar.bz2 version: 3.8.3 +- category: main + dependencies: + aws-c-auth: '>=0.6.21,<0.6.22.0a0' + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-checksums: '>=0.1.14,<0.1.15.0a0' + hash: + md5: 923b1931d409ec0fdeef60022744a2ae + sha256: 8c193baaf8a86651c016236a8a627127e7ceee1c9f9a2877b9baa3a7171739a4 + manager: conda + name: aws-c-s3 + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-s3-0.2.3-h9eec77e_0.conda + version: 0.2.3 - category: main dependencies: google-crc32c: '>=1.0,<2.0.0dev' python: '>=3.7' hash: - md5: d8e92214f92379047780fd31bc8b1f94 - sha256: ff44d8c49f39afbcd2840f446a262a28b3f6f232be97604b55439dfed9756e38 + md5: a0d4c902824b3188a61df18c1e8bbf5e + sha256: d997737f75ff1132374f791b267e8a4322652a6b172da885abfc1d4bff18e883 manager: conda name: google-resumable-media optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-resumable-media-2.4.0-pyhd8ed1ab_0.tar.bz2 - version: 2.4.0 + url: https://conda.anaconda.org/conda-forge/noarch/google-resumable-media-2.4.1-pyhd8ed1ab_0.conda + version: 2.4.1 - category: main dependencies: blosc: '>=1.21.2,<2.0a0' @@ -7535,7 +8378,7 @@ package: lerc: '>=4.0.0,<5.0a0' libcurl: '>=7.87.0,<8.0a0' libcxx: '>=14.0.6' - libdeflate: '>=1.14,<1.15.0a0' + libdeflate: '>=1.17,<1.18.0a0' libiconv: '>=1.17,<2.0a0' libkml: '>=1.3.0,<1.4.0a0' libnetcdf: '>=4.8.1,<4.8.2.0a0' @@ -7554,19 +8397,19 @@ package: poppler: '>=22.12.0,<22.13.0a0' postgresql: '' proj: '>=9.1.0,<9.1.1.0a0' - tiledb: '>=2.13.0,<2.14.0a0' + tiledb: '>=2.13.2,<2.14.0a0' xerces-c: '>=3.2.4,<3.3.0a0' xz: '>=5.2.6,<6.0a0' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: 480278ff024e51c058c7dd60043dfffd - sha256: 58edea4780185d2b44a36932c365e272247a1719af3aa354dacf960d99b41fb5 + md5: 1ae43833d5a0c8d7504a991eb13e2439 + sha256: f077161cbbfe41d687804237adb169949ed911432e4a23786aab1fa017ef1b4a manager: conda name: libgdal optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libgdal-3.6.1-h44a409f_2.conda - version: 3.6.1 + url: https://conda.anaconda.org/conda-forge/osx-64/libgdal-3.6.2-h623d8b8_3.conda + version: 3.6.2 - category: main dependencies: libblas: '>=3.9.0,<4.0a0' @@ -7576,27 +8419,48 @@ package: python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: cc6930f1a95f169e2caedb1b808bf7f7 - sha256: 4318194b73e93e018af16da9dd7f9060e481c6beb3a4894bcfecdce894e95200 + md5: fdc1dca5ea5064c9ea298c08b3dc786d + sha256: 9d2d75d378faba1bb07997fe72acad931afac3ec596638f91e5bc0fade0fccbc manager: conda name: numpy optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/numpy-1.23.5-py310h1b7c290_0.conda - version: 1.23.5 + url: https://conda.anaconda.org/conda-forge/osx-64/numpy-1.24.1-py310h788a5b3_0.conda + version: 1.24.1 - category: main dependencies: - cryptography: '>=38.0.0,<39' + cryptography: '>=38.0.0,<40' python: '>=3.6' hash: - md5: fbfa0a180d48c800f922a10a114a8632 - sha256: 
42f04dded77ac2597108378d62b121697d0e982aba7b20a462a7239030563628 + md5: d41957700e83bbb925928764cb7f8878 + sha256: adbf8951f22bfa950b9e24394df1ef1d2b2d7dfb194d91c7f42bc11900695785 manager: conda name: pyopenssl optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/pyopenssl-22.1.0-pyhd8ed1ab_0.tar.bz2 - version: 22.1.0 + url: https://conda.anaconda.org/conda-forge/noarch/pyopenssl-23.0.0-pyhd8ed1ab_0.conda + version: 23.0.0 +- category: main + dependencies: + aws-c-auth: '>=0.6.21,<0.6.22.0a0' + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-event-stream: '>=0.2.18,<0.2.19.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-c-mqtt: '>=0.7.13,<0.7.14.0a0' + aws-c-s3: '>=0.2.3,<0.2.4.0a0' + aws-checksums: '>=0.1.14,<0.1.15.0a0' + libcxx: '>=14.0.6' + hash: + md5: 876c912a99faefc5310605bbf2d135f3 + sha256: 19ab6a18b08756d13be6cc6fb5f1e774f65ff475d7981356341a08d5f85f6149 + manager: conda + name: aws-crt-cpp + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-crt-cpp-0.18.16-hd0be3c5_10.conda + version: 0.18.16 - category: main dependencies: numpy: '>=1.21.6,<2.0a0' @@ -7613,37 +8477,52 @@ package: version: 1.6.2 - category: main dependencies: - libcxx: '>=14.0.4' + libcxx: '>=14.0.6' numpy: '>=1.16' python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: b290d056085b3eaca8fbee4d40421d73 - sha256: 4fd2bf004dab27b3d99405bcf67102875c3215a9a65192291c5c1a574cb8901b + md5: bc714cc57ec6422105ed991167987a9d + sha256: 81cc479e4411d5833ed5798145c64490ccee6ad998c0712e57686d369ce90ba7 manager: conda name: contourpy optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/contourpy-1.0.6-py310ha23aa8a_0.tar.bz2 - version: 1.0.6 + url: https://conda.anaconda.org/conda-forge/osx-64/contourpy-1.0.7-py310ha23aa8a_0.conda + version: 1.0.7 - category: main dependencies: hdf5: '>=1.12.2,<1.12.3.0a0' libcxx: '>=14.0.6' - libgdal: 3.6.1 h44a409f_2 + libgdal: 3.6.2 h623d8b8_3 numpy: '>=1.21.6,<2.0a0' openssl: '>=3.0.7,<4.0a0' python: '>=3.10,<3.11.0a0' python_abi: 3.10.* *_cp310 hash: - md5: c387729b468c2ca5f17f3debf17ac94a - sha256: 12bc39b17dd277542a6f8bd7584b12272460663bcc02cb301bd6ab52d3ba348e + md5: eebf4977fcaa1f20f94b55af26110eab + sha256: 043c842b3d96f3aa4ce491f12f8b5510aade83d74566655894f18d81837cfc81 manager: conda name: gdal optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/gdal-3.6.1-py310h5abc6fc_2.conda - version: 3.6.1 + url: https://conda.anaconda.org/conda-forge/osx-64/gdal-3.6.2-py310h5abc6fc_3.conda + version: 3.6.2 +- category: main + dependencies: + libcxx: '>=14.0.4' + numpy: '' + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 + hash: + md5: 2cc21b703261ca61e03d81ed565207e8 + sha256: 8586a4e6686cc27b143e8c49f91ad2d11c098046f53dc8e09106a6eb22c11e22 + manager: conda + name: h3-py + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/h3-py-3.7.4-py310h7a76584_1.tar.bz2 + version: 3.7.4 - category: main dependencies: blosc: '>=1.21.0,<2.0a0' @@ -7724,14 +8603,14 @@ package: python_abi: 3.10.* *_cp310 pytz: '>=2020.1' hash: - md5: ce0b1639b2084753be9ec8cabb754553 - sha256: a99ba2a03b746d5234ec5ec94401cde8c4a04740af1b0c0f42e273f3e8026c79 + md5: 00e667c3b1935c6c58f0717497fd5f1e + sha256: 1e93c4041ed757cf400a49439d9a11e1d47506c8a1d18d265c000695e39d1cdc manager: conda name: pandas optional: false platform: osx-64 - url: 
https://conda.anaconda.org/conda-forge/osx-64/pandas-1.5.2-py310hecf8f37_0.conda - version: 1.5.2 + url: https://conda.anaconda.org/conda-forge/osx-64/pandas-1.5.3-py310hecf8f37_0.conda + version: 1.5.3 - category: main dependencies: numpy: '>=1.19.5,<2.0a0' @@ -7741,31 +8620,11 @@ package: md5: 87fb002f1348c0926fe803f834b0f7cb sha256: d3b7522c49e18af9d21f1f6dd764381c18d34cb3fb640a18d8561343f3ca721a manager: conda - name: pywavelets - optional: false - platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/pywavelets-1.3.0-py39h86b5767_0.tar.bz2 - version: 1.3.0 -- category: main - dependencies: - libblas: '>=3.9.0,<4.0a0' - libcblas: '>=3.9.0,<4.0a0' - libcxx: '>=14.0.4' - libgfortran: 5.* - libgfortran5: '>=11.3.0' - liblapack: '>=3.9.0,<4.0a0' - numpy: '>=1.21.6,<2.0a0' - python: '>=3.10,<3.11.0a0' - python_abi: 3.10.* *_cp310 - hash: - md5: 3875711195383daa898dd18c8800f72c - sha256: 9de4fd82cf5aecdd160cc9985242dd11b20caa207d82d4a273d6a71a4d91a22c - manager: conda - name: scipy + name: pywavelets optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/scipy-1.9.3-py310h240c617_2.tar.bz2 - version: 1.9.3 + url: https://conda.anaconda.org/conda-forge/osx-64/pywavelets-1.3.0-py39h86b5767_0.tar.bz2 + version: 1.3.0 - category: main dependencies: geos: '>=3.11.1,<3.11.2.0a0' @@ -7805,14 +8664,47 @@ package: pysocks: '>=1.5.6,<2.0,!=1.5.7' python: <4.0 hash: - md5: 3078ef2359efd6ecadbc7e085c5e0592 - sha256: 992f2d6ca50c98f865a4f2e4bada23f950e39f33ff7c64614a31ee152ec4d5ae + md5: 01f33ad2e0aaf6b5ba4add50dad5ad29 + sha256: f2f09c44e47946ce631dbc9a8a79bb463ac0f4122aaafdbcc51f200a1e420ca6 manager: conda name: urllib3 optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/urllib3-1.26.13-pyhd8ed1ab_0.conda - version: 1.26.13 + url: https://conda.anaconda.org/conda-forge/noarch/urllib3-1.26.14-pyhd8ed1ab_0.conda + version: 1.26.14 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-event-stream: '>=0.2.18,<0.2.19.0a0' + aws-crt-cpp: '>=0.18.16,<0.18.17.0a0' + libcurl: '>=7.87.0,<8.0a0' + libcxx: '>=14.0.6' + libzlib: '>=1.2.13,<1.3.0a0' + openssl: '>=3.0.7,<4.0a0' + hash: + md5: eb8dbbd85ee46d860423591893af5296 + sha256: 96581d96b1177f11910f5c1a88a53da509db332ec0c7aacbe0eb4875d27c3c18 + manager: conda + name: aws-sdk-cpp + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/aws-sdk-cpp-1.10.57-h19d6a86_1.conda + version: 1.10.57 +- category: main + dependencies: + jmespath: '>=0.7.1,<2.0.0' + python: '>=3.7' + python-dateutil: '>=2.1,<3.0.0' + urllib3: '>=1.25.4,<1.27' + hash: + md5: 766c1b31877841ebd4bbed9274ff81ed + sha256: b03b23c6bef007197788df421ed60d351bb80065c09fcb6c7bdf2884894db226 + manager: conda + name: botocore + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/botocore-1.29.60-pyhd8ed1ab_0.conda + version: 1.29.60 - category: main dependencies: attrs: '>=17' @@ -7821,7 +8713,7 @@ package: cligj: '>=0.5' gdal: '' libcxx: '>=14.0.6' - libgdal: '>=3.6.0,<3.7.0a0' + libgdal: '>=3.6.2,<3.7.0a0' munch: '' numpy: '>=1.21.6,<2.0a0' python: '>=3.10,<3.11.0a0' @@ -7830,14 +8722,14 @@ package: shapely: '' six: '>=1.7' hash: - md5: 34ccda7a32fb6a71ba13228219fd43f1 - sha256: 1782c30d290460df39229f9bfc30c76aeab9d5581fa45d40ad287c54c738e817 + md5: 1a900e5ab730d59d6be5f75d8520c138 + sha256: b3fbabcdbbc19a4e8696c5c2dd442376cb1d30e6d12897a66d6fff43e3897658 manager: conda name: fiona optional: false platform: osx-64 - url: 
https://conda.anaconda.org/conda-forge/osx-64/fiona-1.8.22-py310h3963e5c_5.conda - version: 1.8.22 + url: https://conda.anaconda.org/conda-forge/osx-64/fiona-1.9.0-py310h3963e5c_0.conda + version: 1.9.0 - category: main dependencies: packaging: '' @@ -7863,7 +8755,7 @@ package: fonttools: '>=4.22.0' freetype: '>=2.12.1,<3.0a0' kiwisolver: '>=1.0.1' - libcxx: '>=14.0.4' + libcxx: '>=14.0.6' numpy: '>=1.21.6,<2.0a0' packaging: '>=20.0' pillow: '>=6.2.0' @@ -7872,14 +8764,14 @@ package: python-dateutil: '>=2.7' python_abi: 3.10.* *_cp310 hash: - md5: 08b09d49effc05e5b8f7645497d451b8 - sha256: 7ad8ebb5181465d18c0a14759f2a482cadf343ebf26c95f0e39de5fbc0e6e19c + md5: b20bc15f2a391c0d3b8cb50af6277c28 + sha256: c26da38e9e8638ed75d3c992172834b12bf37edeb0209a5980baf06d8ada8bfa manager: conda name: matplotlib-base optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/matplotlib-base-3.6.2-py310he725631_0.tar.bz2 - version: 3.6.2 + url: https://conda.anaconda.org/conda-forge/osx-64/matplotlib-base-3.6.3-py310he725631_0.conda + version: 3.6.3 - category: main dependencies: cftime: '' @@ -7899,21 +8791,6 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/netcdf4-1.6.2-nompi_py310h6892ea4_100.tar.bz2 version: 1.6.2 -- category: main - dependencies: - numpy: '>=1.4.0' - python: '>=3.6' - scipy: '' - six: '' - hash: - md5: 50ef6b29b1fb0768ca82c5aeb4fb2d96 - sha256: 9d232f9cda05ce1833a7e5b16db4486ddfb71318635047fb64de119d364e0259 - manager: conda - name: patsy - optional: false - platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/patsy-0.5.3-pyhd8ed1ab_0.tar.bz2 - version: 0.5.3 - category: main dependencies: affine: '' @@ -7947,34 +8824,14 @@ package: python: '>=3.7,<4.0' urllib3: '>=1.21.1,<1.27' hash: - md5: 089382ee0e2dc2eae33a04cc3c2bddb0 - sha256: b45d0da6774c8231ab4fef0427b3050e7c54c84dfe453143dd4010999c89e050 + md5: 11d178fc55199482ee48d6812ea83983 + sha256: 22c081b4cdd023a514400413f50efdf2c378f56f2a5ea9d65666aacf4696490a manager: conda name: requests optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/requests-2.28.1-pyhd8ed1ab_1.tar.bz2 - version: 2.28.1 -- category: main - dependencies: - joblib: '>=1.1.1' - libcblas: '>=3.9.0,<4.0a0' - libcxx: '>=14.0.6' - llvm-openmp: '>=14.0.6' - numpy: '>=1.21.6,<2.0a0' - python: '>=3.10,<3.11.0a0' - python_abi: 3.10.* *_cp310 - scipy: '' - threadpoolctl: '>=2.0.0' - hash: - md5: 0bf9a20847233ba57ea31cba1d115696 - sha256: 6433f1b35613f02cfaad95df05e8eff69f31ca581897715a3685e5fdd8c2a19a - manager: conda - name: scikit-learn - optional: false - platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/scikit-learn-1.2.0-py310hcebe997_0.conda - version: 1.2.0 + url: https://conda.anaconda.org/conda-forge/noarch/requests-2.28.2-pyhd8ed1ab_0.conda + version: 2.28.2 - category: main dependencies: imagecodecs: '>=2021.11.20' @@ -7989,27 +8846,6 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/noarch/tifffile-2022.3.25-pyhd8ed1ab_0.tar.bz2 version: 2022.3.25 -- category: main - dependencies: - geos: '>=3.11.1,<3.11.2.0a0' - libcxx: '>=14.0.6' - matplotlib-base: '>=3.1' - numpy: '>=1.21.6,<2.0a0' - pyproj: '>=3.0.0' - pyshp: '>=2.1' - python: '>=3.10,<3.11.0a0' - python_abi: 3.10.* *_cp310 - scipy: '>=0.10' - shapely: '>=1.6.4' - hash: - md5: 23c86535a2835b96243a4ae008e77f9e - sha256: 9d80636aea7c4d5918de8b9d799ffb89f9ede5911610699c033d1a51c1c0c2a5 - manager: conda - name: cartopy - optional: false - platform: osx-64 - 
url: https://conda.anaconda.org/conda-forge/osx-64/cartopy-0.21.1-py310h578c2b2_0.conda - version: 0.21.1 - category: main dependencies: geopy: '' @@ -8058,65 +8894,285 @@ package: rsa: '>=3.1.4,<5' six: '>=1.9.0' hash: - md5: ce0b3b567b3b8f7a3ef5bd43b2fd1a5e - sha256: 5525c0fe34e102d12f66fe96d2bac211cb42332e294718f72c15734a2b618dc4 + md5: 88944e8c28fbd7471213f8b23d40f001 + sha256: d9fbbaf18ca8dff81d004bad336a8cd04be717c8e41cc0ba49c4471f50db9472 manager: conda name: google-auth optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-auth-2.15.0-pyh1a96a4e_0.conda - version: 2.15.0 + url: https://conda.anaconda.org/conda-forge/noarch/google-auth-2.16.0-pyh1a96a4e_1.conda + version: 2.16.0 - category: main dependencies: - networkx: '' - numpy: '>=1.3' - pandas: '>=1.0' - python: '>=3.5' - scikit-learn: '' - scipy: '>=1.0' + aws-crt-cpp: '>=0.18.16,<0.18.17.0a0' + aws-sdk-cpp: '>=1.10.57,<1.10.58.0a0' + bzip2: '>=1.0.8,<2.0a0' + c-ares: '>=1.18.1,<2.0a0' + gflags: '>=2.2.2,<2.3.0a0' + glog: '>=0.6.0,<0.7.0a0' + libabseil: 20220623.0 cxx17* + libbrotlicommon: '>=1.0.9,<1.1.0a0' + libbrotlidec: '>=1.0.9,<1.1.0a0' + libbrotlienc: '>=1.0.9,<1.1.0a0' + libcxx: '>=14.0.6' + libgoogle-cloud: '>=2.5.0,<2.5.1.0a0' + libgrpc: '>=1.51.1,<1.52.0a0' + libprotobuf: '>=3.21.12,<3.22.0a0' + libthrift: '>=0.16.0,<0.16.1.0a0' + libutf8proc: '>=2.8.0,<3.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + lz4-c: '>=1.9.3,<1.10.0a0' + openssl: '>=3.0.7,<4.0a0' + orc: '>=1.8.2,<1.8.3.0a0' + re2: '>=2022.6.1,<2022.6.2.0a0' + snappy: '>=1.1.9,<2.0a0' + zstd: '>=1.5.2,<1.6.0a0' + hash: + md5: 31d88e22bfb8ed80f52a0bad9e6b144b + sha256: 4f5456195a7db06d4d84910a2f3887fffa67fd97191c420d244bca442cfb1259 + manager: conda + name: libarrow + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-10.0.1-h5364784_6_cpu.conda + version: 10.0.1 +- category: main + dependencies: + matplotlib-base: '>=3.6.3,<3.6.4.0a0' + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 + tornado: '>=5' + hash: + md5: 7fd88cd3e040ac6b5a5dbb1834a207e9 + sha256: 1e7229c9b401b3c38483fe13de5052c8d1f20ac0022921b94b9bef6d7108b188 + manager: conda + name: matplotlib + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/matplotlib-3.6.3-py310h2ec42d9_0.conda + version: 3.6.3 +- category: main + dependencies: + appdirs: '>=1.3.0' + packaging: '>=20.0' + python: '>=3.6' + requests: '>=2.19.0' + hash: + md5: 6429e1d1091c51f626b5dcfdd38bf429 + sha256: 1f0548105de86fb2eb6fbb8d3d6cc2004079b8442d232258108687d6cc91eb73 + manager: conda + name: pooch + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/pooch-1.6.0-pyhd8ed1ab_0.tar.bz2 + version: 1.6.0 +- category: main + dependencies: + affine: <3.0 + cligj: '>=0.4' + fiona: '' + numpy: '>=1.9' + python: '>=3.6' + rasterio: '>=1.0' + shapely: '' + simplejson: '' + hash: + md5: 296927619ac3d70fabab55100430ded7 + sha256: 206b37004dd1e6635fff2c2be2b54230b2f6e10820ee4ba8c1a63e00049c139e + manager: conda + name: rasterstats + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/rasterstats-0.17.0-pyhd8ed1ab_0.tar.bz2 + version: 0.17.0 +- category: main + dependencies: + botocore: '>=1.12.36,<2.0a.0' + python: '>=3.7' + hash: + md5: 900e74d8547fbea3af028937df28ed77 + sha256: 0e459ed32b00e96b62c2ab7e2dba0135c73fd980120fe1a7bd49901f2d50760f + manager: conda + name: s3transfer + optional: false + platform: osx-64 + url: 
https://conda.anaconda.org/conda-forge/noarch/s3transfer-0.6.0-pyhd8ed1ab_0.tar.bz2 + version: 0.6.0 +- category: main + dependencies: + libarrow: 10.0.1 h5364784_6_cpu + hash: + md5: 21913386290e476e0e99368d8c526945 + sha256: d6932fa96916fa0247bd0427d0082859c93434fa03f3c655fc3ba4a654fd88c4 + manager: conda + name: arrow-cpp + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/arrow-cpp-10.0.1-h694c41f_6_cpu.conda + version: 10.0.1 +- category: main + dependencies: + botocore: '>=1.29.60,<1.30.0' + jmespath: '>=0.7.1,<2.0.0' + python: '>=3.7' + s3transfer: '>=0.6.0,<0.7.0' + hash: + md5: bdb3a067f03eded198b3fdf7c66ba8c3 + sha256: d81ae5f720229e0a81914ace6ea8e741eced99cb72d34d3742461496bec71e12 + manager: conda + name: boto3 + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/boto3-1.26.60-pyhd8ed1ab_0.conda + version: 1.26.60 +- category: main + dependencies: + matplotlib: '>=3.6.2' + numpy: 1.24.1.* + pip: '>=22.3.1' + python: '>=3.9,<3.11' + serapeum_utils: '>=0.1.1' + hash: + md5: 32cd6422f2e5730a7b5cc9f8c41f968d + sha256: 0bbe8125ef7303883f96239af0973784337c4fc3fa901a052616f71e7b4a6756 + manager: conda + name: cleopatra + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/cleopatra-0.2.7-pyhd8ed1ab_0.conda + version: 0.2.7 +- category: main + dependencies: + google-auth: '>=2.14.1,<3.0dev' + googleapis-common-protos: '>=1.56.2,<2.0dev' + protobuf: '>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5' + python: '>=3.7' + requests: '>=2.18.0,<3.0.0dev' + hash: + md5: 72f60923cfbd91eec24e59c41454cecd + sha256: d5130c0b82d1801d6ec80e6f79a4e603325ed7f462874f43927b4419baa91ae4 + manager: conda + name: google-api-core + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/google-api-core-2.11.0-pyhd8ed1ab_0.conda + version: 2.11.0 +- category: main + dependencies: + google-auth: '' + httplib2: '>=0.15.0' + python: '>=3.6' + six: '' + hash: + md5: 829c632fd23d1d4dd0adeb461a4e6a13 + sha256: c637a9f3c45d1e1b09cc70eb5b5063a3cbd8cc7a7a8512b9b3f464abb748d4bf + manager: conda + name: google-auth-httplib2 + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/google-auth-httplib2-0.1.0-pyhd8ed1ab_1.tar.bz2 + version: 0.1.0 +- category: main + dependencies: + libblas: '>=3.9.0,<4.0a0' + libcblas: '>=3.9.0,<4.0a0' + libcxx: '>=14.0.6' + libgfortran: 5.* + libgfortran5: '>=11.3.0' + liblapack: '>=3.9.0,<4.0a0' + numpy: '>=1.21.6,<2.0a0' + pooch: '' + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 + hash: + md5: 0f05f17c0326731072325ed0f98cc60a + sha256: e5d350b012fa9386f2eec27630ab68a96da44ffdec2d43c745dea2ac5efa5a85 + manager: conda + name: scipy + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/scipy-1.10.0-py310h240c617_0.conda + version: 1.10.0 +- category: main + dependencies: + geos: '>=3.11.1,<3.11.2.0a0' + libcxx: '>=14.0.6' + matplotlib-base: '>=3.1' + numpy: '>=1.21.6,<2.0a0' + pyproj: '>=3.0.0' + pyshp: '>=2.1' + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 + scipy: '>=0.10' + shapely: '>=1.6.4' + hash: + md5: 23c86535a2835b96243a4ae008e77f9e + sha256: 9d80636aea7c4d5918de8b9d799ffb89f9ede5911610699c033d1a51c1c0c2a5 + manager: conda + name: cartopy + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/cartopy-0.21.1-py310h578c2b2_0.conda + version: 0.21.1 +- category: main + 
dependencies: + google-api-core: '>=1.31.5,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0' + google-auth: '>=1.19.0,<3.0.0dev' + google-auth-httplib2: '>=0.1.0' + httplib2: '>=0.15.0,<1dev' + python: '>=3.7' + uritemplate: '>=3.0.1,<5' + hash: + md5: 2fba68326c4a5f7308ae42725253d015 + sha256: 07590faf8e2b3939ff11026d71fc48c909b99995c00eea59f2de5734ee127773 + manager: conda + name: google-api-python-client + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/google-api-python-client-2.74.0-pyhd8ed1ab_0.conda + version: 2.74.0 +- category: main + dependencies: + google-api-core: '>=1.31.6,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0' + google-auth: '>=1.25.0,<3.0dev' + grpcio: '>=1.38.0,<2.0.0dev' + python: '>=3.7' hash: - md5: 908bbfb54da154042c5cbda77b37a3d1 - sha256: 1435305fb0a127b3154e76c0836d44526eeb93e80bd37596128d7ad8fb196d97 + md5: 7a590deea6b9b082c6a24e18c3c83dc9 + sha256: 00fadd9a9425b3062fdf2476ff620aef5c0c0c5bf6b27bf23f7c0cda77f1b240 manager: conda - name: mapclassify + name: google-cloud-core optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/mapclassify-2.4.3-pyhd8ed1ab_0.tar.bz2 - version: 2.4.3 + url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-core-2.3.2-pyhd8ed1ab_0.tar.bz2 + version: 2.3.2 - category: main dependencies: - matplotlib-base: '>=3.6.2,<3.6.3.0a0' - python: '>=3.10,<3.11.0a0' - python_abi: 3.10.* *_cp310 - tornado: '>=5' + arrow-cpp: '>=0.11.0' hash: - md5: 6e03d5c48a5535551daa8ab9e47de708 - sha256: bb115279d61c3912da8b5917b550dcfa433195e447288342038eeb8c9cede7aa + md5: 79a5f78c42817594ae016a7896521a97 + sha256: 15e50657515b791734ba045da5135377404ca37c518b2066b9c6451c65cd732e manager: conda - name: matplotlib + name: parquet-cpp optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/matplotlib-3.6.2-py310h2ec42d9_0.tar.bz2 - version: 3.6.2 + url: https://conda.anaconda.org/conda-forge/noarch/parquet-cpp-1.5.1-2.tar.bz2 + version: 1.5.1 - category: main dependencies: - affine: <3.0 - cligj: '>=0.4' - fiona: '' - numpy: '>=1.9' + numpy: '>=1.4.0' python: '>=3.6' - rasterio: '>=1.0' - shapely: '' - simplejson: '' + scipy: '' + six: '' hash: - md5: 296927619ac3d70fabab55100430ded7 - sha256: 206b37004dd1e6635fff2c2be2b54230b2f6e10820ee4ba8c1a63e00049c139e + md5: 50ef6b29b1fb0768ca82c5aeb4fb2d96 + sha256: 9d232f9cda05ce1833a7e5b16db4486ddfb71318635047fb64de119d364e0259 manager: conda - name: rasterstats + name: patsy optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/rasterstats-0.17.0-pyhd8ed1ab_0.tar.bz2 - version: 0.17.0 + url: https://conda.anaconda.org/conda-forge/noarch/patsy-0.5.3-pyhd8ed1ab_0.tar.bz2 + version: 0.5.3 - category: main dependencies: cloudpickle: '>=0.2.1' @@ -8143,6 +9199,26 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/osx-64/scikit-image-0.19.2-py39h4d6be9b_0.tar.bz2 version: 0.19.2 +- category: main + dependencies: + joblib: '>=1.1.1' + libcblas: '>=3.9.0,<4.0a0' + libcxx: '>=14.0.6' + llvm-openmp: '>=14.0.6' + numpy: '>=1.21.6,<2.0a0' + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 + scipy: '' + threadpoolctl: '>=2.0.0' + hash: + md5: bf0b2deb9b519872e481020b20883c8c + sha256: cbf6d6df338fd7dc4bd76b77a7f07b2aeb9e1ee7291061200e14a1c01289be7f + manager: conda + name: scikit-learn + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/scikit-learn-1.2.1-py310hcebe997_0.conda + version: 1.2.1 - category: main dependencies: 
matplotlib-base: '>=3.1,!=3.6.1' @@ -8160,6 +9236,78 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/noarch/seaborn-base-0.12.2-pyhd8ed1ab_0.conda version: 0.12.2 +- category: main + dependencies: + google-api-core: '>=1.31.5,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0' + google-auth: '>=1.25.0,<3.0dev' + google-cloud-core: '>=2.3.0,<3.0dev' + google-resumable-media: '>=2.3.2' + protobuf: <5.0.0dev + python: '>=3.6' + requests: '>=2.18.0,<3.0.0dev' + hash: + md5: b6073f255f6fb03c9248fef84715a74e + sha256: 66e054fae25b2e2400e0394b9982f2ed4bdb1f6ebada1ac81745c2a02335e278 + manager: conda + name: google-cloud-storage + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-storage-2.7.0-pyh1a96a4e_0.conda + version: 2.7.0 +- category: main + dependencies: + networkx: '' + numpy: '>=1.3' + pandas: '>=1.0' + python: '>=3.6' + scikit-learn: '' + scipy: '>=1.0' + hash: + md5: db1aeaff6e248db425e049feffded7a9 + sha256: 78aadbd9953976678b6e3298ac26a63cf9390a8794db3ff71f3fe5b6d13a35ca + manager: conda + name: mapclassify + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/mapclassify-2.5.0-pyhd8ed1ab_1.conda + version: 2.5.0 +- category: main + dependencies: + gflags: '>=2.2.2,<2.3.0a0' + libarrow: 10.0.1 h5364784_6_cpu + libcxx: '>=14.0.6' + numpy: '>=1.21.6,<2.0a0' + parquet-cpp: 1.5.1.* + python: '>=3.10,<3.11.0a0' + python_abi: 3.10.* *_cp310 + hash: + md5: 5712d23b59b26f4a24594d03b4976278 + sha256: 16f35cbc8e80335ee5c1b33e9f52be88f1e441d3429af2fd34fa4b492b5ff47d + manager: conda + name: pyarrow + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/osx-64/pyarrow-10.0.1-py310h435aefc_6_cpu.conda + version: 10.0.1 +- category: main + dependencies: + loguru: '>=0.6.0' + matplotlib: '>=3.6.3' + numpy: 1.24.1 + pandas: '>=1.5.3' + pip: '>=22.3.1' + python: '>=3.9,<3.15' + scikit-learn: '>=1.2.1' + scipy: '>=1.9.0' + hash: + md5: 75342a0eb483db01f3b0e9dd3c607815 + sha256: dc9cffa56c55a453d56b4fc1b2f057dde4a0f7dd06c4e8376de75b23e2e5cc5e + manager: conda + name: statista + optional: false + platform: osx-64 + url: https://conda.anaconda.org/conda-forge/noarch/statista-0.1.8-pyhd8ed1ab_0.conda + version: 0.1.8 - category: main dependencies: numpy: '>=1.21.6,<2.0a0' @@ -8180,19 +9328,25 @@ package: version: 0.13.5 - category: main dependencies: - matplotlib: '>=3.5.3' - numpy: 1.23.5.* - pip: '>=22.3.1' - python: '>=3.9,<3.12' + future: '' + google-api-python-client: '>=1.12.1' + google-auth: '>=1.4.1' + google-auth-httplib2: '>=0.0.3' + google-cloud-storage: '' + httplib2: '>=0.9.2,<1dev' + python: '>=3.6' + requests: '' + setuptools: '' + six: '' hash: - md5: 2e7d2a8e819e60c8a4c5ddbb07a6873a - sha256: 624f6ccb286c5fe0d04c5e924ea298c48f3ce5b4394ceb635b1ff1ebfc7cae71 + md5: 08b32b1b1369cff511af1592403bb2af + sha256: 9590c61b4613c52412cb16dcff9a3eca6cc2be685eb45fbba49803f6c552df30 manager: conda - name: cleopatra + name: earthengine-api optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/cleopatra-0.2.4-pyhd8ed1ab_0.conda - version: 0.2.4 + url: https://conda.anaconda.org/conda-forge/noarch/earthengine-api-0.1.338-pyhd8ed1ab_0.conda + version: 0.1.338 - category: main dependencies: fiona: '' @@ -8212,56 +9366,6 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/noarch/geopandas-0.12.2-pyhd8ed1ab_0.conda version: 0.12.2 -- category: main - dependencies: - google-auth: '>=2.14.1,<3.0dev' - googleapis-common-protos: 
'>=1.56.2,<2.0dev' - protobuf: '>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5' - python: '>=3.7' - requests: '>=2.18.0,<3.0.0dev' - hash: - md5: 72f60923cfbd91eec24e59c41454cecd - sha256: d5130c0b82d1801d6ec80e6f79a4e603325ed7f462874f43927b4419baa91ae4 - manager: conda - name: google-api-core - optional: false - platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-api-core-2.11.0-pyhd8ed1ab_0.conda - version: 2.11.0 -- category: main - dependencies: - google-auth: '' - httplib2: '>=0.15.0' - python: '>=3.6' - six: '' - hash: - md5: 829c632fd23d1d4dd0adeb461a4e6a13 - sha256: c637a9f3c45d1e1b09cc70eb5b5063a3cbd8cc7a7a8512b9b3f464abb748d4bf - manager: conda - name: google-auth-httplib2 - optional: false - platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-auth-httplib2-0.1.0-pyhd8ed1ab_1.tar.bz2 - version: 0.1.0 -- category: main - dependencies: - loguru: '>=0.6.0' - matplotlib: '>=3.5.3' - numpy: 1.23.5 - pandas: '>=1.4.4' - pip: '>=22.3.1' - python: '>=3.9,<3.15' - scikit-learn: '>=1.1.1' - scipy: '>=1.9.0' - hash: - md5: 5f4118cfa89158b5b58bac3ac5495cba - sha256: 755f6d603c37c3dfcb48bf1379e81178474fdeec94eabc73af5825f96e8a3e4e - manager: conda - name: statista - optional: false - platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/statista-0.1.7-pyhd8ed1ab_0.conda - version: 0.1.7 - category: main dependencies: cartopy: '' @@ -8281,163 +9385,99 @@ package: platform: osx-64 url: https://conda.anaconda.org/conda-forge/noarch/geoplot-0.5.1-pyhd8ed1ab_0.tar.bz2 version: 0.5.1 -- category: main - dependencies: - google-api-core: '>=1.31.5,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0' - google-auth: '>=1.19.0,<3.0.0dev' - google-auth-httplib2: '>=0.1.0' - httplib2: '>=0.15.0,<1dev' - python: '>=3.7' - uritemplate: '>=3.0.1,<5' - hash: - md5: 04241ec803212136585c4e7738de8543 - sha256: 59d5c1e9afce9be9042900e10ffa804bbe68fb1331fed2ace5d15ce461f83b87 - manager: conda - name: google-api-python-client - optional: false - platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-api-python-client-2.70.0-pyhd8ed1ab_0.conda - version: 2.70.0 -- category: main - dependencies: - google-api-core: '>=1.31.6,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0' - google-auth: '>=1.25.0,<3.0dev' - grpcio: '>=1.38.0,<2.0.0dev' - python: '>=3.7' - hash: - md5: 7a590deea6b9b082c6a24e18c3c83dc9 - sha256: 00fadd9a9425b3062fdf2476ff620aef5c0c0c5bf6b27bf23f7c0cda77f1b240 - manager: conda - name: google-cloud-core - optional: false - platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-core-2.3.2-pyhd8ed1ab_0.tar.bz2 - version: 2.3.2 - category: main dependencies: affine: '>=2.3.1' gdal: '>=3.5.3' geopandas: '>=0.12.2' geopy: '>=2.2.0' + h3-py: '>=3.7.4' loguru: '>=0.6.0' netcdf4: '>=1.6.1' - numpy: 1.23.5 + numpy: 1.24.1 pandas: '>=1.4.4' pip: '>=22.3.1' + pyarrow: '>=10.0.1' pyproj: '>=3.4.0' python: '>=3.9,<3.11' + pyyaml: '>=6.0' rasterio: '>=1.3.0' requests: '>=2.28.1' rtree: '>=1.0.0' shapely: '>=1.8.4,<2' hash: - md5: 4f85b9d893e953d3f330a705bb64dc75 - sha256: e878e39e30761723e4b5f4c66fc250c00026e2ed62823f89cb72e3edc375fdeb + md5: e91e293e82bd2fc7da52944256d6d4e6 + sha256: 947c0bf9ec7c366299c57f8b4cfcdc1320168bd6bfc6ce15921a9f11af821c21 manager: conda name: pyramids optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/pyramids-0.2.11-pyhd8ed1ab_0.conda - version: 0.2.11 + url: 
https://conda.anaconda.org/conda-forge/noarch/pyramids-0.3.2-pyhd8ed1ab_0.conda + version: 0.3.2 - category: main dependencies: - cleopatra: '>=0.2.4' + cleopatra: '>=0.2.7' gdal: '>=3.5.3' geopandas: '>=0.12.2' geoplot: '>=0.5.1' loguru: '>=0.6.0' - numpy: 1.23.5 + numpy: '>=1.24.1' pip: '>=22.3.1' - pyramids: 0.2.11 + pyramids: '>=0.3.2' python: '>=3.9,<3.11' hash: - md5: 6b63a9d1e41e7fb8a9181435b9cb89c5 - sha256: 50b88c8f546f9f9d860ca09316a89d6d5d33d2e815f9f8175c07eaa6dfeb8ea0 + md5: bf10a9e624caf50d57e643a73f91a70c + sha256: 8fe74e096a93b016a5dff3e852ac3079d74a6f7e2d21bf4b05a347e9bcbe61fa manager: conda name: digitalearth optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/digitalearth-0.1.10-pyhd8ed1ab_0.conda - version: 0.1.10 + url: https://conda.anaconda.org/conda-forge/noarch/digitalearth-0.1.11-pyhd8ed1ab_0.conda + version: 0.1.11 - category: main dependencies: + boto3: '>=1.26.50' + earthengine-api: '>=0.1.324' + ecmwf-api-client: '>=1.6.3' + gdal: '>=3.5.3' + joblib: '>=1.2.0' loguru: '>=0.6.0' - numpy: 1.23.5 + netcdf4: '>=1.6.1' + numpy: 1.24.1 + pandas: '>=1.4.4' + pathlib: '>=1.0.1' pip: '>=22.3.1' - pyramids: '>=0.2.11' + pyramids: '>=0.3.2' python: '>=3.9,<3.11' - statista: '>=0.1.7' - hash: - md5: 679b3be1963e704ad7e5a4d45c248c91 - sha256: 6e4f2d622558a179e0c9e0d9ff3faf606ed19e4b22976ef3fec2d869d314c3ed - manager: conda - name: geostatista - optional: false - platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/geostatista-0.1.5-pyhd8ed1ab_0.conda - version: 0.1.5 -- category: main - dependencies: - google-api-core: '>=1.31.5,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0' - google-auth: '>=1.25.0,<3.0dev' - google-cloud-core: '>=2.3.0,<3.0dev' - google-resumable-media: '>=2.3.2' - protobuf: <5.0.0dev - python: '>=3.6' - requests: '>=2.18.0,<3.0.0dev' - hash: - md5: b6073f255f6fb03c9248fef84715a74e - sha256: 66e054fae25b2e2400e0394b9982f2ed4bdb1f6ebada1ac81745c2a02335e278 - manager: conda - name: google-cloud-storage - optional: false - platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-storage-2.7.0-pyh1a96a4e_0.conda - version: 2.7.0 -- category: main - dependencies: - future: '' - google-api-python-client: '>=1.12.1' - google-auth: '>=1.4.1' - google-auth-httplib2: '>=0.0.3' - google-cloud-storage: '' - httplib2: '>=0.9.2,<1dev' - python: '>=3.6' - requests: '' - setuptools: '' - six: '' + pyyaml: '>=6.0' + requests: '>=2.28.1' + serapeum_utils: '>=0.1.1' hash: - md5: 30e3d2c755cf9c0c0483f01ab25c7e59 - sha256: d498cf74bfa54861cc9a45b812d3321d0f482a62025c7abf69df914c94a3c3a9 + md5: 65d70fa30a34f9cbdc9dd130017310af + sha256: d75791533e3ea957c049ff3be8aae020003730d12216d149b6f086d2da66d8db manager: conda - name: earthengine-api + name: earth2observe optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/earthengine-api-0.1.334-pyhd8ed1ab_1.conda - version: 0.1.334 + url: https://conda.anaconda.org/conda-forge/noarch/earth2observe-0.2.2-pyhd8ed1ab_0.conda + version: 0.2.2 - category: main dependencies: - earthengine-api: '>=0.1.324' - ecmwf-api-client: '>=1.6.3' - gdal: '>=3.5.3' - joblib: '>=1.2.0' loguru: '>=0.6.0' - netcdf4: '>=1.6.1' - numpy: 1.23.5 - pandas: '>=1.4.4' + numpy: '>=1.24.1' pip: '>=22.3.1' - pyramids: '>=0.2.11' + pyramids: '>=0.3.2' python: '>=3.9,<3.11' - requests: '>=2.28.1' + statista: '>=0.1.8' hash: - md5: 51d9c69a0c90519b0dda07828319af81 - sha256: 3c4cd924212bac400b9bfafa36ed729240a47eafc01b2c9d251d37012552e3ef + md5: 
7e2419cadcd4ecd1b77855ff3b47634c + sha256: a7ea64da036da19c224d8e61efdf862ae91b932501dad7297d4a364b3329c80a manager: conda - name: earth2observe + name: geostatista optional: false platform: osx-64 - url: https://conda.anaconda.org/conda-forge/noarch/earth2observe-0.1.7-pyhd8ed1ab_0.conda - version: 0.1.7 + url: https://conda.anaconda.org/conda-forge/noarch/geostatista-0.1.6-pyhd8ed1ab_0.conda + version: 0.1.6 - category: main dependencies: {} hash: @@ -8710,14 +9750,14 @@ package: dependencies: ucrt: '>=10.0.20348.0' hash: - md5: c98b6e39006315599b793592bcc3c978 - sha256: 6b6feb349d3414655c2f9c549092689e5f99b487ff7ed9c1f1fda69a5dd4a624 + md5: 25640086ba777e79e5d233d079d7c5fc + sha256: 3a23d4c98bdb87b06bd8af9e42eea34c39a9da52c3dd96ace706234c55422f2d manager: conda name: vs2015_runtime optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/vs2015_runtime-14.32.31332-h1d6e394_9.tar.bz2 - version: 14.32.31332 + url: https://conda.anaconda.org/conda-forge/win-64/vs2015_runtime-14.34.31931-h4c5c07a_10.conda + version: 14.34.31931 - category: main dependencies: fonts-conda-forge: '' @@ -8746,29 +9786,43 @@ package: version: 5.3.0 - category: main dependencies: - vs2015_runtime: '>=14.32.31332' + vs2015_runtime: '>=14.34.31931' hash: - md5: ba28983ef4f6d430827d0e7c5cdd7b48 - sha256: 1ca9e60e4e977be81dbee0ed236c6fb0a459c32957d2c2dc45a114d83f4c70d6 + md5: 52d246d8d14b83c516229be5bb03a163 + sha256: 05d5ae5859e8d097559f5445ffbaeac638c9875e4d2a0c5fd8c4bb1c010d35c1 manager: conda name: vc optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h3d8a991_9.tar.bz2 + url: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-hb6edc58_10.conda version: '14.3' - category: main dependencies: vc: '>=14.1,<15' vs2015_runtime: '>=14.16.27033' hash: - md5: 9cee475625229f107d7e16bfb039d8d9 - sha256: 2d27df13ef1c4f53750981b98f27ade919628c89f99ee03c5928d8091f8f777d + md5: 9cee475625229f107d7e16bfb039d8d9 + sha256: 2d27df13ef1c4f53750981b98f27ade919628c89f99ee03c5928d8091f8f777d + manager: conda + name: aom + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aom-3.3.0-h0e60522_1.tar.bz2 + version: 3.3.0 +- category: main + dependencies: + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 4e7f266991dfc670cb416f5e2a5b8824 + sha256: 5a975abf17ecf11f6cd8b8fb0d46add0990a752c2c74db531400d4242e6e4139 manager: conda - name: aom + name: aws-c-common optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/aom-3.3.0-h0e60522_1.tar.bz2 - version: 3.3.0 + url: https://conda.anaconda.org/conda-forge/win-64/aws-c-common-0.8.5-hcfcfb64_0.tar.bz2 + version: 0.8.5 - category: main dependencies: vc: '>=14.1,<15.0a0' @@ -8836,6 +9890,19 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/win-64/geos-3.11.1-h1537add_0.tar.bz2 version: 3.11.1 +- category: main + dependencies: + vc: '>=14.1,<15.0a0' + vs2015_runtime: '>=14.16.27012' + hash: + md5: e9442160f56fa442d4ff3eb2e4cf0f22 + sha256: d2dbb918efa9c89ead501347cce753bdbac3f5426d42ae5f48eee73790757f54 + manager: conda + name: gflags + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/gflags-2.2.2-ha925a31_1004.tar.bz2 + version: 2.2.2 - category: main dependencies: vc: '>=14.1,<15.0a0' @@ -8930,16 +9997,17 @@ package: version: '20220623.0' - category: main dependencies: - vc: '>=14.1,<15.0a0' - vs2015_runtime: '>=14.16.27012' + ucrt: '>=10.0.20348.0' + 
vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' hash: - md5: ac78b243f1ee03a2412b6e328aa3a12d - sha256: 47f2ef0486d690b2bc34035a165a86c1edcc18d48f1f8aa8eead594afa0dfebf + md5: f98474a8245f55f4a273889dbe7bf193 + sha256: 441f580f90279bd62bd27fb82d0bbbb2c2d9f850fcc4c8781f199c5287cd1499 manager: conda name: libaec optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/libaec-1.0.6-h39d44d4_0.tar.bz2 + url: https://conda.anaconda.org/conda-forge/win-64/libaec-1.0.6-h63175ca_1.conda version: 1.0.6 - category: main dependencies: @@ -8970,17 +10038,18 @@ package: version: 1.1.2 - category: main dependencies: + ucrt: '>=10.0.20348.0' vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: 4366e00d3270eb229c026920474a6dda - sha256: c8b156fc81006234cf898f933b06bed8bb475970cb7983d0eceaf90db65beb8b + md5: ae9dfb57bcb42093a2417aceabb530f7 + sha256: 76e642ca8a11da1b537506447f8089353b6607956c069c938a4bec4de36e1194 manager: conda name: libdeflate optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/libdeflate-1.14-hcfcfb64_0.tar.bz2 - version: '1.14' + url: https://conda.anaconda.org/conda-forge/win-64/libdeflate-1.17-hcfcfb64_0.conda + version: '1.17' - category: main dependencies: vc: '>=14.1,<15.0a0' @@ -9007,6 +10076,19 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/win-64/libiconv-1.17-h8ffe710_0.tar.bz2 version: '1.17' +- category: main + dependencies: + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 24bf30c2957c1bf33b3e1131a88ae17d + sha256: ed081ab5e51b516723f30b49a84f1b5bb201d46d6da58de0ab435df3f01d4eb0 + manager: conda + name: libjpeg-turbo + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/libjpeg-turbo-2.1.4-hcfcfb64_0.tar.bz2 + version: 2.1.4 - category: main dependencies: vc: '>=14.1,<15.0a0' @@ -9047,6 +10129,20 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.40.0-hcfcfb64_0.tar.bz2 version: 3.40.0 +- category: main + dependencies: + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 076894846fe9f068f91c57d158c90cba + sha256: 6efa83e3f2fb9acaf096a18d21d0f679d110934798348c5defc780d4b759a76c + manager: conda + name: libutf8proc + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/libutf8proc-2.8.0-h82a8f57_0.tar.bz2 + version: 2.8.0 - category: main dependencies: vc: '>=14.1,<15' @@ -9089,17 +10185,18 @@ package: version: 1.0.3 - category: main dependencies: - vc: '>=14.1,<15.0a0' - vs2015_runtime: '>=14.16.27012' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' hash: - md5: d12763533276560a931c1bd3df1adf63 - sha256: 1d4b25978dc5f158d235908f1fd541116e9db3f31229bda665c4d3ff6b3979f8 + md5: e34720eb20a33fc3bfb8451dd837ab7a + sha256: a0954b4b1590735ea5f3d0f4579c3883f8ac837387afd5b398b241fda85124ab manager: conda name: lz4-c optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/lz4-c-1.9.3-h8ffe710_1.tar.bz2 - version: 1.9.3 + url: https://conda.anaconda.org/conda-forge/win-64/lz4-c-1.9.4-hcfcfb64_0.conda + version: 1.9.4 - category: main dependencies: m2w64-gcc-libs-core: '' @@ -9120,13 +10217,13 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: e48b661f57b25ddf34996fa8b685182d - sha256: 3f26c38e6167d7351de5bdcf13d1456891c242711e689999c25e50e5898bb660 + md5: b41e56cb0e3c892e1fde259122e825f2 + sha256: 
9322b139708b58e796020a3de88e8c087df90828b70a1c04abaeb6a971e804a6 manager: conda name: openssl optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/openssl-3.0.7-hcfcfb64_1.conda + url: https://conda.anaconda.org/conda-forge/win-64/openssl-3.0.7-hcfcfb64_2.conda version: 3.0.7 - category: main dependencies: @@ -9259,6 +10356,67 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/win-64/zfp-0.5.5-h0e60522_8.tar.bz2 version: 0.5.5 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + openssl: '>=3.0.7,<4.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 0ef44c1017a1b17abe25f9d8f920658c + sha256: d1a2d374adaa43ae4418a8519d0aba1dc6855082098b94304822155dc5eace31 + manager: conda + name: aws-c-cal + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aws-c-cal-0.5.20-h4b5e85a_3.tar.bz2 + version: 0.5.20 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 287574392044166f053bd9d1922e84a6 + sha256: 35532982d2dfd4affa09fac3c5e8c92e20b385e067e68b3e57d0bf7150ad0785 + manager: conda + name: aws-c-compression + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aws-c-compression-0.2.16-h3dc32ea_0.tar.bz2 + version: 0.2.16 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 51e4ad8350c10ade1a16fdbed959ed7a + sha256: 785f07efea8573e7d61add82894a62f16ffc895f0ae2912e4318048662390b03 + manager: conda + name: aws-c-sdkutils + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aws-c-sdkutils-0.1.7-h3dc32ea_0.tar.bz2 + version: 0.1.7 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 58223bc85fd4ac4c7cbf607a079b3d2b + sha256: 19371ba7428da8dee2124550d65fad4391ac63a1da78d89cbef6b9305b61d60e + manager: conda + name: aws-checksums + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aws-checksums-0.1.14-h3dc32ea_0.conda + version: 0.1.14 - category: main dependencies: libiconv: '>=1.17,<2.0.0a0' @@ -9285,6 +10443,20 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/win-64/gettext-0.21.1-h5728263_0.tar.bz2 version: 0.21.1 +- category: main + dependencies: + gflags: '>=2.2.2,<2.3.0a0' + vc: '>=14.1,<15' + vs2015_runtime: '>=14.16.27033' + hash: + md5: fdc11ab9a621f009995e89f52bea3004 + sha256: 482167f378c66ecad9debf13e642013617931fc80971fb6e7d225493dbbfb90b + manager: conda + name: glog + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/glog-0.6.0-h4797de2_0.tar.bz2 + version: 0.6.0 - category: main dependencies: openssl: '>=3.0.7,<4.0a0' @@ -9351,14 +10523,14 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: 393d6ec76308c7124568dd595f95d432 - sha256: fd54f65e225fb545ef373915073097beebfa81aaf5dc8efdf47ee45b686bd2ae + md5: fcc96fd81e1cbff83d87e7c641fecbcf + sha256: 6cb5c5ce662b185825603639cb5fae0d539f861117be6e681b8fc4358f67d59a manager: conda name: libclang13 optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/libclang13-15.0.6-default_h77d9078_0.conda - version: 15.0.6 + url: 
https://conda.anaconda.org/conda-forge/win-64/libclang13-15.0.7-default_h77d9078_0.conda + version: 15.0.7 - category: main dependencies: libzlib: '>=1.2.13,<1.3.0a0' @@ -9419,6 +10591,21 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/win-64/libssh2-1.10.0-h9a1e1f7_3.tar.bz2 version: 1.10.0 +- category: main + dependencies: + libzlib: '>=1.2.12,<1.3.0a0' + openssl: '>=3.0.5,<4.0a0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 1578fe99150f62ae86e77793cb57e312 + sha256: b01bb2ca62f9b83d7fdda264747b3adaa562ffb3e7c00e63f14f9e877b068bea + manager: conda + name: libthrift + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/libthrift-0.16.0-h9ce19ad_2.tar.bz2 + version: 0.16.0 - category: main dependencies: libogg: '>=1.3.4,<1.4.0a0' @@ -9562,31 +10749,43 @@ package: version: 1.2.13 - category: main dependencies: - libzlib: '>=1.2.12,<1.3.0a0' - ucrt: '' - vc: '>=14.1,<15' - vs2015_runtime: '>=14.16.27033' + libzlib: '>=1.2.13,<1.3.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' hash: - md5: 13acb3626fcc8c0577249f3a7b6129f4 - sha256: 109f83494b8bc82d1c41eea92a3cf8303a151674b623df08cc85ca54061cb008 + md5: 62826565682d013b3e2346aaf7bded0e + sha256: ef23b2eb748b0b2139755e5a20d49a642340af1313017918dc91b4a4ce8f3bd9 manager: conda name: zstd optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/zstd-1.5.2-h7755175_4.tar.bz2 + url: https://conda.anaconda.org/conda-forge/win-64/zstd-1.5.2-h12be248_6.conda version: 1.5.2 - category: main dependencies: - python: '>=3.6' + python: '>=3.7' hash: - md5: 466dc5c1b75c93180efbd81d99dc29b0 - sha256: f3d58687fb000acc5d5f773d6e633ffb382575895abbc8db3d9b8e3996b05d39 + md5: ae5f4ad87126c55ba3f690ef07f81d64 + sha256: fbf0288cae7c6e5005280436ff73c95a36c5a4c978ba50175cc8e3eb22abc5f9 manager: conda name: affine optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/affine-2.3.1-pyhd8ed1ab_0.tar.bz2 - version: 2.3.1 + url: https://conda.anaconda.org/conda-forge/noarch/affine-2.4.0-pyhd8ed1ab_0.conda + version: 2.4.0 +- category: main + dependencies: + python: '' + hash: + md5: 5f095bc6454094e96f146491fd03633b + sha256: ae9fb8f68281f84482f2c234379aa12405a9e365151d43af20b3ae1f17312111 + manager: conda + name: appdirs + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/noarch/appdirs-1.4.4-pyh9f0ad1d_0.tar.bz2 + version: 1.4.4 - category: main dependencies: python: '>=3.5' @@ -9599,6 +10798,22 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/noarch/attrs-22.2.0-pyh71513ae_0.conda version: 22.2.0 +- category: main + dependencies: + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: e53091607e264333f50e5bde582da2b7 + sha256: e7490bc206880fdd42f2b074be3aa181c18656b06f3f10faa5d46c729ea403a4 + manager: conda + name: aws-c-io + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aws-c-io-0.13.12-hea55e33_1.conda + version: 0.13.12 - category: main dependencies: libzlib: '>=1.2.13,<1.3.0a0' @@ -9668,14 +10883,14 @@ package: dependencies: python: '>=3.7' hash: - md5: c6653a1ed0c4a48ace64ab68a0bf9b27 - sha256: ae9d26949fcf8130d899e6bc22ed8afab40adcee782d79e0d82e0799960785af + md5: fd006afc4115740d8d52887ee813f262 + sha256: d17f6b5ae744e64a337c9dbad21b8d501916eaf0e55564dc81c78c492783d73a manager: conda 
name: cachetools optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/cachetools-5.2.0-pyhd8ed1ab_0.tar.bz2 - version: 5.2.0 + url: https://conda.anaconda.org/conda-forge/noarch/cachetools-5.3.0-pyhd8ed1ab_0.conda + version: 5.3.0 - category: main dependencies: python: '>=3.7' @@ -9807,14 +11022,14 @@ package: dependencies: python: '>=3.8' hash: - md5: a6966947ba28bbe60f9904653da7fed5 - sha256: 286667d325d52cd866a410da18da5660eb8bcde10dd6eae90403fa462152eff6 + md5: fec8329fc739090f26a7d7803db254f1 + sha256: b3d34bf4924cb80363c1ab57ac821393f118ffaa94f05368bf4044941163b65e manager: conda name: future optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/future-0.18.2-pyhd8ed1ab_6.tar.bz2 - version: 0.18.2 + url: https://conda.anaconda.org/conda-forge/noarch/future-0.18.3-pyhd8ed1ab_0.conda + version: 0.18.3 - category: main dependencies: python: '>=3.6' @@ -9858,16 +11073,28 @@ package: version: '3.4' - category: main dependencies: - python: '' + python: '>=3.7' hash: - md5: 39161f81cc5e5ca45b8226fbb06c6905 - sha256: 9423ded508ebda87dae21d7876134e406ffeb88e6059f3fe1a909d180c351959 + md5: f800d2da156d08e289b14e87e43c1ae5 + sha256: 38740c939b668b36a50ef455b077e8015b8c9cf89860d421b3fff86048f49666 manager: conda name: iniconfig optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/iniconfig-1.1.1-pyh9f0ad1d_0.tar.bz2 - version: 1.1.1 + url: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.0.0-pyhd8ed1ab_0.conda + version: 2.0.0 +- category: main + dependencies: + python: '>=3.7' + hash: + md5: 2cfa3e1cf3fb51bb9b17acc5b5e9ea11 + sha256: 95ac5f9ee95fd4e34dc051746fc86016d3d4f6abefed113e2ede049d59ec2991 + manager: conda + name: jmespath + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/noarch/jmespath-1.0.1-pyhd8ed1ab_0.tar.bz2 + version: 1.0.1 - category: main dependencies: python: '>=3.10,<3.11.0a0' @@ -9886,20 +11113,20 @@ package: version: 1.4.4 - category: main dependencies: - libclang13: 15.0.6 default_h77d9078_0 + libclang13: 15.0.7 default_h77d9078_0 libzlib: '>=1.2.13,<1.3.0a0' ucrt: '>=10.0.20348.0' vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: aa7825a9f921f0cbc96b9d30e71389fc - sha256: 8c86c3e3e0cdd70d77571018c9e60f27fbb351bb6f18e2d306336fed0b24dcd1 + md5: af562fa0445ab8a9cca5830c24113150 + sha256: 56fa24efd4a0d201b28911ffae11221cc8a93337a2e868ca5587a59ecb78bc08 manager: conda name: libclang optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/libclang-15.0.6-default_h77d9078_0.conda - version: 15.0.6 + url: https://conda.anaconda.org/conda-forge/win-64/libclang-15.0.7-default_h77d9078_0.conda + version: 15.0.7 - category: main dependencies: krb5: '>=1.20.1,<1.21.0a0' @@ -9981,19 +11208,19 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: f8d4199ae61ff1648ab0477aef3a7030 - sha256: 5b05658f836a6700442b523119f899ac8386e67226caa09a6b3a0714d5e8fe70 + md5: df733c6a37616fc0adc225795ea80423 + sha256: b74030343e1d97666bcf55fdb97308fe30c2137b6517dda3b164fb2172869685 manager: conda name: libpq optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/libpq-15.1-ha9684e8_2.conda + url: https://conda.anaconda.org/conda-forge/win-64/libpq-15.1-ha9684e8_3.conda version: '15.1' - category: main dependencies: jpeg: '>=9e,<10a' lerc: '>=4.0.0,<5.0a0' - libdeflate: '>=1.14,<1.15.0a0' + libdeflate: '>=1.17,<1.18.0a0' libzlib: '>=1.2.13,<1.3.0a0' ucrt: '>=10.0.20348.0' vc: 
'>=14.2,<15' @@ -10001,13 +11228,13 @@ package: xz: '>=5.2.6,<6.0a0' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: d9b568beb74c97e53dbb9531b14f69c0 - sha256: dc13f42b392e05a1e705ceebcde82ed3e15663b511e78585d26fc4b9bf628b59 + md5: 2e003e276cc1375192569c96afd3d984 + sha256: 86cf8066db11f84b506ba246944901584ab199dfe7490586f5e9b6c299e3b8e0 manager: conda name: libtiff optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/libtiff-4.5.0-hc4f729c_0.conda + url: https://conda.anaconda.org/conda-forge/win-64/libtiff-4.5.0-hf8721a0_2.conda version: 4.5.0 - category: main dependencies: @@ -10029,14 +11256,14 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: 24e57be449c71b8edc52cc6a48ff846d - sha256: 76576c5fd2a6ad1e987b3df4bba55dab05c0f4f35e4f54c6ddbb9559aaf01537 + md5: d75d2a8a37db95ba86660bf57969b7e2 + sha256: f99b78cbbce778a8313950517fb5400c63b4606844b35fe3f7c2308b087531b7 manager: conda name: markupsafe optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/markupsafe-2.1.1-py310h8d17308_2.tar.bz2 - version: 2.1.1 + url: https://conda.anaconda.org/conda-forge/win-64/markupsafe-2.1.2-py310h8d17308_0.conda + version: 2.1.2 - category: main dependencies: python: '>=3.10,<3.11.0a0' @@ -10069,26 +11296,26 @@ package: dependencies: python: '>=3.8' hash: - md5: bb45ff9deddb045331fd039949f39650 - sha256: a8e3531fdb6f9acfde885dd94c8639c020013215dab98ff4ed82db7aa745277a + md5: 88e40007414ea9a13f8df20fcffa87e2 + sha256: edd149a40ea746ce17c1b135c72a1646810e99071bedb7d808914cc31b3c8a5d manager: conda name: networkx optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/networkx-2.8.8-pyhd8ed1ab_0.tar.bz2 - version: 2.8.8 + url: https://conda.anaconda.org/conda-forge/noarch/networkx-3.0-pyhd8ed1ab_0.conda + version: '3.0' - category: main dependencies: python: '>=3.7' hash: - md5: 0e8e1bd93998978fc3125522266d12db - sha256: 163f26e55246c506a75551ca01f35c7d4d533aee6db5c4cf2d598ae253e956b8 + md5: 1ff2e3ca41f0ce16afec7190db28288b + sha256: 00288f5e5e841711e8b8fef1f1242c858d8ef99ccbe5d7e0df4789d5d8d40645 manager: conda name: packaging optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/packaging-22.0-pyhd8ed1ab_0.conda - version: '22.0' + url: https://conda.anaconda.org/conda-forge/noarch/packaging-23.0-pyhd8ed1ab_0.conda + version: '23.0' - category: main dependencies: python: '>=3.10,<3.11.0a0' @@ -10190,14 +11417,14 @@ package: dependencies: python: '>=3.6' hash: - md5: c8d7e34ca76d6ecc03b84bedfd99d689 - sha256: 000f38e7ce7f020e2ce4d5024d3ffa63fcd65077edfe2182862965835f560525 + md5: f59d49a7b464901cf714b9e7984d01a2 + sha256: 93cfc7a92099e26b0575a343da4a667b52371cc38e4dee4ee264dc041ef77bac manager: conda name: pytz optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/pytz-2022.7-pyhd8ed1ab_0.conda - version: '2022.7' + url: https://conda.anaconda.org/conda-forge/noarch/pytz-2022.7.1-pyhd8ed1ab_0.conda + version: 2022.7.1 - category: main dependencies: python: '>=3.10,<3.11.0a0' @@ -10229,18 +11456,30 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/win-64/rtree-1.0.1-py310h1cbd46b_1.tar.bz2 version: 1.0.1 +- category: main + dependencies: + python: '>=3.9,<3.11' + hash: + md5: 97162d83d23113ffa938c83c91758d1b + sha256: 34b2dc2eee575a80a55dbeaa1580df9ccf0afec7baca99f8bb71da8ff158aa21 + manager: conda + name: serapeum_utils + optional: false + platform: win-64 + url: 
https://conda.anaconda.org/conda-forge/noarch/serapeum_utils-0.1.1-pyhd8ed1ab_0.conda + version: 0.1.1 - category: main dependencies: python: '>=3.7' hash: - md5: 9600fc9524d3f821e6a6d58c52f5bf5a - sha256: ea9f7eee2648d8078391cf9f968d848b400349c784e761501fb32ae01d323acf + md5: 9467d520d1457018e055bbbfdf9b7567 + sha256: 053447c82243033e6fd5cacbf7c349552146b135730a87fd942ec517d2b22efb manager: conda name: setuptools optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/setuptools-65.6.3-pyhd8ed1ab_0.conda - version: 65.6.3 + url: https://conda.anaconda.org/conda-forge/noarch/setuptools-66.1.1-pyhd8ed1ab_0.conda + version: 66.1.1 - category: main dependencies: python: '>=3.10,<3.11.0a0' @@ -10249,14 +11488,14 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: e472de8aa86c945fea9f9ba15063afb0 - sha256: 2badfd5f97f1521868a3a7d76c0bf1254373f6a7e109ecb791fe8ab67effd6d8 + md5: 591e36f1d47efe37f463df58f9278e0b + sha256: 02846938c94fb26858ec98210644bb641138460cca2b375f7e1ea2ab2a5b54a3 manager: conda name: simplejson optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.18.0-py310h8d17308_0.tar.bz2 - version: 3.18.0 + url: https://conda.anaconda.org/conda-forge/win-64/simplejson-3.18.1-py310h8d17308_0.conda + version: 3.18.1 - category: main dependencies: python: '' @@ -10459,6 +11698,40 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/noarch/aiosignal-1.3.1-pyhd8ed1ab_0.tar.bz2 version: 1.3.1 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-checksums: '>=0.1.14,<0.1.15.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 8014202343757d7c623e9e1cf7b2eb20 + sha256: 94cb11cf5edf9a5a56e9c12d87cc6da3bd2681d3c8dc7e038a825b137e871e3b + manager: conda + name: aws-c-event-stream + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aws-c-event-stream-0.2.18-h4de63fd_0.conda + version: 0.2.18 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-compression: '>=0.2.16,<0.2.17.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 4266ca1486873de4d4fecd4bfb635cb9 + sha256: 972f29647c50aacbb90ea9477d3735dfb7ba8dc808b80f45abf411f121515d16 + manager: conda + name: aws-c-http + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aws-c-http-0.7.0-h28290ff_0.conda + version: 0.7.0 - category: main dependencies: brotli-bin: 1.0.9 hcfcfb64_8 @@ -10532,14 +11805,14 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: 2844c7c09ca769d9189ce941d6b21c76 - sha256: 6581036c189b8719224b695274595986f2794469fd92d40897a767bd3095e428 + md5: 78964ddbe4f7e66f31d8cf69dcf8f4a4 + sha256: 8ab36f55d088c1cac58416c5c6439589bd2d25d9a103159b3144223d0ebc24c2 manager: conda name: coverage optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/coverage-7.0.1-py310h8d17308_0.conda - version: 7.0.1 + url: https://conda.anaconda.org/conda-forge/win-64/coverage-7.1.0-py310h8d17308_0.conda + version: 7.1.0 - category: main dependencies: krb5: '>=1.20.1,<1.21.0a0' @@ -10576,7 +11849,7 @@ package: version: 0.11.2 - category: main dependencies: - expat: '>=2.4.9,<3.0a0' + expat: '>=2.5.0,<3.0a0' freetype: '>=2.12.1,<3.0a0' libiconv: '>=1.17,<2.0a0' libzlib: '>=1.2.13,<1.3.0a0' @@ -10584,14 
+11857,14 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: 7db47ce2d047923726eb2681d336b9d8 - sha256: b35d2f1e9980f9ce3c982d47b947f930cfaae84367641d4b917cf3426843305a + md5: 08767992f1a4f1336a257af1241034bd + sha256: 643f2b95be68abeb130c53d543dcd0c1244bebabd58c774a21b31e4b51ac3c96 manager: conda name: fontconfig optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/fontconfig-2.14.1-hbde0cde_0.tar.bz2 - version: 2.14.1 + url: https://conda.anaconda.org/conda-forge/win-64/fontconfig-2.14.2-hbde0cde_0.conda + version: 2.14.2 - category: main dependencies: geographiclib: <2,>=1.49 @@ -10717,6 +11990,26 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.14-ha5c8aab_1.conda version: '2.14' +- category: main + dependencies: + libabseil: 20220623.0 cxx17* + libcrc32c: '>=1.1.2,<1.2.0a0' + libcurl: '>=7.86.0,<8.0a0' + libgrpc: '>=1.51.1,<1.52.0a0' + libprotobuf: '>=3.21.10,<3.22.0a0' + openssl: '>=3.0.7,<4.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 3b3c3fd0626c99d0ce623c679e76c7c6 + sha256: 9b97701ec945615f746375ba775082f037723b106c46768b00baa2868c2bfab3 + manager: conda + name: libgoogle-cloud + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/libgoogle-cloud-2.5.0-h5fc25aa_1.conda + version: 2.5.0 - category: main dependencies: boost-cpp: '>=1.78.0,<1.78.1.0a0' @@ -10815,18 +12108,18 @@ package: setuptools: '' wheel: '' hash: - md5: da66f2851b9836d3a7c5190082a45f7d - sha256: 7a86b2427abbf5cf695da192ba1c03130115f157297e7bfde65f0a18a345a7bc + md5: 85b35999162ec95f9f999bac15279c02 + sha256: bbffec284bd0e154363e845121f43007e7e64c80412ff13be21909be907b697d manager: conda name: pip optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/pip-22.3.1-pyhd8ed1ab_0.tar.bz2 - version: 22.3.1 + url: https://conda.anaconda.org/conda-forge/noarch/pip-23.0-pyhd8ed1ab_0.conda + version: '23.0' - category: main dependencies: krb5: '>=1.20.1,<1.21.0a0' - libpq: 15.1 ha9684e8_2 + libpq: 15.1 ha9684e8_3 libxml2: '>=2.10.3,<2.11.0a0' libzlib: '>=1.2.13,<1.3.0a0' openssl: '>=3.0.7,<4.0a0' @@ -10835,13 +12128,13 @@ package: vs2015_runtime: '>=14.29.30139' zlib: '' hash: - md5: 4a23e94d7ce726009beee93eddeba8ce - sha256: 3d5d960e0b54525c581ad30bcd6d0e68fe35868410b48bcbeeef938ba3be5393 + md5: 9e0c79de1528d73b4014e943ce342ab9 + sha256: c4cb0004287bb7032a8a7d6f8b2d19a4c93eddf79d224317169ca2e674d5a592 manager: conda name: postgresql optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/postgresql-15.1-hd87cd2b_2.conda + url: https://conda.anaconda.org/conda-forge/win-64/postgresql-15.1-hd87cd2b_3.conda version: '15.1' - category: main dependencies: @@ -10917,14 +12210,14 @@ package: python: '>=3.8' tomli: '>=1.0.0' hash: - md5: ac82c7aebc282e6ac0450fca012ca78c - sha256: 854233dc2d0d64219b7e951ccf49c1f32332c6fc7085ecb62cc18bc1f4e791b0 + md5: f0be05afc9c9ab45e273c088e00c258b + sha256: d298dfe6c53555c9fb5662f5f936e621cddd3b0a7031789375b82a1ee3b3a96b manager: conda name: pytest optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.2.0-pyhd8ed1ab_2.tar.bz2 - version: 7.2.0 + url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.2.1-pyhd8ed1ab_0.conda + version: 7.2.1 - category: main dependencies: python: '>=3.6' @@ -10995,14 +12288,14 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: 
903b1d62725156782b17e9d311582d8c - sha256: c8f2857fffbaefe714dd86526eef5aac973239dcc1bdd2b260f11ca4d700a729 + md5: 40c937d31bbac179cd27aa30e251e9e8 + sha256: 1c057fa9991362a6bd8bbabaee4523b6972fac03a0ef2be76ce3c8242d24ebff manager: conda name: sip optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/sip-6.7.5-py310h00ffb61_0.conda - version: 6.7.5 + url: https://conda.anaconda.org/conda-forge/win-64/sip-6.7.6-py310h00ffb61_0.conda + version: 6.7.6 - category: main dependencies: libhwloc: '>=2.8.0,<2.8.1.0a0' @@ -11061,6 +12354,42 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/noarch/async-timeout-4.0.2-pyhd8ed1ab_0.tar.bz2 version: 4.0.2 +- category: main + dependencies: + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-c-sdkutils: '>=0.1.7,<0.1.8.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 7b39c9ee08516bee006868b010868176 + sha256: 64ba2aadf2ce79f61652acc14aba524b01311af4acfee4e13ad39f36cef27191 + manager: conda + name: aws-c-auth + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aws-c-auth-0.6.21-h0fd8c68_3.conda + version: 0.6.21 +- category: main + dependencies: + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: 0d0381b8c7a59c2f8bbc17809ca6d82c + sha256: 9bb7ec2a41fb06aa1083de42daaa4cc9c7b8752e1150db7250b126827adc862f + manager: conda + name: aws-c-mqtt + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aws-c-mqtt-0.7.13-h0d1d87f_12.conda + version: 0.7.13 - category: main dependencies: jinja2: '' @@ -11150,14 +12479,14 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: 0e8b49bc425a99afa3c8d48776c6e6ed - sha256: 22930c11e3c44785cfc83f739bea5250bee34a078ee90f8840378a100d624aa4 + md5: 925c5f114c9a7ff2c2889c6ff0a091d4 + sha256: 5271dcb16870025fc730798ad49fcfac7579a1ee02c7de86292e22c26915f247 manager: conda name: cryptography optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/cryptography-38.0.4-py310h6e82f81_0.conda - version: 38.0.4 + url: https://conda.anaconda.org/conda-forge/win-64/cryptography-39.0.0-py310h6e82f81_0.conda + version: 39.0.0 - category: main dependencies: cloudpickle: '>=1.1.1' @@ -11256,14 +12585,14 @@ package: protobuf: '>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5' python: '>=3.7' hash: - md5: 35947a7b1f5319de636d74ce38dcf131 - sha256: 88c2be80b3c4ca97f5259b6c6a814b730e6ab4d09c15dbbe60df779c3a7416f9 + md5: cbf8b4569c1d2a0a6077d34a2d38333e + sha256: 1b2a9ae4540e3056a7eaf126a4939360f521854c8a4aa04f10ed4c80da4edc7e manager: conda name: googleapis-common-protos optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/googleapis-common-protos-1.57.0-pyhd8ed1ab_3.conda - version: 1.57.0 + url: https://conda.anaconda.org/conda-forge/noarch/googleapis-common-protos-1.57.1-pyhd8ed1ab_0.conda + version: 1.57.1 - category: main dependencies: hdf5: '>=1.12.2,<1.12.3.0a0' @@ -11354,8 +12683,8 @@ package: - category: main dependencies: freetype: '>=2.12.1,<3.0a0' - jpeg: '>=9e,<10a' lcms2: '>=2.14,<3.0a0' + libjpeg-turbo: '>=2.1.4,<3.0a0' libtiff: '>=4.5.0,<4.6.0a0' libwebp-base: '>=1.2.4,<2.0a0' libxcb: '>=1.13,<1.14.0a0' 
@@ -11368,14 +12697,14 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: 88baf0b358a778c3bc93fa412611ea1f - sha256: 60152450a8a6f5d20fcd50f2b4d9654a212a3d8083621d32a1f3416859fae4a6 + md5: b5c8bb98f1d5af54621a77b2658a29eb + sha256: cacee1013eeae84f5bf085473b70033dd14f8f73d9bb6fb02c44bd1e62be00f2 manager: conda name: pillow optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/pillow-9.2.0-py310hdbb7713_4.conda - version: 9.2.0 + url: https://conda.anaconda.org/conda-forge/win-64/pillow-9.4.0-py310h3d7015a_0.conda + version: 9.4.0 - category: main dependencies: certifi: '' @@ -11405,13 +12734,13 @@ package: vc: '>=14.2,<15' vs2015_runtime: '>=14.29.30139' hash: - md5: 636d6aa8f5fbc393ee7c365c1751875f - sha256: 433701362276a5d04d62da3fd04ec9ae8351761f5a88ba793903b9c8745593bc + md5: a4c757150f616bae079bc08cea956138 + sha256: fde9316830224ba2903d8a8db97ca68628304af878c5caba0f1decc7336dc68e manager: conda name: pyqt5-sip optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/pyqt5-sip-12.11.0-py310h00ffb61_2.tar.bz2 + url: https://conda.anaconda.org/conda-forge/win-64/pyqt5-sip-12.11.0-py310h00ffb61_3.conda version: 12.11.0 - category: main dependencies: @@ -11441,14 +12770,14 @@ package: zlib: '' zstd: '>=1.5.2,<1.6.0a0' hash: - md5: a060e132e648e696862c3aa05f09b515 - sha256: bf5fc73b24aed62b50d5075f70aac25fbd30e7a3eeb6be10d341de2f884322a0 + md5: 07426c5e1301448738f66686548d41ff + sha256: 96c2de92dce5de54c923d9242163196bff6bce8e0fbfdbcfd4d9c47ce2fb1123 manager: conda name: tiledb optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/tiledb-2.13.0-h3132609_1.conda - version: 2.13.0 + url: https://conda.anaconda.org/conda-forge/win-64/tiledb-2.13.2-h3132609_0.conda + version: 2.13.2 - category: main dependencies: aiosignal: '>=1.1.2' @@ -11472,19 +12801,39 @@ package: platform: win-64 url: https://conda.anaconda.org/conda-forge/win-64/aiohttp-3.8.3-py310h8d17308_1.tar.bz2 version: 3.8.3 +- category: main + dependencies: + aws-c-auth: '>=0.6.21,<0.6.22.0a0' + aws-c-cal: '>=0.5.20,<0.5.21.0a0' + aws-c-common: '>=0.8.5,<0.8.6.0a0' + aws-c-http: '>=0.7.0,<0.7.1.0a0' + aws-c-io: '>=0.13.12,<0.13.13.0a0' + aws-checksums: '>=0.1.14,<0.1.15.0a0' + ucrt: '>=10.0.20348.0' + vc: '>=14.2,<15' + vs2015_runtime: '>=14.29.30139' + hash: + md5: edbef89b00c712f0efd0ec83a6326dfe + sha256: da5598375c625f12549a29413a772844c656fd3c06901b946a9c90130ebf2649 + manager: conda + name: aws-c-s3 + optional: false + platform: win-64 + url: https://conda.anaconda.org/conda-forge/win-64/aws-c-s3-0.2.3-h898862f_0.conda + version: 0.2.3 - category: main dependencies: google-crc32c: '>=1.0,<2.0.0dev' python: '>=3.7' hash: - md5: d8e92214f92379047780fd31bc8b1f94 - sha256: ff44d8c49f39afbcd2840f446a262a28b3f6f232be97604b55439dfed9756e38 + md5: a0d4c902824b3188a61df18c1e8bbf5e + sha256: d997737f75ff1132374f791b267e8a4322652a6b172da885abfc1d4bff18e883 manager: conda name: google-resumable-media optional: false platform: win-64 - url: https://conda.anaconda.org/conda-forge/noarch/google-resumable-media-2.4.0-pyhd8ed1ab_0.tar.bz2 - version: 2.4.0 + url: https://conda.anaconda.org/conda-forge/noarch/google-resumable-media-2.4.1-pyhd8ed1ab_0.conda + version: 2.4.1 - category: main dependencies: gettext: '>=0.21.1,<1.0a0' @@ -11546,17 +12895,17 @@ package: version: 22.12.0 - category: main dependencies: - cryptography: '>=38.0.0,<39' + cryptography: '>=38.0.0,<40' python: '>=3.6' hash: - md5: 
fbfa0a180d48c800f922a10a114a8632
-    sha256: 42f04dded77ac2597108378d62b121697d0e982aba7b20a462a7239030563628
+    md5: d41957700e83bbb925928764cb7f8878
+    sha256: adbf8951f22bfa950b9e24394df1ef1d2b2d7dfb194d91c7f42bc11900695785
   manager: conda
   name: pyopenssl
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/pyopenssl-22.1.0-pyhd8ed1ab_0.tar.bz2
-  version: 22.1.0
+  url: https://conda.anaconda.org/conda-forge/noarch/pyopenssl-23.0.0-pyhd8ed1ab_0.conda
+  version: 23.0.0
 - category: main
   dependencies:
     pyqt5-sip: 4.19.18 py39h415ef7b_8
@@ -11574,6 +12923,29 @@ package:
   platform: win-64
   url: https://conda.anaconda.org/conda-forge/win-64/pyqt-impl-5.12.3-py39h415ef7b_8.tar.bz2
   version: 5.12.3
+- category: main
+  dependencies:
+    aws-c-auth: '>=0.6.21,<0.6.22.0a0'
+    aws-c-cal: '>=0.5.20,<0.5.21.0a0'
+    aws-c-common: '>=0.8.5,<0.8.6.0a0'
+    aws-c-event-stream: '>=0.2.18,<0.2.19.0a0'
+    aws-c-http: '>=0.7.0,<0.7.1.0a0'
+    aws-c-io: '>=0.13.12,<0.13.13.0a0'
+    aws-c-mqtt: '>=0.7.13,<0.7.14.0a0'
+    aws-c-s3: '>=0.2.3,<0.2.4.0a0'
+    aws-checksums: '>=0.1.14,<0.1.15.0a0'
+    ucrt: '>=10.0.20348.0'
+    vc: '>=14.2,<15'
+    vs2015_runtime: '>=14.29.30139'
+  hash:
+    md5: 94f7baa624b229af23a66f8bd7b45dec
+    sha256: edc2e033f134f5c4065c3b83756fd273f44d2ee22606ee083d8d5bf7a2bd960d
+  manager: conda
+  name: aws-crt-cpp
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/win-64/aws-crt-cpp-0.18.16-h1904fea_10.conda
+  version: 0.18.16
 - category: main
   dependencies:
     gettext: '>=0.21.1,<1.0a0'
@@ -11620,7 +12992,7 @@ package:
     kealib: '>=1.5.0,<1.6.0a0'
     lerc: '>=4.0.0,<5.0a0'
     libcurl: '>=7.87.0,<8.0a0'
-    libdeflate: '>=1.14,<1.15.0a0'
+    libdeflate: '>=1.17,<1.18.0a0'
     libiconv: '>=1.17,<2.0a0'
     libkml: '>=1.3.0,<1.4.0a0'
     libnetcdf: '>=4.8.1,<4.8.2.0a0'
@@ -11639,7 +13011,7 @@ package:
     poppler: '>=22.12.0,<22.13.0a0'
     postgresql: ''
     proj: '>=9.1.0,<9.1.1.0a0'
-    tiledb: '>=2.13.0,<2.14.0a0'
+    tiledb: '>=2.13.2,<2.14.0a0'
     ucrt: '>=10.0.20348.0'
     vc: '>=14.2,<15'
     vs2015_runtime: '>=14.29.30139'
@@ -11647,14 +13019,14 @@ package:
    xz: '>=5.2.6,<6.0a0'
     zstd: '>=1.5.2,<1.6.0a0'
   hash:
-    md5: a6717d2604c7fb59d381ebc1d50abdd0
-    sha256: 4784cf2d227c96a592456723d47ba2f69c2ad2777c004a16e3b8ee1fe8261869
+    md5: a73eceda4d87ffc2ae1e6d151f3f878a
+    sha256: 6dcd51c0c30b3c788b207ebc1ce716b27218333a2aa81e130d2e85bf26681d42
   manager: conda
   name: libgdal
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/libgdal-3.6.1-hffd0036_2.conda
-  version: 3.6.1
+  url: https://conda.anaconda.org/conda-forge/win-64/libgdal-3.6.2-h060c9ed_3.conda
+  version: 3.6.2
 - category: main
   dependencies:
     libblas: 3.9.0 16_win64_mkl
@@ -11711,14 +13083,49 @@ package:
     pysocks: '>=1.5.6,<2.0,!=1.5.7'
     python: <4.0
   hash:
-    md5: 3078ef2359efd6ecadbc7e085c5e0592
-    sha256: 992f2d6ca50c98f865a4f2e4bada23f950e39f33ff7c64614a31ee152ec4d5ae
+    md5: 01f33ad2e0aaf6b5ba4add50dad5ad29
+    sha256: f2f09c44e47946ce631dbc9a8a79bb463ac0f4122aaafdbcc51f200a1e420ca6
   manager: conda
   name: urllib3
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/urllib3-1.26.13-pyhd8ed1ab_0.conda
-  version: 1.26.13
+  url: https://conda.anaconda.org/conda-forge/noarch/urllib3-1.26.14-pyhd8ed1ab_0.conda
+  version: 1.26.14
+- category: main
+  dependencies:
+    aws-c-common: '>=0.8.5,<0.8.6.0a0'
+    aws-c-event-stream: '>=0.2.18,<0.2.19.0a0'
+    aws-crt-cpp: '>=0.18.16,<0.18.17.0a0'
+    libcurl: '>=7.87.0,<8.0a0'
+    libzlib: '>=1.2.13,<1.3.0a0'
+    openssl: '>=3.0.7,<4.0a0'
+    ucrt: '>=10.0.20348.0'
+    vc: '>=14.2,<15'
+    vs2015_runtime: '>=14.29.30139'
+  hash:
+    md5: 7136b097f46a767cbf9f7e93ef8eb6e9
+    sha256: 112c6cbb3273910a4d307dffa2df87884bdcdea2426fba76713e737e8cc63d40
+  manager: conda
+  name: aws-sdk-cpp
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/win-64/aws-sdk-cpp-1.10.57-h9ce19f4_1.conda
+  version: 1.10.57
+- category: main
+  dependencies:
+    jmespath: '>=0.7.1,<2.0.0'
+    python: '>=3.7'
+    python-dateutil: '>=2.1,<3.0.0'
+    urllib3: '>=1.25.4,<1.27'
+  hash:
+    md5: 766c1b31877841ebd4bbed9274ff81ed
+    sha256: b03b23c6bef007197788df421ed60d351bb80065c09fcb6c7bdf2884894db226
+  manager: conda
+  name: botocore
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/noarch/botocore-1.29.60-pyhd8ed1ab_0.conda
+  version: 1.29.60
 - category: main
   dependencies:
     libblas: '>=3.9.0,<4.0a0'
@@ -11730,23 +13137,23 @@ package:
     vc: '>=14.2,<15'
     vs2015_runtime: '>=14.29.30139'
   hash:
-    md5: f734ade6fd852582e5c1a09152dd3a60
-    sha256: 92900cc7e9561ea177878f838a6a8a105b750d5971affedc648090ef22b4db23
+    md5: 9960f0424ff61ab6ed8584bfbf869e77
+    sha256: d2ae9dfae20e91563467d3c045d7de9856018a66c31730ec258ea7d3193c5d11
   manager: conda
   name: numpy
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/numpy-1.23.5-py310h4a8f9c9_0.conda
-  version: 1.23.5
+  url: https://conda.anaconda.org/conda-forge/win-64/numpy-1.24.1-py310hd02465a_0.conda
+  version: 1.24.1
 - category: main
   dependencies:
-    gst-plugins-base: '>=1.21.2,<1.22.0a0'
-    gstreamer: '>=1.21.2,<1.22.0a0'
+    gst-plugins-base: '>=1.21.3,<1.22.0a0'
+    gstreamer: '>=1.21.3,<1.22.0a0'
     icu: '>=70.1,<71.0a0'
     jpeg: '>=9e,<10a'
     krb5: '>=1.20.1,<1.21.0a0'
-    libclang: '>=15.0.6,<16.0a0'
-    libclang13: '>=15.0.6'
+    libclang: '>=15.0.7,<16.0a0'
+    libclang13: '>=15.0.7'
     libglib: '>=2.74.1,<3.0a0'
     libpng: '>=1.6.39,<1.7.0a0'
     libsqlite: '>=3.40.0,<4.0a0'
@@ -11757,13 +13164,13 @@ package:
     vs2015_runtime: '>=14.29.30139'
     zstd: '>=1.5.2,<1.6.0a0'
   hash:
-    md5: f1351c4f6ac14a9c8d29103914e9b2d4
-    sha256: c94a2ff1e06b8a6f6bf0fc505c45ee1dda14b289304bce1c9122d258e64e131c
+    md5: 5fa12874f6ba766c783a01a025f86f92
+    sha256: f8d9118884c670d0f388cfdc0ff7144707cf2ddcf17739ea33ae9f14cf88a028
   manager: conda
   name: qt-main
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/qt-main-5.15.6-h9580fe5_5.conda
+  url: https://conda.anaconda.org/conda-forge/win-64/qt-main-5.15.6-h9580fe5_6.conda
   version: 5.15.6
 - category: main
   dependencies:
@@ -11773,14 +13180,14 @@ package:
     python: '>=3.7,<4.0'
     urllib3: '>=1.21.1,<1.27'
   hash:
-    md5: 089382ee0e2dc2eae33a04cc3c2bddb0
-    sha256: b45d0da6774c8231ab4fef0427b3050e7c54c84dfe453143dd4010999c89e050
+    md5: 11d178fc55199482ee48d6812ea83983
+    sha256: 22c081b4cdd023a514400413f50efdf2c378f56f2a5ea9d65666aacf4696490a
   manager: conda
   name: requests
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/requests-2.28.1-pyhd8ed1ab_1.tar.bz2
-  version: 2.28.1
+  url: https://conda.anaconda.org/conda-forge/noarch/requests-2.28.2-pyhd8ed1ab_0.conda
+  version: 2.28.2
 - category: main
   dependencies:
     numpy: '>=1.21.6,<2.0a0'
@@ -11807,14 +13214,14 @@ package:
     vc: '>=14.2,<15'
     vs2015_runtime: '>=14.29.30139'
   hash:
-    md5: eb801fd448c931fa196bc3686d6a03c1
-    sha256: ca6f17115078c56e1bf4aa735798843060e8c60c04de866163d3f1b329f7e16e
+    md5: 357f1ccd3fa2bbf3661146467f7afd44
+    sha256: c2d32b16fedb41b184729fd87c5b61f69f6a4e5f331955e231b83d30aac99a61
   manager: conda
   name: contourpy
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.0.6-py310h232114e_0.tar.bz2
-  version: 1.0.6
+  url: https://conda.anaconda.org/conda-forge/win-64/contourpy-1.0.7-py310h232114e_0.conda
+  version: 1.0.7
 - category: main
   dependencies:
     branca: '>=0.6.0'
@@ -11834,7 +13241,7 @@ package:
 - category: main
   dependencies:
     hdf5: '>=1.12.2,<1.12.3.0a0'
-    libgdal: 3.6.1 hffd0036_2
+    libgdal: 3.6.2 h060c9ed_3
     numpy: '>=1.21.6,<2.0a0'
     openssl: '>=3.0.7,<4.0a0'
     python: '>=3.10,<3.11.0a0'
@@ -11843,14 +13250,14 @@ package:
     vc: '>=14.2,<15'
     vs2015_runtime: '>=14.29.30139'
   hash:
-    md5: ce9eef63c64b0ddadfe8e2e71c7a7309
-    sha256: 2d8cfc500583862cb3f2a0685126138d7ae2fa661570582b01b8f777c08b4c5b
+    md5: 8934913fb065c580d1d78b55fd1e9b42
+    sha256: a40a315a226c12cc1eace77d58220f9120f37027d17fa2f8698fd937fe95a725
   manager: conda
   name: gdal
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/gdal-3.6.1-py310h644bc08_2.conda
-  version: 3.6.1
+  url: https://conda.anaconda.org/conda-forge/win-64/gdal-3.6.2-py310h644bc08_3.conda
+  version: 3.6.2
 - category: main
   dependencies:
     aiohttp: '>=3.6.2,<4.0.0dev'
@@ -11863,14 +13270,31 @@ package:
     rsa: '>=3.1.4,<5'
     six: '>=1.9.0'
   hash:
-    md5: ce0b3b567b3b8f7a3ef5bd43b2fd1a5e
-    sha256: 5525c0fe34e102d12f66fe96d2bac211cb42332e294718f72c15734a2b618dc4
+    md5: 88944e8c28fbd7471213f8b23d40f001
+    sha256: d9fbbaf18ca8dff81d004bad336a8cd04be717c8e41cc0ba49c4471f50db9472
+  manager: conda
+  name: google-auth
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/noarch/google-auth-2.16.0-pyh1a96a4e_1.conda
+  version: 2.16.0
+- category: main
+  dependencies:
+    numpy: ''
+    python: '>=3.10,<3.11.0a0'
+    python_abi: 3.10.* *_cp310
+    ucrt: '>=10.0.20348.0'
+    vc: '>=14.2,<15'
+    vs2015_runtime: '>=14.29.30139'
+  hash:
+    md5: 370a955d3a32a0a0cd78012f6719f4fb
+    sha256: 2131980a7b6bc7c3c5dfa4ef60bea7b96248b28e7d36a9de9eb0cd0129c1b112
   manager: conda
-  name: google-auth
+  name: h3-py
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/google-auth-2.15.0-pyh1a96a4e_0.conda
-  version: 2.15.0
+  url: https://conda.anaconda.org/conda-forge/win-64/h3-py-3.7.4-py310h00ffb61_1.tar.bz2
+  version: 3.7.4
 - category: main
   dependencies:
     blosc: '>=1.21.0,<2.0a0'
@@ -11928,6 +13352,42 @@ package:
   platform: win-64
   url: https://conda.anaconda.org/conda-forge/noarch/imageio-2.16.1-pyhcf75d05_0.tar.bz2
   version: 2.16.1
+- category: main
+  dependencies:
+    aws-sdk-cpp: '>=1.10.57,<1.10.58.0a0'
+    bzip2: '>=1.0.8,<2.0a0'
+    c-ares: '>=1.18.1,<2.0a0'
+    gflags: '>=2.2.2,<2.3.0a0'
+    glog: '>=0.6.0,<0.7.0a0'
+    libabseil: 20220623.0 cxx17*
+    libbrotlicommon: '>=1.0.9,<1.1.0a0'
+    libbrotlidec: '>=1.0.9,<1.1.0a0'
+    libbrotlienc: '>=1.0.9,<1.1.0a0'
+    libcrc32c: '>=1.1.2,<1.2.0a0'
+    libcurl: '>=7.87.0,<8.0a0'
+    libgoogle-cloud: '>=2.5.0,<2.5.1.0a0'
+    libgrpc: '>=1.51.1,<1.52.0a0'
+    libprotobuf: '>=3.21.12,<3.22.0a0'
+    libthrift: '>=0.16.0,<0.16.1.0a0'
+    libutf8proc: '>=2.8.0,<3.0a0'
+    libzlib: '>=1.2.13,<1.3.0a0'
+    lz4-c: '>=1.9.3,<1.10.0a0'
+    openssl: '>=3.0.7,<4.0a0'
+    re2: '>=2022.6.1,<2022.6.2.0a0'
+    snappy: '>=1.1.9,<2.0a0'
+    ucrt: '>=10.0.20348.0'
+    vc: '>=14.2,<15'
+    vs2015_runtime: '>=14.29.30139'
+    zstd: '>=1.5.2,<1.6.0a0'
+  hash:
+    md5: 9110460bb63ad588d7006df92f293406
+    sha256: ec2a1c78af7a2417c11ef9982f2277d39448506913bda637dd30eb0fe12dd99a
+  manager: conda
+  name: libarrow
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/win-64/libarrow-10.0.1-h0f2029a_6_cpu.conda
+  version: 10.0.1
 - category: main
   dependencies:
     numpy: ''
@@ -11953,32 +13413,47 @@ package:
     vc: '>=14.2,<15'
     vs2015_runtime: '>=14.29.30139'
   hash:
-    md5: 905643029444cd6ed5b11b918c3d04f8
-    sha256: fce17e2fdca451ac46769ffe961cd0e1c939f4cc1426837233199dbbb0a81317
+    md5: 9804d59dd8ea7d363ac2ccbba29c950c
+    sha256: 6296c53078700d69a4b4640b85face0b9950710ace5040148b610b9e638479d6
   manager: conda
   name: pandas
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/pandas-1.5.2-py310h1c4a608_0.conda
-  version: 1.5.2
+  url: https://conda.anaconda.org/conda-forge/win-64/pandas-1.5.3-py310h1c4a608_0.conda
+  version: 1.5.3
+- category: main
+  dependencies:
+    appdirs: '>=1.3.0'
+    packaging: '>=20.0'
+    python: '>=3.6'
+    requests: '>=2.19.0'
+  hash:
+    md5: 6429e1d1091c51f626b5dcfdd38bf429
+    sha256: 1f0548105de86fb2eb6fbb8d3d6cc2004079b8442d232258108687d6cc91eb73
+  manager: conda
+  name: pooch
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/noarch/pooch-1.6.0-pyhd8ed1ab_0.tar.bz2
+  version: 1.6.0
 - category: main
   dependencies:
-    pyqt5-sip: 12.11.0 py310h00ffb61_2
+    pyqt5-sip: 12.11.0 py310h00ffb61_3
     python: '>=3.10,<3.11.0a0'
     python_abi: 3.10.* *_cp310
     qt-main: '>=5.15.6,<5.16.0a0'
-    sip: '>=6.7.2,<6.8.0a0'
+    sip: '>=6.7.5,<6.8.0a0'
     ucrt: '>=10.0.20348.0'
     vc: '>=14.2,<15'
     vs2015_runtime: '>=14.29.30139'
   hash:
-    md5: 4074e062528daa0e9c908ca7d39ff1a6
-    sha256: 42076ea9a77e514c1d72d982a2526b87311a5264960aeace13dc4aca48202182
+    md5: 4012c5ed74c63b82c344e38cf3e68a26
+    sha256: 401d4650825a608bfae07f55bd6b7d0e302d026009efc495df7d1cb508b281db
   manager: conda
   name: pyqt
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/pyqt-5.15.7-py310h1fd54f2_2.tar.bz2
+  url: https://conda.anaconda.org/conda-forge/win-64/pyqt-5.15.7-py310h1fd54f2_3.conda
   version: 5.15.7
 - category: main
   dependencies:
@@ -11998,25 +13473,17 @@ package:
   version: 1.3.0
 - category: main
   dependencies:
-    libblas: '>=3.9.0,<4.0a0'
-    libcblas: '>=3.9.0,<4.0a0'
-    liblapack: '>=3.9.0,<4.0a0'
-    m2w64-gcc-libs: ''
-    numpy: '>=1.21.6,<2.0a0'
-    python: '>=3.10,<3.11.0a0'
-    python_abi: 3.10.* *_cp310
-    ucrt: '>=10.0.20348.0'
-    vc: '>=14.2,<15'
-    vs2015_runtime: '>=14.29.30139'
+    botocore: '>=1.12.36,<2.0a.0'
+    python: '>=3.7'
   hash:
-    md5: dd00a0a254b250f6cc7546be6e79e396
-    sha256: 4eb650f66f457a67b1ba8dda476d7f4de38fa1cddd1f64fb8e483fc82d42397b
+    md5: 900e74d8547fbea3af028937df28ed77
+    sha256: 0e459ed32b00e96b62c2ab7e2dba0135c73fd980120fe1a7bd49901f2d50760f
   manager: conda
-  name: scipy
+  name: s3transfer
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/scipy-1.9.3-py310h578b7cb_2.tar.bz2
-  version: 1.9.3
+  url: https://conda.anaconda.org/conda-forge/noarch/s3transfer-0.6.0-pyhd8ed1ab_0.tar.bz2
+  version: 0.6.0
 - category: main
   dependencies:
     geos: '>=3.11.1,<3.11.2.0a0'
@@ -12049,6 +13516,33 @@ package:
   platform: win-64
   url: https://conda.anaconda.org/conda-forge/noarch/snuggs-1.4.7-py_0.tar.bz2
   version: 1.4.7
+- category: main
+  dependencies:
+    libarrow: 10.0.1 h0f2029a_6_cpu
+  hash:
+    md5: 3a664cf5acbf41d53dc6b5ac1fede01b
+    sha256: fd57e06dfb3f0fe478280d69bb2ea6d484433959eafa67c53e47727e4a384e2b
+  manager: conda
+  name: arrow-cpp
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/win-64/arrow-cpp-10.0.1-h57928b3_6_cpu.conda
+  version: 10.0.1
+- category: main
+  dependencies:
+    botocore: '>=1.29.60,<1.30.0'
+    jmespath: '>=0.7.1,<2.0.0'
+    python: '>=3.7'
+    s3transfer: '>=0.6.0,<0.7.0'
+  hash:
+    md5: bdb3a067f03eded198b3fdf7c66ba8c3
+    sha256: d81ae5f720229e0a81914ace6ea8e741eced99cb72d34d3742461496bec71e12
+  manager: conda
+  name: boto3
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/noarch/boto3-1.26.60-pyhd8ed1ab_0.conda
+  version: 1.26.60
 - category: main
   dependencies:
     attrs: '>=17'
@@ -12056,7 +13550,7 @@ package:
     click-plugins: '>=1.0'
     cligj: '>=0.5'
     gdal: ''
-    libgdal: '>=3.6.0,<3.7.0a0'
+    libgdal: '>=3.6.2,<3.7.0a0'
     munch: ''
     numpy: '>=1.21.6,<2.0a0'
     python: '>=3.10,<3.11.0a0'
@@ -12068,14 +13562,14 @@ package:
     vc: '>=14.2,<15'
     vs2015_runtime: '>=14.29.30139'
   hash:
-    md5: 154fbd5cb756b259346cb860ec67f211
-    sha256: f5a0d4733db5d20ad841b85e6ce086729a27f923438cecb1bb7575d5a6899e04
+    md5: 2d1e475df985f4bd3544b11f4cde76b8
+    sha256: 1b94b61e78d1e1266c7894c0dc18a27d9ba9986e3f86ddfd1f0a48dde6c55d3f
   manager: conda
   name: fiona
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/fiona-1.8.22-py310h4a685fe_5.conda
-  version: 1.8.22
+  url: https://conda.anaconda.org/conda-forge/win-64/fiona-1.9.0-py310h4a685fe_0.conda
+  version: 1.9.0
 - category: main
   dependencies:
     packaging: ''
@@ -12142,14 +13636,14 @@ package:
     vc: '>=14.2,<15'
     vs2015_runtime: '>=14.29.30139'
   hash:
-    md5: 51a43b04e415da8dd0850c2ded044c6e
-    sha256: f1ef00bb3af1777bb16ccbab290f2b3e5e9abd5885bfdc619a213499399b9f65
+    md5: d26244793303f85463ec99177b711e1b
+    sha256: 40080b50100f0e477dc1c97946509a8cab7b29e6ee52adf29e24231b4f3f0faf
   manager: conda
   name: matplotlib-base
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.6.2-py310h51140c5_0.tar.bz2
-  version: 3.6.2
+  url: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.6.3-py310h51140c5_0.conda
+  version: 3.6.3
 - category: main
   dependencies:
     cftime: ''
@@ -12172,21 +13666,6 @@ package:
   platform: win-64
   url: https://conda.anaconda.org/conda-forge/win-64/netcdf4-1.6.2-nompi_py310h459bb5f_100.tar.bz2
   version: 1.6.2
-- category: main
-  dependencies:
-    numpy: '>=1.4.0'
-    python: '>=3.6'
-    scipy: ''
-    six: ''
-  hash:
-    md5: 50ef6b29b1fb0768ca82c5aeb4fb2d96
-    sha256: 9d232f9cda05ce1833a7e5b16db4486ddfb71318635047fb64de119d364e0259
-  manager: conda
-  name: patsy
-  optional: false
-  platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/patsy-0.5.3-pyhd8ed1ab_0.tar.bz2
-  version: 0.5.3
 - category: main
   dependencies:
     affine: ''
@@ -12216,25 +13695,26 @@ package:
   version: 1.3.4
 - category: main
   dependencies:
-    joblib: '>=1.1.1'
+    libblas: '>=3.9.0,<4.0a0'
     libcblas: '>=3.9.0,<4.0a0'
+    liblapack: '>=3.9.0,<4.0a0'
+    m2w64-gcc-libs: ''
     numpy: '>=1.21.6,<2.0a0'
+    pooch: ''
     python: '>=3.10,<3.11.0a0'
     python_abi: 3.10.* *_cp310
-    scipy: ''
-    threadpoolctl: '>=2.0.0'
     ucrt: '>=10.0.20348.0'
     vc: '>=14.2,<15'
     vs2015_runtime: '>=14.29.30139'
   hash:
-    md5: f5433a3e1971e7565a4a9bd00d1df2eb
-    sha256: da4c825912e4c388268611df0f3f99fae86e443bdbf17214fa2af1d9ef4bc80e
+    md5: 87356a414020b1b468bb808d62c183c3
+    sha256: f5e8f9f71dae19d89759814bdd5511a296854e68276dcd1ae3a2500d7c4f1890
   manager: conda
-  name: scikit-learn
+  name: scipy
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/scikit-learn-1.2.0-py310had3394f_0.conda
-  version: 1.2.0
+  url: https://conda.anaconda.org/conda-forge/win-64/scipy-1.10.0-py310h578b7cb_0.conda
+  version: 1.10.0
 - category: main
   dependencies:
     imagecodecs: '>=2021.11.20'
@@ -12301,14 +13781,14 @@ package:
     python: '>=3.7'
     uritemplate: '>=3.0.1,<5'
   hash:
-    md5: 04241ec803212136585c4e7738de8543
-    sha256: 59d5c1e9afce9be9042900e10ffa804bbe68fb1331fed2ace5d15ce461f83b87
+    md5: 2fba68326c4a5f7308ae42725253d015
+    sha256: 07590faf8e2b3939ff11026d71fc48c909b99995c00eea59f2de5734ee127773
   manager: conda
   name: google-api-python-client
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/google-api-python-client-2.70.0-pyhd8ed1ab_0.conda
-  version: 2.70.0
+  url: https://conda.anaconda.org/conda-forge/noarch/google-api-python-client-2.74.0-pyhd8ed1ab_0.conda
+  version: 2.74.0
 - category: main
   dependencies:
     google-api-core: '>=1.31.6,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0'
@@ -12326,37 +13806,47 @@ package:
   version: 2.3.2
 - category: main
   dependencies:
-    networkx: ''
-    numpy: '>=1.3'
-    pandas: '>=1.0'
-    python: '>=3.5'
-    scikit-learn: ''
-    scipy: '>=1.0'
+    matplotlib-base: '>=3.6.3,<3.6.4.0a0'
+    pyqt: '>=5'
+    python: '>=3.10,<3.11.0a0'
+    python_abi: 3.10.* *_cp310
+    tornado: '>=5'
   hash:
-    md5: 908bbfb54da154042c5cbda77b37a3d1
-    sha256: 1435305fb0a127b3154e76c0836d44526eeb93e80bd37596128d7ad8fb196d97
+    md5: 6c14420fa968a9a7351fbf04f1770895
+    sha256: 5d4f9203f82bafb99cfdc6d4b2ee33d6da0dcfd330bf4e6f2a9b65576a31254e
   manager: conda
-  name: mapclassify
+  name: matplotlib
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/mapclassify-2.4.3-pyhd8ed1ab_0.tar.bz2
-  version: 2.4.3
+  url: https://conda.anaconda.org/conda-forge/win-64/matplotlib-3.6.3-py310h5588dad_0.conda
+  version: 3.6.3
 - category: main
   dependencies:
-    matplotlib-base: '>=3.6.2,<3.6.3.0a0'
-    pyqt: '>=5'
-    python: '>=3.10,<3.11.0a0'
-    python_abi: 3.10.* *_cp310
-    tornado: '>=5'
+    arrow-cpp: '>=0.11.0'
   hash:
-    md5: 576c59f0abd7c6318e783f1938afca47
-    sha256: 009402d6c94b2e2018e534abe483db6c1e3ae8a2a9736d889bc13aa604c45a6d
+    md5: 79a5f78c42817594ae016a7896521a97
+    sha256: 15e50657515b791734ba045da5135377404ca37c518b2066b9c6451c65cd732e
   manager: conda
-  name: matplotlib
+  name: parquet-cpp
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/matplotlib-3.6.2-py310h5588dad_0.tar.bz2
-  version: 3.6.2
+  url: https://conda.anaconda.org/conda-forge/noarch/parquet-cpp-1.5.1-2.tar.bz2
+  version: 1.5.1
+- category: main
+  dependencies:
+    numpy: '>=1.4.0'
+    python: '>=3.6'
+    scipy: ''
+    six: ''
+  hash:
+    md5: 50ef6b29b1fb0768ca82c5aeb4fb2d96
+    sha256: 9d232f9cda05ce1833a7e5b16db4486ddfb71318635047fb64de119d364e0259
+  manager: conda
+  name: patsy
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/noarch/patsy-0.5.3-pyhd8ed1ab_0.tar.bz2
+  version: 0.5.3
 - category: main
   dependencies:
     affine: <3.0
@@ -12403,6 +13893,27 @@ package:
   platform: win-64
   url: https://conda.anaconda.org/conda-forge/win-64/scikit-image-0.19.2-py39h2e25243_0.tar.bz2
   version: 0.19.2
+- category: main
+  dependencies:
+    joblib: '>=1.1.1'
+    libcblas: '>=3.9.0,<4.0a0'
+    numpy: '>=1.21.6,<2.0a0'
+    python: '>=3.10,<3.11.0a0'
+    python_abi: 3.10.* *_cp310
+    scipy: ''
+    threadpoolctl: '>=2.0.0'
+    ucrt: '>=10.0.20348.0'
+    vc: '>=14.2,<15'
+    vs2015_runtime: '>=14.29.30139'
+  hash:
+    md5: 89c45b9f5f2941375946722eb7a03ff8
+    sha256: a4d778720bd667f461d50fbbcfb040f472fcd4a2e379f09721205dc201c1c3ee
+  manager: conda
+  name: scikit-learn
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/win-64/scikit-learn-1.2.1-py310had3394f_0.conda
+  version: 1.2.1
 - category: main
   dependencies:
     matplotlib-base: '>=3.1,!=3.6.1'
@@ -12422,59 +13933,20 @@ package:
   version: 0.12.2
 - category: main
   dependencies:
-    numpy: '>=1.21.6,<2.0a0'
-    packaging: ''
-    pandas: '>=1.0'
-    patsy: '>=0.5.2'
-    python: '>=3.10,<3.11.0a0'
-    python_abi: 3.10.* *_cp310
-    scipy: '>=1.3'
-    ucrt: '>=10.0.20348.0'
-    vc: '>=14.2,<15'
-    vs2015_runtime: '>=14.29.30139'
-  hash:
-    md5: 470231039de813f6883ee459b9634042
-    sha256: fd1b92e315c078db508bd61542b9ee6cdce9557b8fc58984e914ca96fc2d92ba
-  manager: conda
-  name: statsmodels
-  optional: false
-  platform: win-64
-  url: https://conda.anaconda.org/conda-forge/win-64/statsmodels-0.13.5-py310h9b08ddd_2.tar.bz2
-  version: 0.13.5
-- category: main
-  dependencies:
-    matplotlib: '>=3.5.3'
-    numpy: 1.23.5.*
+    matplotlib: '>=3.6.2'
+    numpy: 1.24.1.*
     pip: '>=22.3.1'
-    python: '>=3.9,<3.12'
+    python: '>=3.9,<3.11'
+    serapeum_utils: '>=0.1.1'
   hash:
-    md5: 2e7d2a8e819e60c8a4c5ddbb07a6873a
-    sha256: 624f6ccb286c5fe0d04c5e924ea298c48f3ce5b4394ceb635b1ff1ebfc7cae71
+    md5: 32cd6422f2e5730a7b5cc9f8c41f968d
+    sha256: 0bbe8125ef7303883f96239af0973784337c4fc3fa901a052616f71e7b4a6756
   manager: conda
   name: cleopatra
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/cleopatra-0.2.4-pyhd8ed1ab_0.conda
-  version: 0.2.4
-- category: main
-  dependencies:
-    fiona: ''
-    folium: ''
-    geopandas-base: 0.12.2 pyha770c72_0
-    mapclassify: '>=2.4.0'
-    matplotlib-base: ''
-    python: '>=3.8'
-    rtree: ''
-    xyzservices: ''
-  hash:
-    md5: ee3b330f13297f5839d46e1ca3e57d56
-    sha256: 51660094efee2a74b24ab535e03005a6ddedc9e160c0d573cfaf2724312d171c
-  manager: conda
-  name: geopandas
-  optional: false
-  platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/geopandas-0.12.2-pyhd8ed1ab_0.conda
-  version: 0.12.2
+  url: https://conda.anaconda.org/conda-forge/noarch/cleopatra-0.2.7-pyhd8ed1ab_0.conda
+  version: 0.2.7
 - category: main
   dependencies:
     google-api-core: '>=1.31.5,<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0'
@@ -12493,25 +13965,82 @@ package:
   platform: win-64
   url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-storage-2.7.0-pyh1a96a4e_0.conda
   version: 2.7.0
+- category: main
+  dependencies:
+    networkx: ''
+    numpy: '>=1.3'
+    pandas: '>=1.0'
+    python: '>=3.6'
+    scikit-learn: ''
+    scipy: '>=1.0'
+  hash:
+    md5: db1aeaff6e248db425e049feffded7a9
+    sha256: 78aadbd9953976678b6e3298ac26a63cf9390a8794db3ff71f3fe5b6d13a35ca
+  manager: conda
+  name: mapclassify
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/noarch/mapclassify-2.5.0-pyhd8ed1ab_1.conda
+  version: 2.5.0
+- category: main
+  dependencies:
+    libarrow: 10.0.1 h0f2029a_6_cpu
+    numpy: '>=1.21.6,<2.0a0'
+    parquet-cpp: 1.5.1.*
+    python: '>=3.10,<3.11.0a0'
+    python_abi: 3.10.* *_cp310
+    ucrt: '>=10.0.20348.0'
+    vc: '>=14.2,<15'
+    vs2015_runtime: '>=14.29.30139'
+  hash:
+    md5: 2aa16dc9bb00ee85c0efdd5d28f096dc
+    sha256: 58fb08fe6b4ce69db34d6c470897ad5f2b8b1af013afa3331508c9aab5072e8c
+  manager: conda
+  name: pyarrow
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/win-64/pyarrow-10.0.1-py310hd1a9178_6_cpu.conda
+  version: 10.0.1
 - category: main
   dependencies:
     loguru: '>=0.6.0'
-    matplotlib: '>=3.5.3'
-    numpy: 1.23.5
-    pandas: '>=1.4.4'
+    matplotlib: '>=3.6.3'
+    numpy: 1.24.1
+    pandas: '>=1.5.3'
     pip: '>=22.3.1'
     python: '>=3.9,<3.15'
-    scikit-learn: '>=1.1.1'
+    scikit-learn: '>=1.2.1'
     scipy: '>=1.9.0'
   hash:
-    md5: 5f4118cfa89158b5b58bac3ac5495cba
-    sha256: 755f6d603c37c3dfcb48bf1379e81178474fdeec94eabc73af5825f96e8a3e4e
+    md5: 75342a0eb483db01f3b0e9dd3c607815
+    sha256: dc9cffa56c55a453d56b4fc1b2f057dde4a0f7dd06c4e8376de75b23e2e5cc5e
   manager: conda
   name: statista
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/statista-0.1.7-pyhd8ed1ab_0.conda
-  version: 0.1.7
+  url: https://conda.anaconda.org/conda-forge/noarch/statista-0.1.8-pyhd8ed1ab_0.conda
+  version: 0.1.8
+- category: main
+  dependencies:
+    numpy: '>=1.21.6,<2.0a0'
+    packaging: ''
+    pandas: '>=1.0'
+    patsy: '>=0.5.2'
+    python: '>=3.10,<3.11.0a0'
+    python_abi: 3.10.* *_cp310
+    scipy: '>=1.3'
+    ucrt: '>=10.0.20348.0'
+    vc: '>=14.2,<15'
+    vs2015_runtime: '>=14.29.30139'
+  hash:
+    md5: 470231039de813f6883ee459b9634042
+    sha256: fd1b92e315c078db508bd61542b9ee6cdce9557b8fc58984e914ca96fc2d92ba
+  manager: conda
+  name: statsmodels
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/win-64/statsmodels-0.13.5-py310h9b08ddd_2.tar.bz2
+  version: 0.13.5
 - category: main
   dependencies:
     future: ''
@@ -12525,14 +14054,33 @@ package:
     setuptools: ''
     six: ''
   hash:
-    md5: 30e3d2c755cf9c0c0483f01ab25c7e59
-    sha256: d498cf74bfa54861cc9a45b812d3321d0f482a62025c7abf69df914c94a3c3a9
+    md5: 08b32b1b1369cff511af1592403bb2af
+    sha256: 9590c61b4613c52412cb16dcff9a3eca6cc2be685eb45fbba49803f6c552df30
   manager: conda
   name: earthengine-api
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/earthengine-api-0.1.334-pyhd8ed1ab_1.conda
-  version: 0.1.334
+  url: https://conda.anaconda.org/conda-forge/noarch/earthengine-api-0.1.338-pyhd8ed1ab_0.conda
+  version: 0.1.338
+- category: main
+  dependencies:
+    fiona: ''
+    folium: ''
+    geopandas-base: 0.12.2 pyha770c72_0
+    mapclassify: '>=2.4.0'
+    matplotlib-base: ''
+    python: '>=3.8'
+    rtree: ''
+    xyzservices: ''
+  hash:
+    md5: ee3b330f13297f5839d46e1ca3e57d56
+    sha256: 51660094efee2a74b24ab535e03005a6ddedc9e160c0d573cfaf2724312d171c
+  manager: conda
+  name: geopandas
+  optional: false
+  platform: win-64
+  url: https://conda.anaconda.org/conda-forge/noarch/geopandas-0.12.2-pyhd8ed1ab_0.conda
+  version: 0.12.2
 - category: main
   dependencies:
     cartopy: ''
@@ -12558,86 +14106,93 @@ package:
     gdal: '>=3.5.3'
     geopandas: '>=0.12.2'
     geopy: '>=2.2.0'
+    h3-py: '>=3.7.4'
     loguru: '>=0.6.0'
     netcdf4: '>=1.6.1'
-    numpy: 1.23.5
+    numpy: 1.24.1
     pandas: '>=1.4.4'
     pip: '>=22.3.1'
+    pyarrow: '>=10.0.1'
     pyproj: '>=3.4.0'
     python: '>=3.9,<3.11'
+    pyyaml: '>=6.0'
     rasterio: '>=1.3.0'
     requests: '>=2.28.1'
     rtree: '>=1.0.0'
     shapely: '>=1.8.4,<2'
   hash:
-    md5: 4f85b9d893e953d3f330a705bb64dc75
-    sha256: e878e39e30761723e4b5f4c66fc250c00026e2ed62823f89cb72e3edc375fdeb
+    md5: e91e293e82bd2fc7da52944256d6d4e6
+    sha256: 947c0bf9ec7c366299c57f8b4cfcdc1320168bd6bfc6ce15921a9f11af821c21
   manager: conda
   name: pyramids
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/pyramids-0.2.11-pyhd8ed1ab_0.conda
-  version: 0.2.11
+  url: https://conda.anaconda.org/conda-forge/noarch/pyramids-0.3.2-pyhd8ed1ab_0.conda
+  version: 0.3.2
 - category: main
   dependencies:
-    cleopatra: '>=0.2.4'
+    cleopatra: '>=0.2.7'
     gdal: '>=3.5.3'
     geopandas: '>=0.12.2'
     geoplot: '>=0.5.1'
     loguru: '>=0.6.0'
-    numpy: 1.23.5
+    numpy: '>=1.24.1'
     pip: '>=22.3.1'
-    pyramids: 0.2.11
+    pyramids: '>=0.3.2'
     python: '>=3.9,<3.11'
   hash:
-    md5: 6b63a9d1e41e7fb8a9181435b9cb89c5
-    sha256: 50b88c8f546f9f9d860ca09316a89d6d5d33d2e815f9f8175c07eaa6dfeb8ea0
+    md5: bf10a9e624caf50d57e643a73f91a70c
+    sha256: 8fe74e096a93b016a5dff3e852ac3079d74a6f7e2d21bf4b05a347e9bcbe61fa
   manager: conda
   name: digitalearth
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/digitalearth-0.1.10-pyhd8ed1ab_0.conda
-  version: 0.1.10
+  url: https://conda.anaconda.org/conda-forge/noarch/digitalearth-0.1.11-pyhd8ed1ab_0.conda
+  version: 0.1.11
 - category: main
   dependencies:
+    boto3: '>=1.26.50'
     earthengine-api: '>=0.1.324'
     ecmwf-api-client: '>=1.6.3'
     gdal: '>=3.5.3'
     joblib: '>=1.2.0'
     loguru: '>=0.6.0'
     netcdf4: '>=1.6.1'
-    numpy: 1.23.5
+    numpy: 1.24.1
     pandas: '>=1.4.4'
+    pathlib: '>=1.0.1'
     pip: '>=22.3.1'
-    pyramids: '>=0.2.11'
+    pyramids: '>=0.3.2'
     python: '>=3.9,<3.11'
+    pyyaml: '>=6.0'
     requests: '>=2.28.1'
+    serapeum_utils: '>=0.1.1'
   hash:
-    md5: 51d9c69a0c90519b0dda07828319af81
-    sha256: 3c4cd924212bac400b9bfafa36ed729240a47eafc01b2c9d251d37012552e3ef
+    md5: 65d70fa30a34f9cbdc9dd130017310af
+    sha256: d75791533e3ea957c049ff3be8aae020003730d12216d149b6f086d2da66d8db
   manager: conda
   name: earth2observe
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/earth2observe-0.1.7-pyhd8ed1ab_0.conda
-  version: 0.1.7
+  url: https://conda.anaconda.org/conda-forge/noarch/earth2observe-0.2.2-pyhd8ed1ab_0.conda
+  version: 0.2.2
 - category: main
   dependencies:
     loguru: '>=0.6.0'
-    numpy: 1.23.5
+    numpy: '>=1.24.1'
     pip: '>=22.3.1'
-    pyramids: '>=0.2.11'
+    pyramids: '>=0.3.2'
     python: '>=3.9,<3.11'
-    statista: '>=0.1.7'
+    statista: '>=0.1.8'
   hash:
-    md5: 679b3be1963e704ad7e5a4d45c248c91
-    sha256: 6e4f2d622558a179e0c9e0d9ff3faf606ed19e4b22976ef3fec2d869d314c3ed
+    md5: 7e2419cadcd4ecd1b77855ff3b47634c
+    sha256: a7ea64da036da19c224d8e61efdf862ae91b932501dad7297d4a364b3329c80a
   manager: conda
   name: geostatista
   optional: false
   platform: win-64
-  url: https://conda.anaconda.org/conda-forge/noarch/geostatista-0.1.5-pyhd8ed1ab_0.conda
-  version: 0.1.5
+  url: https://conda.anaconda.org/conda-forge/noarch/geostatista-0.1.6-pyhd8ed1ab_0.conda
+  version: 0.1.6
 - category: main
   dependencies: {}
   hash:
diff --git a/docs/environment.yml b/docs/environment.yml
index 88d22864..22acbec8 100644
--- a/docs/environment.yml
+++ b/docs/environment.yml
@@ -3,14 +3,8 @@ name: hapi
 channels:
   - conda-forge
 dependencies:
-  - python >=3.7.1,<3.10
+  - python >=3.9,<3.11
   - pip >=21.3.1
   - matplotlib >=3.4.2,<3.6.0
-  - pandas >=1.3.2,<1.4.3
-  - geopandas >=0.10.2
-  - pip:
-    #- numpy
-    #- pyproj
-    #- ffmpeg-python
-#    # - hapi-nile
-    #- git+https://github.com/MAfarrag/HAPI.git
+#  - pandas >=1.3.2,<1.4.3
+#  - geopandas >=0.10.2
diff --git a/environment.yml b/environment.yml
index 8e37091f..48ca251d 100644
--- a/environment.yml
+++ b/environment.yml
@@ -2,27 +2,29 @@ channels:
   - conda-forge
 dependencies:
   - python >=3.9,<3.11
-  - numpy >=1.23.5
+  - numpy >=1.24.1
   - pip >=22.3.1
   - PyYAML >=0.6.0
   - pathlib >=1.0.1
   - gdal >=3.5.3
   - pandas >=1.4.4
   - geopandas >=0.12.2
-  - matplotlib >=3.5.3
+  - matplotlib >=3.6.3
   - scipy >=1.9.1
   - statsmodels >=0.13.2
   - rasterio >=1.3.2
   - rasterstats >=0.17.0
   - Fiona >=1.8.21
-  - geostatista >=0.1.5
-  - statista >=0.1.7
-  - pyramids >=0.2.11
-  - earth2observe >=0.1.7
+  - geostatista >=0.1.6
+  - statista >=0.1.8
+  - pyramids >=0.3.2
+  - earth2observe >=0.2.2
   - Oasis >=1.0.2
-  - digitalearth >=0.1.10
+  - cleopatra >=0.2.7
+  - digitalearth >=0.1.11
+  - serapeum_utils >=0.1.1
   - joblib >=1.2.0
   - loguru >=0.6.0
   - requests >=2.28.1
-  - pytest >=7.2.0
+  - pytest >=7.2.1
   - pytest-cov ==4.0.0
diff --git a/examples/hydrodynamic-models/FullyDynamic Model.py b/examples/hydrodynamic-models/FullyDynamic Model.py
index 1cd6d926..ac32e37d 100644
--- a/examples/hydrodynamic-models/FullyDynamic Model.py
+++ b/examples/hydrodynamic-models/FullyDynamic Model.py
@@ -4,7 +4,7 @@
 #%% create the River object
 start = "2010-1-1 00:00:00"
 end = "2010-1-1 05:00:00"
-# rrmstart="1950-1-1 00:00:00"
+# rrm_start="1950-1-1 00:00:00"
 # dx in meter
 dx = 20
 # dt in sec
@@ -36,5 +36,5 @@
 #%% Visualization
 start = "2010-01-01 00:00:00"
 end = "2010-1-1 05:00:00"
-# ffmpegPath = "F:/Users/mofarrag/.matplotlib/ffmpeg-4.4-full_build/bin/ffmpeg.exe"
+# ffmpeg_path = "F:/Users/mofarrag/.matplotlib/ffmpeg-4.4-full_build/bin/ffmpeg.exe"
 anim = Test.animatefloodwave(start=start, end=end, interval=2, textlocation=-1)
diff --git a/examples/hydrodynamic-models/analysis/StatisticalAnalysis(max-annual).py b/examples/hydrodynamic-models/analysis/StatisticalAnalysis(max-annual).py
index ff8e4a0e..da5f12fc 100644
--- a/examples/hydrodynamic-models/analysis/StatisticalAnalysis(max-annual).py
+++ b/examples/hydrodynamic-models/analysis/StatisticalAnalysis(max-annual).py
@@ -114,7 +114,7 @@
         Rhine_obs.SP.loc[Rhine_obs.SP["id"] == SubID, xlabels].values.tolist()[0],
         zorder=5,
         color=color3,
-        linestyle=V.LineStyle(0),
+        linestyle=V.getLineStyle(0),
         linewidth=5,
         label="GRDC",
     )
@@ -124,7 +124,7 @@
         Rhine.SP.loc[Rhine.SP["id"] == SubID, xlabels].values.tolist()[0],
         zorder=1,
         color=color2,
-        linestyle=V.LineStyle(9),
+        linestyle=V.getLineStyle(9),
         linewidth=5,
         label="HM ",
     )
@@ -137,7 +137,7 @@
         bbox_transform=ax1.transAxes,
         fontsize=12,
     )
-    # plt.title("Inundated Area ( 1000 cell)", fontsize = 15)
+    # plt.title("Inundated Area ( 1000 cell)", font_size = 15)
     plt.rcParams.update({"font.size": 12})
     plt.tight_layout()
     """
@@ -182,29 +182,29 @@
         Rhine_obs.Qrp.loc[SubID, :].values.tolist(),
         zorder=5,
         color=color3,
-        linestyle=V.LineStyle(0),
+        linestyle=V.getLineStyle(0),
         linewidth=5,
         label="Observed",
     )
     # ax1.plot(River1.Qrp.columns.tolist(),River1.Qrp.loc[SubID,:].values.tolist(),
     #          zorder=1, color = color1 , linestyle = V.LineStyle(6),
-    #          linewidth = 5, label = "RIM1.0")
+    #          line_width = 5, label = "RIM1.0")
     ax1.plot(
         Rhine.Qrp.columns.tolist(),
         Rhine.Qrp.loc[SubID, :].values.tolist(),
         zorder=1,
         color=color2,
-        linestyle=V.LineStyle(9),
+        linestyle=V.getLineStyle(9),
         linewidth=5,
         label="HM",
     )
     ax1.set_ylabel("Discharge (m3/s)", fontsize=15)
     ax1.set_xlabel("Return Period", fontsize=15)
-    # fig.legend(loc="upper right", bbox_to_anchor=(0.3,1), bbox_transform=ax1.transAxes,fontsize = 12)
-    # plt.title("Inundated Area ( 1000 cell)", fontsize = 15)
+    # fig.legend(loc="upper right", bbox_to_anchor=(0.3,1), bbox_transform=ax1.transAxes,font_size = 12)
+    # plt.title("Inundated Area ( 1000 cell)", font_size = 15)
     plt.rcParams.update({"font.size": 12})
     plt.tight_layout()
     """
@@ -237,14 +237,14 @@
     Rhine_obs.Qrp.loc[SubID, :].values.tolist(),
     zorder=3,
     color=color3,
-    linestyle=V.LineStyle(0),
+    linestyle=V.getLineStyle(0),
     linewidth=linewidth,
     label="Observed",
 )
 # ax[0,0].plot(River1.Qrp.columns.tolist(),River1.Qrp.loc[SubID,:].values.tolist(),
 #              zorder=1, color = color1 , linestyle = V.LineStyle(6),
-#              linewidth = linewidth, label = "RIM1.0")
+#              line_width = line_width, label = "RIM1.0")

 ax[0, 0].plot(
@@ -252,7 +252,7 @@
     Rhine.Qrp.loc[SubID, :].values.tolist(),
     zorder=2,
     color=color2,
-    linestyle=V.LineStyle(9),
+    linestyle=V.getLineStyle(9),
     linewidth=linewidth,
     label="HM",
 )
@@ -287,7 +287,7 @@
 ax[0, 0].set_yticks([5000, 10000, 15000, 20000])
 # ax[0,0].set_xticks([2,200,400,600,800,1000])
 ax[0, 0].set_ylabel("Discharge (m3/s)", fontsize=labelsize)
-# ax[0,0].set_xlabel("Return Period (year)", fontsize = labelsize)
+# ax[0,0].set_xlabel("Return Period (year)", font_size = label_size)

 SubID = 42
 ax[0, 1].plot(
@@ -295,14 +295,14 @@
     Rhine_obs.Qrp.loc[SubID, :].values.tolist(),
     zorder=5,
     color=color3,
-    linestyle=V.LineStyle(0),
+    linestyle=V.getLineStyle(0),
     linewidth=linewidth,
     label="Observed",
 )
 # ax[0,1].plot(River1.Qrp.columns.tolist(),River1.Qrp.loc[SubID,:].values.tolist(),
 #              zorder=1, color = color1 , linestyle = V.LineStyle(6),
-#              linewidth = linewidth, label = "RIM1.0")
+#              line_width = line_width, label = "RIM1.0")

 ax[0, 1].plot(
@@ -310,15 +310,15 @@
     Rhine.Qrp.loc[SubID, :].values.tolist(),
     zorder=1,
     color=color2,
-    linestyle=V.LineStyle(9),
+    linestyle=V.getLineStyle(9),
     linewidth=linewidth,
     label="HM",
 )
 ax[0, 1].set_yticks([6000, 10000, 15000, 20000])
 # ax[0,1].set_xticks([2,200,400,600,800,1000])
-# ax[0,1].set_ylabel("Discharge (m3/s)", fontsize = labelsize)
-# ax[0,1].set_xlabel("Return Period (year)", fontsize = labelsize)
+# ax[0,1].set_ylabel("Discharge (m3/s)", font_size = label_size)
+# ax[0,1].set_xlabel("Return Period (year)", font_size = label_size)

 # start, end = ax[0,1].get_ylim()
 # ax[0,1].yaxis.set_ticks(np.linspace(start,end,4))
@@ -358,14 +358,14 @@
     Rhine_obs.Qrp.loc[SubID, :].values.tolist(),
     zorder=5,
     color=color3,
-    linestyle=V.LineStyle(0),
+    linestyle=V.getLineStyle(0),
     linewidth=linewidth,
     label="Observed",
 )
 # ax[0,2].plot(River1.Qrp.columns.tolist(),River1.Qrp.loc[SubID,:].values.tolist(),
 #              zorder=1, color = color1 , linestyle = V.LineStyle(6),
-#              linewidth = linewidth, label = "RIM1.0")
+#              line_width = line_width, label = "RIM1.0")

 ax[0, 2].plot(
@@ -373,15 +373,15 @@
     Rhine.Qrp.loc[SubID, :].values.tolist(),
     zorder=1,
     color=color2,
-    linestyle=V.LineStyle(9),
+    linestyle=V.getLineStyle(9),
     linewidth=linewidth,
     label="HM",
 )
 ax[0, 2].set_yticks([6000, 10000, 15000, 20000])
 # ax[0,2].set_xticks([2,200,400,600,800,1000])
-# ax[0,2].set_ylabel("Discharge (m3/s)", fontsize = labelsize)
-# ax[0,2].set_xlabel("Return Period (year)", fontsize = labelsize)
+# ax[0,2].set_ylabel("Discharge (m3/s)", font_size = label_size)
+# ax[0,2].set_xlabel("Return Period (year)", font_size = label_size)
 # points
 if PlotPoint:
     fromd = Qgauges.loc[Qgauges["SubID"] == SubID, "start"].tolist()[0]
@@ -418,14 +418,14 @@
     Rhine_obs.Qrp.loc[SubID, :].values.tolist(),
     zorder=5,
     color=color3,
-    linestyle=V.LineStyle(0),
+    linestyle=V.getLineStyle(0),
     linewidth=linewidth,
     label="Observed",
 )
 # ax[1,0].plot(River1.Qrp.columns.tolist(),River1.Qrp.loc[SubID,:].values.tolist(),
 #              zorder=1, color = color1 , linestyle = V.LineStyle(6),
-#              linewidth = linewidth, label = "RIM1.0")
+#              line_width = line_width, label = "RIM1.0")

 ax[1, 0].plot(
@@ -433,13 +433,13 @@
     Rhine.Qrp.loc[SubID, :].values.tolist(),
     zorder=1,
     color=color2,
-    linestyle=V.LineStyle(9),
+    linestyle=V.getLineStyle(9),
     linewidth=linewidth,
     label="HM",
 )
 ax[1, 0].set_ylabel("Discharge (m3/s)", fontsize=labelsize)
-# ax[1,0].set_xlabel("Return Period (year)", fontsize = labelsize)
+# ax[1,0].set_xlabel("Return Period (year)", font_size = label_size)
 ax[1, 0].set_yticks([4000, 8000, 12000, 16000])
 # ax[1,0].set_xticks([2,200,400,600,800,1000])
@@ -478,14 +478,14 @@
     Rhine_obs.Qrp.loc[SubID, :].values.tolist(),
     zorder=5,
     color=color3,
-    linestyle=V.LineStyle(0),
+    linestyle=V.getLineStyle(0),
     linewidth=linewidth,
     label="Observed",
 )
 # ax[1,1].plot(River1.Qrp.columns.tolist(),River1.Qrp.loc[SubID,:].values.tolist(),
 #              zorder=1, color = color1 , linestyle = V.LineStyle(6),
-#              linewidth = linewidth, label = "RIM1.0")
+#              line_width = line_width, label = "RIM1.0")

 ax[1, 1].plot(
@@ -493,13 +493,13 @@
     Rhine.Qrp.loc[SubID, :].values.tolist(),
     zorder=1,
     color=color2,
-    linestyle=V.LineStyle(9),
+    linestyle=V.getLineStyle(9),
     linewidth=linewidth,
     label="HM",
 )
-# ax[1,1].set_ylabel("Discharge (m3/s)", fontsize = labelsize)
-# ax[1,1].set_xlabel("Return Period (year)", fontsize = labelsize)
+# ax[1,1].set_ylabel("Discharge (m3/s)", font_size = label_size)
+# ax[1,1].set_xlabel("Return Period (year)", font_size = label_size)
 ax[1, 1].set_yticks([3000, 5500, 7500, 10000])
 # ax[1,1].set_xticks([2,200,400,600,800,1000])
@@ -538,14 +538,14 @@
     Rhine_obs.Qrp.loc[SubID, :].values.tolist(),
     zorder=5,
     color=color3,
-    linestyle=V.LineStyle(0),
+    linestyle=V.getLineStyle(0),
     linewidth=linewidth,
     label="Observed",
 )
 # ax[1,2].plot(River1.Qrp.columns.tolist(),River1.Qrp.loc[SubID,:].values.tolist(),
 #              zorder=1, color = color1 , linestyle = V.LineStyle(6),
-#              linewidth = linewidth, label = "RIM1.0")
+#              line_width = line_width, label = "RIM1.0")

 ax[1, 2].plot(
@@ -553,13 +553,13 @@
     Rhine.Qrp.loc[SubID, :].values.tolist(),
     zorder=1,
     color=color2,
-    linestyle=V.LineStyle(9),
+    linestyle=V.getLineStyle(9),
     linewidth=linewidth,
     label="HM",
 )
-# ax[1,2].set_ylabel("Discharge (m3/s)", fontsize = labelsize)
-# ax[1,2].set_xlabel("Return Period (year)", fontsize = labelsize)
+# ax[1,2].set_ylabel("Discharge (m3/s)", font_size = label_size)
+# ax[1,2].set_xlabel("Return Period (year)", font_size = label_size)
 ax[1, 2].set_yticks([1500, 3500, 5500, 8000])
 # ax[1,2].set_xticks([2,200,400,600,800,1000])
@@ -598,14 +598,14 @@
     Rhine_obs.Qrp.loc[SubID, :].values.tolist(),
     zorder=5,
     color=color3,
-    linestyle=V.LineStyle(0),
+    linestyle=V.getLineStyle(0),
     linewidth=linewidth,
     label="Observed",
 )
 # ax[2,0].plot(River1.Qrp.columns.tolist(),River1.Qrp.loc[SubID,:].values.tolist(),
 #              zorder=1, color = color1 , linestyle = V.LineStyle(6),
-#              linewidth = linewidth, label = "RIM1.0")
+#              line_width = line_width, label = "RIM1.0")

 ax[2, 0].plot(
@@ -613,7 +613,7 @@
     Rhine.Qrp.loc[SubID, :].values.tolist(),
     zorder=1,
     color=color2,
-    linestyle=V.LineStyle(9),
+    linestyle=V.getLineStyle(9),
     linewidth=linewidth,
     label="HM",
 )
@@ -658,14 +658,14 @@
     Rhine_obs.Qrp.loc[SubID, :].values.tolist(),
     zorder=5,
     color=color3,
-    linestyle=V.LineStyle(0),
+    linestyle=V.getLineStyle(0),
     linewidth=linewidth,
     label="Observed",
 )
 # ax[2,1].plot(River1.Qrp.columns.tolist(),River1.Qrp.loc[SubID,:].values.tolist(),
 #              zorder=1, color = color1 , linestyle = V.LineStyle(6),
-#              linewidth = linewidth, label = "RIM1.0")
+#              line_width = line_width, label = "RIM1.0")

 ax[2, 1].plot(
@@ -673,13 +673,13 @@
     Rhine.Qrp.loc[SubID, :].values.tolist(),
     zorder=1,
     color=color2,
-    linestyle=V.LineStyle(9),
+    linestyle=V.getLineStyle(9),
     linewidth=linewidth,
     label="HM",
 )
 ax[2, 1].set_yticks([750, 1500, 2250, 3000])
 # ax[2,1].set_xticks([2,200,400,600,800,1000])
-# ax[2,1].set_ylabel("Discharge (m3/s)", fontsize = labelsize)
+# ax[2,1].set_ylabel("Discharge (m3/s)", font_size = label_size)
 ax[2, 1].set_xlabel("Return Period (year)", fontsize=labelsize)

 # points
@@ -717,14 +717,14 @@
     Rhine_obs.Qrp.loc[SubID, :].values.tolist(),
     zorder=5,
     color=color3,
-    linestyle=V.LineStyle(0),
+    linestyle=V.getLineStyle(0),
     linewidth=linewidth,
     label="Observed",
 )
 # ax[2,2].plot(River1.Qrp.columns.tolist(),River1.Qrp.loc[SubID,:].values.tolist(),
 #              zorder=1, color = color1 , linestyle = V.LineStyle(6),
-#              linewidth = linewidth, label = "RIM1.0")
+#              line_width = line_width, label = "RIM1.0")

 ax[2, 2].plot(
@@ -732,14 +732,14 @@
     Rhine.Qrp.loc[SubID, :].values.tolist(),
     zorder=1,
     color=color2,
-    linestyle=V.LineStyle(9),
+    linestyle=V.getLineStyle(9),
     linewidth=linewidth,
     label="HM",
 )
 ax[2, 2].set_yticks([200, 450, 700, 950])
 # ax[2,2].set_xticks([2,200,400,600,800,1000])
-# ax[2,2].set_ylabel("Discharge (m3/s)", fontsize = labelsize)
+# ax[2,2].set_ylabel("Discharge (m3/s)", font_size = label_size)
 ax[2, 2].set_xlabel("Return Period (year)", fontsize=labelsize)
 # points
 if PlotPoint:
diff --git a/examples/hydrodynamic-models/calibration/01CalibrationDataPreparation.py b/examples/hydrodynamic-models/calibration/01CalibrationDataPreparation.py
index 4fb02160..7f0897e2 100644
--- a/examples/hydrodynamic-models/calibration/01CalibrationDataPreparation.py
+++ b/examples/hydrodynamic-models/calibration/01CalibrationDataPreparation.py
@@ -5,69 +5,30 @@
 The code also extracts the simulated hydrograph using swim at the downstream
 node to the sub-basins
-
-Inputs:
-    1- DataPath :
-        [string] path to the folder where the calibration folders ("GRDC","RIM",
-        "SWIM") the GRDC data, Trace file, RIMSubBasinFile exist
-    2- GRDC data:
-        1- GRDC_file:
-            [string] .dat file contains time series of daily discharge data
-            with ID associated with each one
-        2-processed_data_file:
-            [string] Excel file contains station names with column "MessID"
-            having the an ID and anothe column "SWIMBasinID" having the
-            Sub-basin ID
-
-    3- Trace file:
-        [string] text file contains two columns the first in the sub-basin ID and
-        the second is the ID of the down stream computational swim node
-
-    4- RIMSubBasinFile:
-        [string] text file contains the Sub-basin IDs which is routed using RIM
-
-
-files names have to contain the extension (".txt", ".xls",".xlxs")
-
-Outputs:
-    1- "GRDC","RIM","SWIM" :
-        txt file for each sub-basin with the name as the subID in three folders
-        "GRDC","RIM","SWIM"
-    2- "calibratedSubs.txt" :
-        text file contains the subID of the sub-basins which have GRDC data and
-        those are the sub-basins that the code collects their data in the
-        previous three folders
 """
-#%% Libraries
 import os

 import matplotlib

 matplotlib.use("TkAgg")
-# Comp = "F:/02Case studies/Rhine"
-Comp = r"C:\gdrive\Case-studies\ClimXtreme\rim_base_data\setup"
+rpath = r"C:\gdrive\\Case-studies"
+Comp = rf"{rpath}\ClimXtreme\rim_base_data\setup"
 os.chdir(Comp + "/base_data/calibration_results")

 import Hapi.hm.calibration as RC
 import Hapi.hm.river as R

-#%% Links
-# RIM files
-RIMResultsPath = Comp + "/base_data/calibration_results/all_results/rhine/"
-RRMPath = Comp + "/base_data/mHM/"
-
-DataPath = Comp + "/base_data/calibration_results/"
-RIMdata = Comp + "/base_data/calibrated_cross_sections/rhine/"
-SaveQ = DataPath + "/gauge_results/Discharge/"
-SaveWL = DataPath + "/gauge_results/Water_Level/"
-GaugesPath = "F:/RFM/ClimXtreme/data"
+#%% Hydraulic model files
+hm_results_path = (
+    rf"{rpath}\ClimXtreme\rim_base_data\setup\freq_analysis_rhine\1\results\1d"
+)
+hm_data = f"{Comp}/base_data/calibrated_cross_sections/rhine/"

-addHQ2 = False
-SaveTo = Comp + "/base_data/calibration/"
+base_dir = rf"{rpath}\ClimXtreme\rim_base_data\setup\freq_analysis_rhine\1\results\gauges_results"
+save_q = f"{base_dir}/q/"
+save_wl = f"{base_dir}/wl/"
 #%% Gauges data
-
-GaugesF = GaugesPath + "/gauges/rhine_gauges.geojson"
-WLGaugesPath = GaugesPath + "/gauges/Water_Levels/"
-QgaugesPath = GaugesPath + "/gauges/discharge/"
+gauges_dir = f"{rpath}/ClimXtreme/data"
+GaugesF = f"{gauges_dir}/gauges/rhine_gauges.geojson"

 novalue = -9
 start = "1951-01-01"
@@ -75,24 +36,24 @@
 Calib = RC.Calibration("RIM", version=3)
 Calib.readGaugesTable(GaugesF)
-Calib.readObservedQ(QgaugesPath, start, end, novalue)
-Calib.readObservedWL(WLGaugesPath, start, end, novalue)
 #%%
 start = "1955-1-1"
 rrmstart = "1955-1-1"
-
 River = R.River("RIM", version=3, start=start, rrmstart=rrmstart)
-River.onedresultpath = RIMResultsPath
-River.readSlope(RIMdata + "/slope_rhine.csv")
-River.readXS(RIMdata + "/xs_rhine.csv")
-# River.RiverNetwork(RIMdata + "/rivernetwork.txt")
+River.onedresultpath = hm_results_path
+River.readSlope(hm_data + "/slope_rhine.csv")
+River.readXS(hm_data + "/xs_rhine.csv")
 #%%
 column = "oid"
 segments = list(set(Calib.hm_gauges["id"]))
-for i in range(len(segments)):
+
+for i in range(20, len(segments)):
     SubID = segments[i]
-    Sub = R.Sub(SubID, River)
-    Sub.read1DResult()
+    if not os.path.exists(f"{hm_results_path}/{SubID}.zip"):
+        print(f"{hm_results_path}/{SubID}.zip file does not exist")
+        continue
+    Sub = R.Reach(SubID, River)
+    Sub.read1DResult(path=hm_results_path, extension=".zip")
     # get the gauges that are in the segment
     Gauges = Calib.hm_gauges.loc[Calib.hm_gauges["id"] == SubID, :]
     Gauges.index = range(len(Gauges))
@@ -106,21 +67,17 @@
         # Extract Results at the gauges
         Sub.read1DResult(xsid=GagueXS)
         print("Extract the XS results - " + str(fname))
-        # Q = Sub.XSHydrographs[GagueXS].to_frame()#.resample('D').mean()
-        Q = Sub.XSHydrographs[GagueXS].resample("D").mean().to_frame()
+        Q = Sub.xs_hydrograph[GagueXS].resample("D").mean().to_frame()
         Q["date"] = ["'" + str(i)[:10] + "'" for i in Q.index]
         Q = Q.loc[:, ["date", GagueXS]]
-        WL = Sub.XSWaterLevel[GagueXS].resample("D").mean().to_frame()
+        WL = Sub.xs_water_level[GagueXS].resample("D").mean().to_frame()
         WL["date"] = Q["date"]
         WL = WL.loc[:, ["date", GagueXS]]
         Q.to_csv(
-            SaveQ + str(fname) + ".txt",
-            index=False,
-            index_label="Date",
-            float_format="%.3f",
+            f"{save_q}{fname}.txt", index=False, index_label="Date", float_format="%.3f"
         )
         WL.to_csv(
-            SaveWL + str(fname) + ".txt",
+            f"{save_wl}{fname}.txt",
             index=False,
             index_label="Date",
             float_format="%.3f",
diff --git a/examples/hydrodynamic-models/calibration/04CalibrateDike.py b/examples/hydrodynamic-models/calibration/04CalibrateDike.py
index 0ffd10f9..52d46228 100644
--- a/examples/hydrodynamic-models/calibration/04CalibrateDike.py
+++ b/examples/hydrodynamic-models/calibration/04CalibrateDike.py
@@ -50,13 +50,13 @@
 River.getCapacity("Qc2", Option=2)
 River.calibrateDike("RP", "QcRP")

-River.crosssections["ZlDiff"] = (
-    River.crosssections["zlnew"].values - River.crosssections["zl"].values
+River.cross_sections["ZlDiff"] = (
+    River.cross_sections["zlnew"].values - River.cross_sections["zl"].values
 )
-River.crosssections["ZrDiff"] = (
-    River.crosssections["zrnew"].values - River.crosssections["zr"].values
+River.cross_sections["ZrDiff"] = (
+    River.cross_sections["zrnew"].values - River.cross_sections["zr"].values
 )
-# River.crosssections.to_csv(RIM2Files+"XS100.csv", index = None)
+# River.cross_sections.to_csv(RIM2Files+"XS100.csv", index = None)
 #%%
 # read the overtopping files
@@ -65,18 +65,18 @@
 Event = E.Event("RIM2.0")
 Event.overtopping(wpath + "/processing/" + "overtopping.txt")
 # get the end days of each event
-Event.GetAllEvents()
+Event.getAllEvents()

-River.EventIndex = Event.EventIndex
+River.EventIndex = Event.event_index
 # read the left and right overtopping 1D results
 River.overtopping()
 XSleft = list()
 XSright = list()

-print("No of Events = " + str(len(Event.EndDays)))
-for i in range(len(Event.EndDays)):
+print("No of Events = " + str(len(Event.end_days)))
+for i in range(len(Event.end_days)):
     # get the cross section that was overtopped for a specific day
-    XSlefti, XSrighti = River.getOvertoppedXS(Event.EndDays[i], True)
+    XSlefti, XSrighti = River.getOvertoppedXS(Event.end_days[i], True)
     XSleft = XSleft + XSlefti
     XSright = XSright + XSrighti

@@ -87,16 +87,16 @@
 # raise the left dike of the overtopped cross section by 0.5 meter
 for i in XSleft:
     # print(i)
-    # print(River.crosssections.loc[i-1,'xsid'])
-    River.crosssections.loc[i - 1, "zl"] = River.crosssections.loc[i - 1, "zl"] + 0.5
+    # print(River.cross_sections.loc[i-1,'xsid'])
+    River.cross_sections.loc[i - 1, "zl"] = River.cross_sections.loc[i - 1, "zl"] + 0.5

 for i in XSright:
     # print(i)
-    # print(River.crosssections.loc[i-1,'xsid'])
-    River.crosssections.loc[i - 1, "zr"] = River.crosssections.loc[i - 1, "zr"] + 0.5
+    # print(River.cross_sections.loc[i-1,'xsid'])
+    River.cross_sections.loc[i - 1, "zr"] = River.cross_sections.loc[i - 1, "zr"] + 0.5

 # get the subs that was inundated
 # floodedSubs = River1.GetFloodedSubs(OvertoppedXS = XSleft + XSright)
 #%% Save the new cross section file
-River.crosssections.to_csv(newxsPath, index=None)
+River.cross_sections.to_csv(newxsPath, index=None)
diff --git a/examples/hydrodynamic-models/calibration/Error.py b/examples/hydrodynamic-models/calibration/Error.py
index 9fb5071c..82964d9d 100644
--- a/examples/hydrodynamic-models/calibration/Error.py
+++ b/examples/hydrodynamic-models/calibration/Error.py
@@ -2,7 +2,7 @@

 - To run this code you have to prepare the calibration data first in a specific
 format and folder structure and to do that you have to run
-the code 01CalibrationDataPreparation.py""
+the code 01calibration_data_preparation.py""
 """
 import datetime as dt
@@ -105,15 +105,15 @@
 #     pos = max(SimMax, ObsMax)
 #     plt.annotate("SubID = " + str(int(Calib.q_hm.columns[i])), xy=(dt.datetime(1971,1,1),pos-10),
-#                  fontsize = 20)
+#                  font_size = 20)
 #     plt.annotate("RMSE = " + str(round(Calib.MetricsHM_RRM.loc[Calib.rrm_gauges[i],'rmse'],2)), xy=(dt.datetime(1971,1,1),pos-40),
-#                  fontsize = 15)
+#                  font_size = 15)
 #     plt.annotate("KGE = " + str(round(Calib.MetricsHM_RRM.loc[Calib.rrm_gauges[i],'KGE'],2)), xy=(dt.datetime(1971,1,1),pos-70),
-#                  fontsize = 15)
+#                  font_size = 15)
 #     plt.annotate("NSE = " + str(round(Calib.MetricsHM_RRM.loc[Calib.rrm_gauges[i],'NSE'],2)), xy=(dt.datetime(1971,1,1),pos-100),
-#                  fontsize = 15)
+#                  font_size = 15)
 #     plt.annotate("WB = " + str(round(Calib.MetricsHM_RRM.loc[Calib.rrm_gauges[i],'WB'],2)), xy=(dt.datetime(1971,1,1),pos-130),
-#                  fontsize = 15)
+#                  font_size = 15)
 plt.xlabel("Time", fontsize=15)
 plt.ylabel("Discharge m3/s", fontsize=15)
 plt.xticks(fontsize=10)
@@ -121,7 +121,7 @@
 plt.legend(fontsize=15, framealpha=0.1)
 plt.title("Station: " + str(subid), fontsize=30)
 plt.tight_layout()
-# plt.savefig(str(subid)+".tif", transparent=True)
+# plt.savefig(str(reach_id)+".tif", transparent=True)
 #%% save Metrics dataframe to display in arc map
-Calib.SaveMetices(SaveTo)
+Calib.saveMetices(SaveTo)
 # sumarry.to_csv(DataPath + "summary.txt")
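A note on the reworked data-preparation loop in 01CalibrationDataPreparation.py above: it now reads the zipped per-reach 1D results through the renamed Reach class instead of Sub, then resamples the hourly hydrograph to daily means before saving. A minimal sketch of that pattern, assuming a Hapi >= 1.6 install; reach_id, xs_id, and all paths below are illustrative placeholders, not values taken from the repository:

import os

import Hapi.hm.river as R

# build the river object the same way the script above does
river = R.River("RIM", version=3, start="1955-1-1", rrmstart="1955-1-1")
river.readSlope("slope_rhine.csv")  # illustrative path
river.readXS("xs_rhine.csv")  # illustrative path

hm_results_path = "results/1d"  # illustrative results folder
reach_id = 1  # illustrative reach/segment id
if os.path.exists(f"{hm_results_path}/{reach_id}.zip"):
    reach = R.Reach(reach_id, river)
    # read the zipped 1D results for this reach only
    reach.read1DResult(path=hm_results_path, extension=".zip")
    # daily-mean hydrograph at one cross-section, as in the gauge loop above
    xs_id = 100  # illustrative cross-section id
    q_daily = reach.xs_hydrograph[xs_id].resample("D").mean().to_frame()
    q_daily.to_csv(f"{reach_id}.txt", float_format="%.3f")

The guard around the zip file mirrors the script's own behavior: reaches without a results archive are skipped rather than raising inside read1DResult.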
"/inputs/rrm/hm_location" twodresultpath = rpath + "/results/2d/zip/" @@ -82,9 +82,9 @@ # read the data of the river """the hourly results""" River.onedresultpath = onedresultpath -River.usbcpath = usbcpath +River.us_bc_path = usbcpath """the 1min results if exist""" -# River.oneminresultpath = oneminresultpath +# River.one_min_result_path = one_min_result_path """river slope, cross-sections, and river network""" River.readSlope(river_slope) River.readXS(xs_file) @@ -94,8 +94,8 @@ """ the results of the rain-runoff model""" River.rrmpath = rrmpath """2D model results""" -# River.twodresultpath = twodresultpath -# River.Compressed = True +# River.two_d_result_path = two_d_result_path +# River.compressed = True # %% Interface # The interface between the rainfall-runoff model and the hydraulic model IF = Interface("Rhine", start=start) @@ -138,7 +138,7 @@ print(print(gauges)) except KeyError: print("No gauge - choose another gauge to compare") - gaugexs = Sub.lastxs + gaugexs = Sub.last_xs segment_xs = str(SubID) + "_" + str(gaugexs) # get the gauge of the upstream segment """ write the segment id you want to get its data""" @@ -194,8 +194,8 @@ # Specific XS specificxs = False -start = str(Sub.firstday)[:-9] -end = str(Sub.lastday)[:-9] +start = str(Sub.first_day)[:-9] +end = str(Sub.last_day)[:-9] fig, ax = Sub.plotQ( Calib, @@ -241,7 +241,7 @@ # str(dt.datetime.now())[0:11] + ".png") # %% Hydrograph progression in a segment xss = [] -start = str(Sub.firstday)[:-9] +start = str(Sub.first_day)[:-9] end = "1955-03-01" fromxs = "" toxs = "" @@ -261,8 +261,8 @@ # str(gauges.loc[gaugei, 'name']) + # str(dt.datetime.now())[0:11] + ".png") # %% Water Level -start = str(Sub.firstday.date()) -end = str(Sub.lastday.date()) +start = str(Sub.first_day.date()) +end = str(Sub.last_day.date()) Sub.plotWL(Calib, start, end, gaugexs, stationname, gaugename, plotgauge=True) @@ -284,17 +284,17 @@ # %% calibration (the bed level change the levels) # NoSegments = 1 # read theoriginal slope and XS files -Calib.crosssections = River.crosssections +Calib.cross_sections = River.cross_sections Calib.slope = River.slope BedlevelDS = 88 Manning = 0.06 BC_slope = -0.03 Calib.calculateProfile(SubID, BedlevelDS, Manning, BC_slope) -# River.crosssections.to_csv(tolopogy_file + "/xs_rhine2.csv", index=False, float_format="%.3f") +# River.cross_sections.to_csv(tolopogy_file + "/xs_rhine2.csv", index=False, float_format="%.3f") # River.slope.to_csv(tolopogy_file + "/slope2.csv",header=None,index=False) #%% Smooth cross section -Calib.crosssections = River.crosssections[:] +Calib.cross_sections = River.cross_sections[:] Calib.smoothMaxSlope(SubID) Calib.smoothBedLevel(SubID) Calib.downWardBedLevel(SubID, 0.05) @@ -302,7 +302,7 @@ # Calib.SmoothFloodplainHeight(SubID) Calib.smoothBedWidth(SubID) # Calib.CheckFloodplain() -# Calib.crosssections.to_csv(tolopogy_file + "/XS2.csv", index=None, float_format="%.3f") +# Calib.cross_sections.to_csv(tolopogy_file + "/XS2.csv", index=None, float_format="%.3f") #%% customized Run result saveing # the last cross section results to use it in calibration """ @@ -313,32 +313,32 @@ you have to un comment the following two lines """ # Path = wpath + "/results/customized_results/" -Sub.saveHydrograph(Sub.lastxs) # Path +Sub.saveHydrograph(Sub.last_xs) # Path # %% Filters """ check the max sf """ ## calculate the water surface difference -# wl = Reach.Result1D.loc[Reach.Result1D.index[i],'wl'] +# wl = Reach.results_1d.loc[Reach.results_1d.index[i],'wl'] sf = [ ( - 
Sub.Result1D.loc[Sub.Result1D.index[i], "wl"] - - Sub.Result1D.loc[Sub.Result1D.index[i + 1], "wl"] + Sub.results_1d.loc[Sub.results_1d.index[i], "wl"] + - Sub.results_1d.loc[Sub.results_1d.index[i + 1], "wl"] ) / 500 - for i in range(len(Sub.Result1D.index) - 1) + for i in range(len(Sub.results_1d.index) - 1) ] sf = sf + [np.mean(sf)] -Sub.Result1D["sf"] = sf +Sub.results_1d["sf"] = sf -print(Sub.Result1D[Sub.Result1D["sf"] == Sub.Result1D["sf"].max()]) -print(Sub.Result1D[Sub.Result1D["sf"] == Sub.Result1D["sf"].min()]) +print(Sub.results_1d[Sub.results_1d["sf"] == Sub.results_1d["sf"].max()]) +print(Sub.results_1d[Sub.results_1d["sf"] == Sub.results_1d["sf"].min()]) """some filter to get where the min depth (dryness limit)""" -# dataX = Reach.Result1D[Reach.Result1D['xs'] == 700] -dataX = Sub.Result1D[Sub.Result1D["h"] == 0.01] -# dataX = Reach.Result1D[Reach.Result1D['xs'] == 121] +# dataX = Reach.results_1d[Reach.results_1d['xs'] == 700] +dataX = Sub.results_1d[Sub.results_1d["h"] == 0.01] +# dataX = Reach.results_1d[Reach.results_1d['xs'] == 121] #%% get the boundary conditions start = "1955-01-01" end = "1955-03-21" @@ -350,24 +350,24 @@ Vis = V(resolution="Hourly") -Vis.GroundSurface( +Vis.plotGroundSurface( Sub, floodplain=True, - plotlateral=True, - nxlabels=20, - fromxs=fromxs, - toxs=toxs, + plot_lateral=True, + xlabels_number=20, + from_xs=fromxs, + to_xs=toxs, option=2, ) #%% cross-sections -fig, ax = Vis.CrossSections( +fig, ax = Vis.plotCrossSections( Sub, bedlevel=True, - fromxs=fromxs, - toxs=toxs, - samescale=True, - textspacing=[(1, 1), (1, 4)], - plottingoption=3, + from_xs=fromxs, + to_xs=toxs, + same_scale=True, + text_spacing=[(1, 1), (1, 4)], + plotting_option=3, ) #%% Animation """ periods of water level exceeds the bankful depth""" @@ -381,11 +381,11 @@ start, end, fps=2, - nxlabels=5, - fromxs=fromxs, - toxs=toxs, - xaxislabelsize=10, - textlocation=(-1, -2), + xlabels_number=5, + from_xs=fromxs, + to_xs=toxs, + x_axis_label_size=10, + text_location=(-1, -2), repeat=True, ) plt.close() @@ -401,7 +401,7 @@ start = "1955-01-01" end = "1955-01-10" -Sub.readSubDailyResults(start, end, Lastsegment=True) +Sub.readSubDailyResults(start, end, last_river_reach=True) #%% # negative values # TODO : check CheckNegativeQ makes problem @@ -419,8 +419,8 @@ start, end, interval=0.000000000000000000000000000000000001, - fromxs=fromxs, - toxs=toxs, + from_xs=fromxs, + to_xs=toxs, ) #%% Q for all XS """ @@ -469,35 +469,37 @@ # table_new['logQ'] = np.log10(table_new ['Q']) # table_new['logH'] = np.log10(table_new ['depth']) -dbf = Sub.crosssections["dbf"][Sub.crosssections["xsid"] == Sub.xsname[0]].values[0] -b = Sub.crosssections["b"][Sub.crosssections["xsid"] == Sub.xsname[0]].values[0] +dbf = Sub.cross_sections["dbf"][Sub.cross_sections["xsid"] == Sub.xs_names[0]].values[0] +b = Sub.cross_sections["b"][Sub.cross_sections["xsid"] == Sub.xs_names[0]].values[0] Abf = dbf * b Pbf = b + 2 * dbf # Qdbf = (1.0/0.03)*(Abf *((Abf/Pbf)**(2.0/3.0)))*((0.1/500)**0.5) plt.figure(50, figsize=(15, 8)) -# plt.plot(table_new['area_T'],table_new['depth'], label = 'Area_T', linewidth = 5) -# plt.plot(table_new['area_U'],table_new['depth'], label = 'Area_U', linewidth = 5) -# plt.plot(table_new['area_L'],table_new['depth'], label = 'Area_L', linewidth = 5) +# plt.plot(table_new['area_T'],table_new['depth'], label = 'Area_T', line_width = 5) +# plt.plot(table_new['area_U'],table_new['depth'], label = 'Area_U', line_width = 5) +# plt.plot(table_new['area_L'],table_new['depth'], label = 'Area_L', 
line_width = 5) plt.plot(table_new["perimeter_T"], table_new["depth"], label="Perimeter_T", linewidth=5) plt.plot(table_new["perimeter_U"], table_new["depth"], label="Perimeter_U", linewidth=5) plt.plot(table_new["perimeter_L"], table_new["depth"], label="Perimeter_L", linewidth=5) -# plt.plot(table_new['Q_U'],table_new['depth'], label = 'Q_U', linewidth = 5) -# plt.plot(table_new['Q_L'],table_new['depth'], label = 'Q_L', linewidth = 5) -# plt.plot(table_new['Q_T'],table_new['depth'], label = 'Q_T', linewidth = 5) +# plt.plot(table_new['Q_U'],table_new['depth'], label = 'Q_U', line_width = 5) +# plt.plot(table_new['Q_L'],table_new['depth'], label = 'Q_L', line_width = 5) +# plt.plot(table_new['Q_T'],table_new['depth'], label = 'Q_T', line_width = 5) -# plt.plot(table['logQ'],table['logH'], label = 'Area', linewidth = 5) +# plt.plot(table['logQ'],table['logH'], label = 'Area', line_width = 5) plt.ylabel("Depth (m)", fontsize=20) plt.ylim([0, 8]) plt.xlim([0, table_new["Q_T"].loc[table_new["depth"] == 8].values[0] + 5]) plt.hlines( - Sub.crosssections["dbf"].loc[Sub.crosssections["xsid"] == Sub.xsname[0]].values[0], + Sub.cross_sections["dbf"] + .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]] + .values[0], 0, table_new["area_T"].loc[table_new["depth"] == 5].values[0], linewidth=5, @@ -505,14 +507,14 @@ plt.annotate( "Dbf = " + str( - Sub.crosssections["dbf"] - .loc[Sub.crosssections["xsid"] == Sub.xsname[0]] + Sub.cross_sections["dbf"] + .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]] .values[0] ), xy=( table_new["perimeter_T"].loc[table_new["depth"] == 5].values[0] - 80, - Sub.crosssections["dbf"] - .loc[Sub.crosssections["xsid"] == Sub.xsname[0]] + Sub.cross_sections["dbf"] + .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]] .values[0] + 0.2, ), @@ -536,8 +538,8 @@ table["logQ"] = np.log10(table["Q"]) table["logH"] = np.log10(table["depth"]) -dbf = Sub.crosssections["dbf"][Sub.crosssections["xsid"] == Sub.xsname[0]].values[0] -b = Sub.crosssections["b"][Sub.crosssections["xsid"] == Sub.xsname[0]].values[0] +dbf = Sub.cross_sections["dbf"][Sub.cross_sections["xsid"] == Sub.xs_names[0]].values[0] +b = Sub.cross_sections["b"][Sub.cross_sections["xsid"] == Sub.xs_names[0]].values[0] Abf = dbf * b Pbf = b + 2 * dbf Qdbf = (1.0 / 0.03) * (Abf * ((Abf / Pbf) ** (2.0 / 3.0))) * ((0.1 / 500) ** 0.5) @@ -549,9 +551,9 @@ dbfloc = list( np.where( table["depth"] - <= Sub.crosssections["dbf"][Sub.crosssections["xsid"] == Sub.xsname[0]].values[ - 0 - ] + <= Sub.cross_sections["dbf"][ + Sub.cross_sections["xsid"] == Sub.xs_names[0] + ].values[0] ) )[-1][-1] @@ -564,13 +566,15 @@ plt.plot(table["Q"], table["depth"], label="Q", linewidth=5) plt.plot(table["v"], table["depth"], label="velocity", linewidth=5) -# plt.plot(table['logQ'],table['logH'], label = 'Area', linewidth = 5) +# plt.plot(table['logQ'],table['logH'], label = 'Area', line_width = 5) plt.ylabel("Depth (m)", fontsize=20) plt.ylim([0, 5]) plt.xlim([0, table["perimeter"].loc[table["depth"] == 5].values[0] + 5]) plt.hlines( - Sub.crosssections["dbf"].loc[Sub.crosssections["xsid"] == Sub.xsname[0]].values[0], + Sub.cross_sections["dbf"] + .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]] + .values[0], 0, table["area"].loc[table["depth"] == 5].values[0], linewidth=5, @@ -578,14 +582,14 @@ plt.annotate( "Dbf = " + str( - Sub.crosssections["dbf"] - .loc[Sub.crosssections["xsid"] == Sub.xsname[0]] + Sub.cross_sections["dbf"] + .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]] .values[0] ), xy=( table["perimeter"].loc[table["depth"] == 
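The manual-calibration script above and its compacted variant below apply one mechanical rename pattern, so a single summary may help when updating similar user code against the new attribute names. A hedged before/after map of the renames that actually appear in these diffs, with sub, river, and vis as stand-in objects for the Sub/Reach, River, and Visualize instances:

# Renames as applied throughout these example diffs (old -> new):
# river.crosssections              -> river.cross_sections
# sub.lastxs / sub.firstday        -> sub.last_xs / sub.first_day
# sub.lastday                      -> sub.last_day
# sub.Result1D                     -> sub.results_1d
# sub.XSHydrographs                -> sub.xs_hydrograph
# sub.XSWaterLevel                 -> sub.xs_water_level
# sub.xsname                       -> sub.xs_names
# vis.GroundSurface(...)           -> vis.plotGroundSurface(...)
# vis.CrossSections(...)           -> vis.plotCrossSections(...)
# V.LineStyle(n)                   -> V.getLineStyle(n)
# keyword arguments: fromxs/toxs   -> from_xs/to_xs, nxlabels -> xlabels_number,
#                    samescale     -> same_scale, textspacing -> text_spacing,
#                    plottingoption -> plotting_option, Lastsegment -> last_river_reach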
diff --git a/examples/hydrodynamic-models/calibration/Manual_calibration_compacted.py b/examples/hydrodynamic-models/calibration/Manual_calibration_compacted.py
index 8138d403..4b9e73b4 100644
--- a/examples/hydrodynamic-models/calibration/Manual_calibration_compacted.py
+++ b/examples/hydrodynamic-models/calibration/Manual_calibration_compacted.py
@@ -100,7 +100,7 @@
     print(print(gauges))
 except KeyError:
     print("No gauge - choose another gauge to compare")
-    gaugexs = Sub.lastxs
+    gaugexs = Sub.last_xs
     segment_xs = str(SubID) + "_" + str(gaugexs)
 # get the gauge of the upstream segment
 """ write the segment id you want to get its data"""
@@ -156,8 +156,8 @@
 # Specific XS
 specificxs = False

-start = str(Sub.firstday)[:-9]
-end = str(Sub.lastday)[:-9]
+start = str(Sub.first_day)[:-9]
+end = str(Sub.last_day)[:-9]

 fig, ax = Sub.plotQ(
     Calib,
@@ -203,7 +203,7 @@
 #             str(dt.datetime.now())[0:11] + ".png")
 # %% Hydrograph progression in a segment
 xss = []
-start = str(Sub.firstday)[:-9]
+start = str(Sub.first_day)[:-9]
 end = "1955-03-01"
 fromxs = ""
 toxs = ""
@@ -223,8 +223,8 @@
 #             str(gauges.loc[gaugei, 'name']) +
 #             str(dt.datetime.now())[0:11] + ".png")
 # %% Water Level
-start = str(Sub.firstday)[:-9]
-end = str(Sub.lastday)[:-9]
+start = str(Sub.first_day)[:-9]
+end = str(Sub.last_day)[:-9]

 Sub.plotWL(Calib, start, end, gaugexs, stationname, gaugename, plotgauge=True)
@@ -246,17 +246,17 @@
 # %% calibration (the bed level change the levels)
 # NoSegments = 1
 # read the original slope and XS files
-Calib.crosssections = River.crosssections
+Calib.cross_sections = River.cross_sections
 Calib.slope = River.slope
 BedlevelDS = 88
 Manning = 0.06
 BC_slope = -0.03
 Calib.calculateProfile(SubID, BedlevelDS, Manning, BC_slope)
-# River.crosssections.to_csv(RIM2Files + "/xs_rhine2.csv", index=False, float_format="%.3f")
+# River.cross_sections.to_csv(RIM2Files + "/xs_rhine2.csv", index=False, float_format="%.3f")
 # River.slope.to_csv(RIM2Files + "/slope2.csv",header=None,index=False)
 #%% Smooth cross section
-Calib.crosssections = River.crosssections[:]
+Calib.cross_sections = River.cross_sections[:]
 Calib.smoothMaxSlope(SubID)
 Calib.smoothBedLevel(SubID)
 Calib.downWardBedLevel(SubID, 0.05)
@@ -264,7 +264,7 @@
 # Calib.SmoothFloodplainHeight(SubID)
 Calib.smoothBedWidth(SubID)
 # Calib.CheckFloodplain()
-# Calib.crosssections.to_csv(RIM2Files + "/XS2.csv", index=None, float_format="%.3f")
+# Calib.cross_sections.to_csv(RIM2Files + "/XS2.csv", index=None, float_format="%.3f")
 #%% customized Run result saving
 # the last cross section results to use it in calibration
 """
@@ -275,32 +275,32 @@
 you have to uncomment the following two lines
 """
 # Path = wpath + "/results/customized_results/"
-Sub.saveHydrograph(Sub.lastxs)  # Path
+Sub.saveHydrograph(Sub.last_xs)  # Path
 # %% Filters
 """
 check the max sf
 """
 ## calculate the water surface difference
-# wl = Reach.Result1D.loc[Reach.Result1D.index[i],'wl']
+# wl = Reach.results_1d.loc[Reach.results_1d.index[i],'wl']
 sf = [
     (
-        Sub.Result1D.loc[Sub.Result1D.index[i], "wl"]
-        - Sub.Result1D.loc[Sub.Result1D.index[i + 1], "wl"]
+        Sub.results_1d.loc[Sub.results_1d.index[i], "wl"]
+        - Sub.results_1d.loc[Sub.results_1d.index[i + 1], "wl"]
     )
     / 500
-    for i in range(len(Sub.Result1D.index) - 1)
+    for i in range(len(Sub.results_1d.index) - 1)
 ]
 sf = sf + [np.mean(sf)]
-Sub.Result1D["sf"] = sf
+Sub.results_1d["sf"] = sf

-print(Sub.Result1D[Sub.Result1D["sf"] == Sub.Result1D["sf"].max()])
-print(Sub.Result1D[Sub.Result1D["sf"] == Sub.Result1D["sf"].min()])
+print(Sub.results_1d[Sub.results_1d["sf"] == Sub.results_1d["sf"].max()])
+print(Sub.results_1d[Sub.results_1d["sf"] == Sub.results_1d["sf"].min()])

 """some filter to get where the min depth (dryness limit)"""
-# dataX = Reach.Result1D[Reach.Result1D['xs'] == 700]
-dataX = Sub.Result1D[Sub.Result1D["h"] == 0.01]
-# dataX = Reach.Result1D[Reach.Result1D['xs'] == 121]
+# dataX = Reach.results_1d[Reach.results_1d['xs'] == 700]
+dataX = Sub.results_1d[Sub.results_1d["h"] == 0.01]
+# dataX = Reach.results_1d[Reach.results_1d['xs'] == 121]
 #%% get the boundary conditions
 start = "1955-01-01"
 end = "1955-03-21"
@@ -312,24 +312,24 @@

 Vis = V(resolution="Hourly")

-Vis.GroundSurface(
+Vis.plotGroundSurface(
     Sub,
     floodplain=True,
-    plotlateral=True,
-    nxlabels=20,
-    fromxs=fromxs,
-    toxs=toxs,
+    plot_lateral=True,
+    xlabels_number=20,
+    from_xs=fromxs,
+    to_xs=toxs,
     option=2,
 )
 #%% cross-sections
-fig, ax = Vis.CrossSections(
+fig, ax = Vis.plotCrossSections(
     Sub,
     bedlevel=True,
-    fromxs=fromxs,
-    toxs=toxs,
-    samescale=True,
-    textspacing=[(1, 1), (1, 4)],
-    plottingoption=3,
+    from_xs=fromxs,
+    to_xs=toxs,
+    same_scale=True,
+    text_spacing=[(1, 1), (1, 4)],
+    plotting_option=3,
 )
 #%% Animation
 """ periods of water level exceeds the bankful depth"""
@@ -343,11 +343,11 @@
     start,
     end,
     fps=2,
-    nxlabels=5,
-    fromxs=fromxs,
-    toxs=toxs,
-    xaxislabelsize=10,
-    textlocation=(-1, -2),
+    xlabels_number=5,
+    from_xs=fromxs,
+    to_xs=toxs,
+    x_axis_label_size=10,
+    text_location=(-1, -2),
     repeat=True,
 )
 plt.close()
@@ -363,7 +363,7 @@
 start = "1955-01-01"
 end = "1955-01-10"

-Sub.readSubDailyResults(start, end, Lastsegment=True)
+Sub.readSubDailyResults(start, end, last_river_reach=True)
 #%%
 # negative values
 # TODO : check CheckNegativeQ makes problem
@@ -381,8 +381,8 @@
     start,
     end,
     interval=0.000000000000000000000000000000000001,
-    fromxs=fromxs,
-    toxs=toxs,
+    from_xs=fromxs,
+    to_xs=toxs,
 )
 #%% Q for all XS
 """
@@ -431,35 +431,37 @@
 # table_new['logQ'] = np.log10(table_new ['Q'])
 # table_new['logH'] = np.log10(table_new ['depth'])

-dbf = Sub.crosssections["dbf"][Sub.crosssections["xsid"] == Sub.xsname[0]].values[0]
-b = Sub.crosssections["b"][Sub.crosssections["xsid"] == Sub.xsname[0]].values[0]
+dbf = Sub.cross_sections["dbf"][Sub.cross_sections["xsid"] == Sub.xs_names[0]].values[0]
+b = Sub.cross_sections["b"][Sub.cross_sections["xsid"] == Sub.xs_names[0]].values[0]
 Abf = dbf * b
 Pbf = b + 2 * dbf
 # Qdbf = (1.0/0.03)*(Abf *((Abf/Pbf)**(2.0/3.0)))*((0.1/500)**0.5)

 plt.figure(50, figsize=(15, 8))
-# plt.plot(table_new['area_T'],table_new['depth'], label = 'Area_T', linewidth = 5)
-# plt.plot(table_new['area_U'],table_new['depth'], label = 'Area_U', linewidth = 5)
-# plt.plot(table_new['area_L'],table_new['depth'], label = 'Area_L', linewidth = 5)
+# plt.plot(table_new['area_T'],table_new['depth'], label = 'Area_T', line_width = 5)
+# plt.plot(table_new['area_U'],table_new['depth'], label = 'Area_U', line_width = 5)
+# plt.plot(table_new['area_L'],table_new['depth'], label = 'Area_L', line_width = 5)

 plt.plot(table_new["perimeter_T"], table_new["depth"], label="Perimeter_T", linewidth=5)
 plt.plot(table_new["perimeter_U"], table_new["depth"], label="Perimeter_U", linewidth=5)
 plt.plot(table_new["perimeter_L"], table_new["depth"], label="Perimeter_L", linewidth=5)

-# plt.plot(table_new['Q_U'],table_new['depth'], label = 'Q_U', linewidth = 5)
-# plt.plot(table_new['Q_L'],table_new['depth'], label = 'Q_L', linewidth = 5)
-# plt.plot(table_new['Q_T'],table_new['depth'], label = 'Q_T', linewidth = 5)
+# plt.plot(table_new['Q_U'],table_new['depth'], label = 'Q_U', line_width = 5)
+# plt.plot(table_new['Q_L'],table_new['depth'], label = 'Q_L', line_width = 5)
+# plt.plot(table_new['Q_T'],table_new['depth'], label = 'Q_T', line_width = 5)

-# plt.plot(table['logQ'],table['logH'], label = 'Area', linewidth = 5)
+# plt.plot(table['logQ'],table['logH'], label = 'Area', line_width = 5)

 plt.ylabel("Depth (m)", fontsize=20)
 plt.ylim([0, 8])
 plt.xlim([0, table_new["Q_T"].loc[table_new["depth"] == 8].values[0] + 5])
 plt.hlines(
-    Sub.crosssections["dbf"].loc[Sub.crosssections["xsid"] == Sub.xsname[0]].values[0],
+    Sub.cross_sections["dbf"]
+    .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]]
+    .values[0],
     0,
     table_new["area_T"].loc[table_new["depth"] == 5].values[0],
     linewidth=5,
@@ -467,14 +469,14 @@
 plt.annotate(
     "Dbf = "
     + str(
-        Sub.crosssections["dbf"]
-        .loc[Sub.crosssections["xsid"] == Sub.xsname[0]]
+        Sub.cross_sections["dbf"]
+        .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]]
         .values[0]
     ),
     xy=(
         table_new["perimeter_T"].loc[table_new["depth"] == 5].values[0] - 80,
-        Sub.crosssections["dbf"]
-        .loc[Sub.crosssections["xsid"] == Sub.xsname[0]]
+        Sub.cross_sections["dbf"]
+        .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]]
         .values[0]
         + 0.2,
     ),
@@ -498,8 +500,8 @@
 table["logQ"] = np.log10(table["Q"])
 table["logH"] = np.log10(table["depth"])

-dbf = Sub.crosssections["dbf"][Sub.crosssections["xsid"] == Sub.xsname[0]].values[0]
-b = Sub.crosssections["b"][Sub.crosssections["xsid"] == Sub.xsname[0]].values[0]
+dbf = Sub.cross_sections["dbf"][Sub.cross_sections["xsid"] == Sub.xs_names[0]].values[0]
+b = Sub.cross_sections["b"][Sub.cross_sections["xsid"] == Sub.xs_names[0]].values[0]
 Abf = dbf * b
 Pbf = b + 2 * dbf
 Qdbf = (1.0 / 0.03) * (Abf * ((Abf / Pbf) ** (2.0 / 3.0))) * ((0.1 / 500) ** 0.5)
@@ -511,9 +513,9 @@
 dbfloc = list(
     np.where(
         table["depth"]
-        <= Sub.crosssections["dbf"][Sub.crosssections["xsid"] == Sub.xsname[0]].values[
-            0
-        ]
+        <= Sub.cross_sections["dbf"][
+            Sub.cross_sections["xsid"] == Sub.xs_names[0]
+        ].values[0]
     )
 )[-1][-1]
@@ -526,13 +528,15 @@
 plt.plot(table["Q"], table["depth"], label="Q", linewidth=5)
 plt.plot(table["v"], table["depth"], label="velocity", linewidth=5)

-# plt.plot(table['logQ'],table['logH'], label = 'Area', linewidth = 5)
+# plt.plot(table['logQ'],table['logH'], label = 'Area', line_width = 5)
 plt.ylabel("Depth (m)", fontsize=20)
 plt.ylim([0, 5])
 plt.xlim([0, table["perimeter"].loc[table["depth"] == 5].values[0] + 5])
 plt.hlines(
-    Sub.crosssections["dbf"].loc[Sub.crosssections["xsid"] == Sub.xsname[0]].values[0],
+    Sub.cross_sections["dbf"]
+    .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]]
+    .values[0],
     0,
     table["area"].loc[table["depth"] == 5].values[0],
     linewidth=5,
@@ -540,14 +544,14 @@
 plt.annotate(
     "Dbf = "
     + str(
-        Sub.crosssections["dbf"]
-        .loc[Sub.crosssections["xsid"] == Sub.xsname[0]]
+        Sub.cross_sections["dbf"]
+        .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]]
         .values[0]
     ),
     xy=(
         table["perimeter"].loc[table["depth"] == 5].values[0] - 80,
-        Sub.crosssections["dbf"]
-        .loc[Sub.crosssections["xsid"] == Sub.xsname[0]]
+        Sub.cross_sections["dbf"]
+        .loc[Sub.cross_sections["xsid"] == Sub.xs_names[0]]
         .values[0]
         + 0.2,
     ),
diff --git a/examples/hydrodynamic-models/kinematic wave.py b/examples/hydrodynamic-models/kinematic wave.py
index a161e8c9..561e4ba8 100644
--- a/examples/hydrodynamic-models/kinematic wave.py +++ b/examples/hydrodynamic-models/kinematic wave.py @@ -40,7 +40,7 @@ #%% Visualization plotstart = "2010-01-01 00:00:00" plotend = "2010-1-1 05:00:00" -# ffmpegPath = "F:/Users/mofarrag/.matplotlib/ffmpeg-4.4-full_build/bin/ffmpeg.exe" +# ffmpeg_path = "F:/Users/mofarrag/.matplotlib/ffmpeg-4.4-full_build/bin/ffmpeg.exe" anim = Test.animatefloodwave(start=plotstart, end=plotend, interval=0.0000002) #%% save results # Test.SaveResult(path + "/data/hydrodynamic model/" ) diff --git a/examples/hydrodynamic-models/run_model.py b/examples/hydrodynamic-models/run_model.py index 90b9b2b7..08a99a2c 100644 --- a/examples/hydrodynamic-models/run_model.py +++ b/examples/hydrodynamic-models/run_model.py @@ -91,7 +91,7 @@ # get the max discharge Q = Sub.Laterals.loc[step : step + dt.timedelta(days=1), :] # index starts from 1 - Q.loc[:, "US"] = Sub.USHydrographs[step_ind - 1 : step_ind + 1] + Q.loc[:, "US"] = Sub.us_hydrographs[step_ind - 1 : step_ind + 1] if Q.sum(axis=1).values.max() > River.D1["MinQ"]: # interpolate to 1 min resolution diff --git a/examples/hydrological-model/Jiboa-distributed-model-muskingum-lake.py b/examples/hydrological-model/Jiboa-distributed-model-muskingum-lake.py index 3ac416b8..de71781e 100644 --- a/examples/hydrological-model/Jiboa-distributed-model-muskingum-lake.py +++ b/examples/hydrological-model/Jiboa-distributed-model-muskingum-lake.py @@ -157,7 +157,7 @@ ============================================================================= plotDistributedResults(StartDate, EndDate, fmt="%Y-%m-%d", Option = 1, Gauges=False, TicksSpacing = 2, Figsize=(8,8), PlotNumbers=True, - NumSize= 8, Title = 'Total Discharge',titlesize = 15, Backgroundcolorthreshold=None, + NumSize= 8, Title = 'Total Discharge',title_size = 15, Backgroundcolorthreshold=None, cbarlabel = 'Discharge m3/s', cbarlabelsize = 12, textcolors=("white","black"), Cbarlength = 0.75, Interval = 200,cmap='coolwarm_r', Textloc=[0.1,0.2], Gaugecolor='red',Gaugesize=100, ColorScale = 1,gamma=1./2.,linthresh=0.0001, @@ -193,7 +193,7 @@ size of the numbers plotted intop of each cells. The default is 8. Title : [str], optional title of the plot. The default is 'Total Discharge'. -titlesize : [integer], optional +title_size : [integer], optional title size. The default is 15. 
Backgroundcolorthreshold : [float/integer], optional threshold value if the value of the cell is greater, the plotted diff --git a/examples/hydrological-model/Prepare Input Data/00-Coello-data_download .py b/examples/hydrological-model/Prepare Input Data/00-Coello-data_download.py similarity index 62% rename from examples/hydrological-model/Prepare Input Data/00-Coello-data_download .py rename to examples/hydrological-model/Prepare Input Data/00-Coello-data_download.py index 9888d218..f0999ef2 100644 --- a/examples/hydrological-model/Prepare Input Data/00-Coello-data_download .py +++ b/examples/hydrological-model/Prepare Input Data/00-Coello-data_download.py @@ -4,16 +4,18 @@ install and use earth2observe package https://github.com/MAfarrag/earth2observe """ -from earth2observe.chirps import CHIRPS -from earth2observe.ecmwf import ECMWF, Variables +from earth2observe.chirps import CHIRPS, Catalog +from earth2observe.earth2observe import Earth2Observe +from earth2observe.ecmwf import Catalog root_path = "C:/MyComputer/01Algorithms/Hydrology/Hapi/" # %% Basin data -StartDate = "2009-01-01" -EndDate = "2009-02-01" -time = "daily" +start = "2009-01-01" +end = "2009-02-01" +temporal_resolution = "daily" latlim = [4.190755, 4.643963] lonlim = [-75.649243, -74.727286] + # make sure to provide a full path not relative path # please replace the following root_path to the repo main directory in your machine path = root_path + "examples/data/satellite_data/" @@ -21,41 +23,42 @@ """ check the ECMWF variable names that you have to provide to the RemoteSensing object """ -Vars = Variables("daily") -Vars.__str__() -# %% ECMWF +var = "T" +catalog = Catalog() +print(catalog.catalog) +catalog.get_variable(var) +# %% pripitation data from ecmwf """ provide the time period, temporal resolution, extent and variables of your interest """ -start = "2009-01-01" -end = "2009-01-10" -ts = "daily" -latlim = [4.190755, 4.643963] -lonlim = [-75.649243, -74.727286] # Temperature, Evapotranspiration variables = ["T", "E"] - -Coello = ECMWF( - time=time, +source = "ecmwf" +e2o = Earth2Observe( + data_source=source, start=start, end=end, + variables=variables, lat_lim=latlim, lon_lim=lonlim, + temporal_resolution=temporal_resolution, path=path, - variables=variables, ) -Coello.download() +e2o.download() # %% CHRIPS -Coello = CHIRPS( - StartDate=StartDate, - EndDate=EndDate, - Time=time, - latlim=latlim, - lonlim=lonlim, - Path=path, +variables = ["precipitation"] +e2o = Earth2Observe( + data_source=source, + start=start, + end=end, + variables=variables, + lat_lim=latlim, + lon_lim=lonlim, + temporal_resolution=temporal_resolution, + path=path, ) -Coello.Download() +e2o.download() # %% """ if you want to use parallel downloads using multi cores, enter the number of @@ -63,14 +66,4 @@ PS. 
the multi-coredownload does not have an indication bar """ -cores = 4 - -Coello = CHIRPS( - StartDate=StartDate, - EndDate=EndDate, - Time=time, - latlim=latlim, - lonlim=lonlim, - Path=path, -) -Coello.Download(cores=cores) +e2o.download(cores=4) diff --git a/examples/hydrological-model/Prepare Input Data/01-Coello-prepare_inputdata.py b/examples/hydrological-model/Prepare Input Data/01-Coello-prepare_inputdata.py index b3aaeef1..1c7c8f88 100644 --- a/examples/hydrological-model/Prepare Input Data/01-Coello-prepare_inputdata.py +++ b/examples/hydrological-model/Prepare Input Data/01-Coello-prepare_inputdata.py @@ -17,18 +17,20 @@ """ dem_path = f"{gis_data_path}/acc4000.tif" + +In = Inputs(dem_path) outputpath = f"{metao_data_path}/meteodata_prepared/" # prec prec_in_path = f"{metao_data_path}/raw_data/prec/" -Inputs.prepareInputs(dem_path, prec_in_path, outputpath + "prec0") +In.prepareInputs(prec_in_path, outputpath + "prec0") # evap evap_in_path = f"{metao_data_path}/raw_data/evap/" -Inputs.prepareInputs(dem_path, evap_in_path, outputpath + "evap0") +In.prepareInputs(evap_in_path, outputpath + "evap0") # temp temp_in_path = f"{metao_data_path}/raw_data/temp/" -Inputs.prepareInputs(dem_path, temp_in_path, outputpath + "temp0") +In.prepareInputs(temp_in_path, outputpath + "temp0") """ in case you want to manipulate the value in all the rasters of one of the inputs diff --git a/examples/plot/plot_array.py b/examples/plot/plot_array.py index 7208691c..3fcc1740 100644 --- a/examples/plot/plot_array.py +++ b/examples/plot/plot_array.py @@ -36,7 +36,7 @@ figure size. The default is (8,8). Title : [str], optional title of the plot. The default is 'Total Discharge'. -titlesize : [integer], optional +title_size : [integer], optional title size. The default is 15. 
""" Figsize = (8, 8) diff --git a/mkdocs.yml b/mkdocs.yml deleted file mode 100644 index 22b6fb29..00000000 --- a/mkdocs.yml +++ /dev/null @@ -1,26 +0,0 @@ -site_name: Hapi -site_description: The documentation of Hapi Hydrological Model -site_author: Mostafa Farrag - -repo_url: https://github.com/MAfarrag/Hapi -edit_url: "" - -theme: - name: readthedocs - -nav: - - Home: index.md - - Available Models: - - HBV Bergström 1992: HBV.md - - Muskingum Routing: Muskingum.md - - Inputs: - - Meteorological Inputs: Meteo_Inputs.md - - GIS Inputs: GIS_inputs.md - - Parameters: Parameters.md - - Tutorial: - - Build Distributed Model: build_model.md - #- Inputs: CreateInputs.md - - Lumped Hydrological Model: Lumped_HBV.md - - Sensitivity Analysis: Sensitivity_Analysis.md - - GIS: GIS.md - - License: license.md diff --git a/requirements-dev.txt b/requirements-dev.txt index 5c929dd1..35a034ec 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,12 +1,12 @@ black >=22.12.0 darglint >=1.8.1 flake8-bandit >=4.1.1 -flake8-bugbear >=22.12.6 -flake8-docstrings >=1.6.0 +flake8-bugbear >=23.1.20 +flake8-docstrings >=1.7.0 flake8-rst-docstrings >=0.3.0 pep8-naming >=0.13.3 -pre-commit >=2.21.0 +pre-commit >=3.0.3 pre-commit-hooks >=4.4.0 -pytest >=7.2.0 +pytest >=7.2.1 pytest-cov ==4.0.0 reorder-python-imports >=3.9.0 diff --git a/requirements.txt b/requirements.txt index baaaeb4b..a2fa09b9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,22 +1,24 @@ -digitalearth >=0.1.10 -earth2observe >=0.1.7 +cleopatra >=0.2.7 +digitalearth >=0.1.11 +earth2observe >=0.2.2 Fiona >=1.8.21 gdal >=3.5.3 geopandas >=0.12.2 -geostatista >=0.1.5 +geostatista >=0.1.6 +joblib >=1.2.0 loguru >=0.6.0 -matplotlib >=3.5.3 -numpy >=1.23.5 +matplotlib >=3.6.3 +numpy >=1.24.1 Oasis-Optimization >=1.0.2 pandas >=1.4.4 pathlib >=1.0.1 pip >=22.3.1 -pyramids-gis >=0.2.11 +pyramids-gis >=0.3.2 PyYAML >=0.6.0 rasterio >=1.3.2 rasterstats >=0.17.0 requests >=2.28.1 scipy >=1.9.1 -statista >=0.1.7 -statista >=0.1.7 +serapeum_utils >=0.1.1 +statista >=0.1.8 statsmodels >=0.13.2 diff --git a/setup.py b/setup.py index 9e6e50f0..76e3dcd1 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ setup( name="HAPI-Nile", - version="1.5.0", + version="1.6.0", description="Distributed hydrological-model", author="Mostafa Farrag", author_email="moah.farag@gmail.come", diff --git a/tests/Distributed_mode_run.py b/tests/Distributed_mode_run.py index 1a14b194..c1cc48b3 100644 --- a/tests/Distributed_mode_run.py +++ b/tests/Distributed_mode_run.py @@ -84,7 +84,7 @@ """ ============================================================================= AnimateArray(Arr, Time, NoElem, TicksSpacing = 2, Figsize=(8,8), PlotNumbers=True, - NumSize= 8, Title = 'Total Discharge',titlesize = 15, Backgroundcolorthreshold=None, + NumSize= 8, Title = 'Total Discharge',title_size = 15, Backgroundcolorthreshold=None, cbarlabel = 'Discharge m3/s', cbarlabelsize = 12, textcolors=("white","black"), Cbarlength = 0.75, Interval = 200,cmap='coolwarm_r', Textloc=[0.1,0.2], Gaugecolor='red',Gaugesize=100, ColorScale = 1,gamma=1./2.,linthresh=0.0001, @@ -109,7 +109,7 @@ size of the numbers plotted intop of each cells. The default is 8. Title : [str], optional title of the plot. The default is 'Total Discharge'. -titlesize : [integer], optional +title_size : [integer], optional title size. The default is 15. 
Backgroundcolorthreshold : [float/integer], optional threshold value if the value of the cell is greater, the plotted diff --git a/tests/FloodModel.py b/tests/FloodModel.py index d20daf7d..ee7a0e3f 100644 --- a/tests/FloodModel.py +++ b/tests/FloodModel.py @@ -111,7 +111,7 @@ """ ============================================================================= AnimateArray(Arr, Time, NoElem, TicksSpacing = 2, Figsize=(8,8), PlotNumbers=True, - NumSize= 8, Title = 'Total Discharge',titlesize = 15, Backgroundcolorthreshold=None, + NumSize= 8, Title = 'Total Discharge',title_size = 15, Backgroundcolorthreshold=None, cbarlabel = 'Discharge m3/s', cbarlabelsize = 12, textcolors=("white","black"), Cbarlength = 0.75, Interval = 200,cmap='coolwarm_r', Textloc=[0.1,0.2], Gaugecolor='red',Gaugesize=100, ColorScale = 1,gamma=1./2.,linthresh=0.0001, @@ -136,7 +136,7 @@ size of the numbers plotted intop of each cells. The default is 8. Title : [str], optional title of the plot. The default is 'Total Discharge'. -titlesize : [integer], optional +title_size : [integer], optional title size. The default is 15. Backgroundcolorthreshold : [float/integer], optional threshold value if the value of the cell is greater, the plotted diff --git a/tests/hm/calibration/test_hm_calibration.py b/tests/hm/calibration/test_hm_calibration.py index 1ebc3154..80aa7028 100644 --- a/tests/hm/calibration/test_hm_calibration.py +++ b/tests/hm/calibration/test_hm_calibration.py @@ -64,8 +64,8 @@ def test_ReadObservedWL( gauge_date_format=gauge_date_format, ) assert ( - len(Calib.WLGauges) == test_time_series_length - and len(Calib.WLGauges.columns) == 3 + len(Calib.wl_gauges) == test_time_series_length + and len(Calib.wl_gauges.columns) == 3 and len(Calib.hm_gauges.columns) == 12 ) @@ -89,7 +89,7 @@ def test_CalculateProfile( ) assert ( - Calib.crosssections.loc[Calib.crosssections["id"] == 3, "gl"].tolist()[-1] + Calib.cross_sections.loc[Calib.cross_sections["id"] == 3, "gl"].tolist()[-1] == calibrateProfile_DS_bedlevel ) @@ -114,6 +114,16 @@ def test_SmoothBedLevel( Calib.smoothBedLevel(segment3) +def test_SmoothDikeLevel( + version: int, + river_cross_section_path: str, + segment3: int, +): + Calib = RC.Calibration("HM", version=version) + Calib.readXS(river_cross_section_path) + Calib.smoothDikeLevel(segment3) + + def test_DownWardBedLevel( version: int, river_cross_section_path: str, @@ -204,10 +214,10 @@ def test_ReadHMWL( Calib = RC.Calibration("HM", version=3) Calib.readGaugesTable(gauges_table_path) Calib.readHMWL(hm_separated_wl_results_path, fmt="'%Y-%m-%d'") - assert len(Calib.WLHM) == test_time_series_length and len( - Calib.WLHM.columns + assert len(Calib.wl_hm) == test_time_series_length and len( + Calib.wl_hm.columns ) == len(rrmgauges) - assert all(elem in Calib.WLHM.columns.to_list() for elem in rrmgauges) + assert all(elem in Calib.wl_hm.columns.to_list() for elem in rrmgauges) class Test_GetAnnualMax: @@ -283,15 +293,15 @@ def test_HMvsRRM( Calib.readHMQ(hm_separated_q_results_path, fmt="'%Y-%m-%d'") Calib.readRRM(rrmpath, fmt="'%Y-%m-%d'") Calib.HMvsRRM() - assert isinstance(Calib.MetricsHMvsRRM, DataFrame) and isinstance( - Calib.MetricsHMvsRRM, GeoDataFrame + assert isinstance(Calib.metrics_hm_vs_rrm, DataFrame) and isinstance( + Calib.metrics_hm_vs_rrm, GeoDataFrame ) - assert len(Calib.MetricsHMvsRRM) == 3 + assert len(Calib.metrics_hm_vs_rrm) == 3 assert all( - Calib.MetricsHMvsRRM.index + Calib.metrics_hm_vs_rrm.index == Calib.hm_gauges.loc[:, Calib.gauge_id_col].to_list() ) - assert 
all(Calib.MetricsHMvsRRM.columns == Metrics_table_columns) + assert all(Calib.metrics_hm_vs_rrm.columns == Metrics_table_columns) def test_RRMvsObserved( @@ -314,15 +324,17 @@ def test_RRMvsObserved( ) Calib.readRRM(rrmpath, fmt="'%Y-%m-%d'") Calib.RRMvsObserved() - assert isinstance(Calib.MetricsRRMvsObs, DataFrame) and isinstance( - Calib.MetricsRRMvsObs, GeoDataFrame + assert isinstance(Calib.metrics_rrm_vs_obs, DataFrame) and isinstance( + Calib.metrics_rrm_vs_obs, GeoDataFrame ) - assert len(Calib.MetricsRRMvsObs) == 3 + assert len(Calib.metrics_rrm_vs_obs) == 3 assert all( - Calib.MetricsRRMvsObs.index + Calib.metrics_rrm_vs_obs.index == Calib.hm_gauges.loc[:, Calib.gauge_id_col].to_list() ) - assert all(elem in Calib.MetricsRRMvsObs.columns for elem in Metrics_table_columns) + assert all( + elem in Calib.metrics_rrm_vs_obs.columns for elem in Metrics_table_columns + ) def test_HMQvsObserved( @@ -345,15 +357,17 @@ def test_HMQvsObserved( ) Calib.readHMQ(hm_separated_q_results_path, fmt="'%Y-%m-%d'") Calib.HMQvsObserved() - assert isinstance(Calib.MetricsHMQvsObs, DataFrame) and isinstance( - Calib.MetricsHMQvsObs, GeoDataFrame + assert isinstance(Calib.metrics_hm_q_vs_obs, DataFrame) and isinstance( + Calib.metrics_hm_q_vs_obs, GeoDataFrame ) - assert len(Calib.MetricsHMQvsObs) == 3 + assert len(Calib.metrics_hm_q_vs_obs) == 3 assert all( - Calib.MetricsHMQvsObs.index + Calib.metrics_hm_q_vs_obs.index == Calib.hm_gauges.loc[:, Calib.gauge_id_col].to_list() ) - assert all(elem in Calib.MetricsHMQvsObs.columns for elem in Metrics_table_columns) + assert all( + elem in Calib.metrics_hm_q_vs_obs.columns for elem in Metrics_table_columns + ) def test_HMWLvsObserved( @@ -376,15 +390,17 @@ def test_HMWLvsObserved( ) Calib.readHMWL(hm_separated_wl_results_path, fmt="'%Y-%m-%d'") Calib.HMWLvsObserved() - assert isinstance(Calib.MetricsHMWLvsObs, DataFrame) and isinstance( - Calib.MetricsHMWLvsObs, GeoDataFrame + assert isinstance(Calib.metrics_hm_wl_vs_obs, DataFrame) and isinstance( + Calib.metrics_hm_wl_vs_obs, GeoDataFrame ) - assert len(Calib.MetricsHMWLvsObs) == 3 + assert len(Calib.metrics_hm_wl_vs_obs) == 3 assert all( - Calib.MetricsHMWLvsObs.index + Calib.metrics_hm_wl_vs_obs.index == Calib.hm_gauges.loc[:, Calib.gauge_id_col].to_list() ) - assert all(elem in Calib.MetricsHMWLvsObs.columns for elem in Metrics_table_columns) + assert all( + elem in Calib.metrics_hm_wl_vs_obs.columns for elem in Metrics_table_columns + ) def test_InspectGauge( @@ -468,4 +484,4 @@ def test_SaveMetices( Calib.readRRM(rrmpath, fmt="'%Y-%m-%d'") Calib.HMvsRRM() Calib.RRMvsObserved() - Calib.SaveMetices(hm_saveto) + Calib.saveMetices(hm_saveto) diff --git a/tests/hm/interface/test_hm_interface.py b/tests/hm/interface/test_hm_interface.py index d1b42d38..a0e8d5ed 100644 --- a/tests/hm/interface/test_hm_interface.py +++ b/tests/hm/interface/test_hm_interface.py @@ -14,7 +14,7 @@ def test_readLateralsTable( IF.readXS(river_cross_section_path) IF.readLateralsTable(interface_Laterals_table_path) - assert len(IF.LateralsTable) == 9 and len(IF.LateralsTable.columns) == 2 + assert len(IF.laterals_table) == 9 and len(IF.laterals_table.columns) == 2 class TestreadLaterals: @@ -68,7 +68,7 @@ def test_readBoundaryConditionsTable( IF = Interface("Rhine", start=dates[0]) IF.readBoundaryConditionsTable(interface_bc_path) - assert len(IF.BCTable) == 2 and len(IF.BCTable.columns) == 2 + assert len(IF.bc_table) == 2 and len(IF.bc_table.columns) == 2 def test_ReadBoundaryConditions( @@ -104,6 +104,6 @@ def 
test_ReadRRMProgression( date_format=interface_Laterals_date_format, laterals=False, ) - assert len(IF.routedRRM) == laterals_number_ts + assert len(IF.routed_rrm) == laterals_number_ts # number of laterals + the total column - assert len(IF.routedRRM.columns) == no_laterals + 1 + assert len(IF.routed_rrm.columns) == no_laterals + 1 diff --git a/tests/hm/river/test_hm_river.py b/tests/hm/river/test_hm_river.py index 2214c2ef..b7c15172 100644 --- a/tests/hm/river/test_hm_river.py +++ b/tests/hm/river/test_hm_river.py @@ -9,7 +9,7 @@ def test_create_river_instance(dates: list, rrm_start: str, version: int): - assert R.River("HM", version=version, start=dates[0], rrmstart=rrm_start) + assert R.River("HM", version=version, start=dates[0], rrm_start=rrm_start) def test_read_slope_method(version: int, slope_path: str): @@ -24,8 +24,8 @@ def test_read_crosssections_method( River = R.River("HM", version=version) River.readXS(river_cross_section_path) assert ( - len(River.crosssections) == xs_total_no - and len(River.crosssections.columns) == xs_col_no + len(River.cross_sections) == xs_total_no + and len(River.cross_sections.columns) == xs_col_no ) @@ -50,8 +50,8 @@ def test_create_sub_instance( River.readSlope(slope_path) Sub = R.Reach(segment1, River) assert ( - Sub.firstxs == create_sub_instance_firstxs - and Sub.lastxs == create_sub_instance_lastxs + Sub.first_xs == create_sub_instance_firstxs + and Sub.last_xs == create_sub_instance_lastxs ) assert Sub.slope @@ -97,7 +97,7 @@ def test_sub_GetFlow( and len(Sub.BC.columns) == 1 and len(Sub.Laterals.columns) == 4 ) - assert all(elem in Sub.LateralsTable for elem in sub_GetFlow_lateralTable) + assert all(elem in Sub.laterals_table for elem in sub_GetFlow_lateralTable) class TestRead1DResult: @@ -112,33 +112,33 @@ def test_read_complete_file( ): River = R.River("HM", version=version) - River.onedresultpath = Read1DResult_path - # River.results_paths = {"onedresultpath": Read1DResult_path} + River.one_d_result_path = Read1DResult_path + # River.results_paths = {"one_d_result_path": Read1DResult_path} River.readXS(river_cross_section_path) Sub = R.Reach(segment1, River) Sub.read1DResult() assert ( - len(Sub.Result1D) - == test_time_series_length * 24 * (len(Sub.crosssections) + 1) - and len(Sub.Result1D.columns) == 6 + len(Sub.results_1d) + == test_time_series_length * 24 * (len(Sub.cross_sections) + 1) + and len(Sub.results_1d.columns) == 6 ) assert ( - len(Sub.XSHydrographs) == test_time_series_length * 24 - and len(Sub.XSHydrographs.columns) == 2 + len(Sub.xs_hydrograph) == test_time_series_length * 24 + and len(Sub.xs_hydrograph.columns) == 2 ) assert ( - len(Sub.XSWaterLevel) == test_time_series_length * 24 - and len(Sub.XSWaterLevel.columns) == 2 + len(Sub.xs_water_level) == test_time_series_length * 24 + and len(Sub.xs_water_level.columns) == 2 ) assert ( - len(Sub.XSWaterDepth) == test_time_series_length * 24 - and len(Sub.XSWaterDepth.columns) == 2 + len(Sub.xs_water_depth) == test_time_series_length * 24 + and len(Sub.xs_water_depth.columns) == 2 ) Sub.read1DResult(xsid=Read1DResult_xsid) - assert Read1DResult_xsid in Sub.XSHydrographs.columns.tolist() - assert Read1DResult_xsid in Sub.XSWaterLevel.columns.tolist() - assert Read1DResult_xsid in Sub.XSWaterDepth.columns.tolist() + assert Read1DResult_xsid in Sub.xs_hydrograph.columns.tolist() + assert Read1DResult_xsid in Sub.xs_water_level.columns.tolist() + assert Read1DResult_xsid in Sub.xs_water_depth.columns.tolist() def test_Read_chunks( self, @@ -151,33 +151,33 @@ def 
test_Read_chunks( ): River = R.River("HM", version=version) - River.onedresultpath = Read1DResult_path - # River.results_paths = {"onedresultpath": Read1DResult_path} + River.one_d_result_path = Read1DResult_path + # River.results_paths = {"one_d_result_path": Read1DResult_path} River.readXS(river_cross_section_path) Sub = R.Reach(segment1, River) Sub.read1DResult(chunk_size=10000) assert ( - len(Sub.Result1D) - == test_time_series_length * 24 * (len(Sub.crosssections) + 1) - and len(Sub.Result1D.columns) == 6 + len(Sub.results_1d) + == test_time_series_length * 24 * (len(Sub.cross_sections) + 1) + and len(Sub.results_1d.columns) == 6 ) assert ( - len(Sub.XSHydrographs) == test_time_series_length * 24 - and len(Sub.XSHydrographs.columns) == 2 + len(Sub.xs_hydrograph) == test_time_series_length * 24 + and len(Sub.xs_hydrograph.columns) == 2 ) assert ( - len(Sub.XSWaterLevel) == test_time_series_length * 24 - and len(Sub.XSWaterLevel.columns) == 2 + len(Sub.xs_water_level) == test_time_series_length * 24 + and len(Sub.xs_water_level.columns) == 2 ) assert ( - len(Sub.XSWaterDepth) == test_time_series_length * 24 - and len(Sub.XSWaterDepth.columns) == 2 + len(Sub.xs_water_depth) == test_time_series_length * 24 + and len(Sub.xs_water_depth.columns) == 2 ) Sub.read1DResult(xsid=Read1DResult_xsid) - assert Read1DResult_xsid in Sub.XSHydrographs.columns.tolist() - assert Read1DResult_xsid in Sub.XSWaterLevel.columns.tolist() - assert Read1DResult_xsid in Sub.XSWaterDepth.columns.tolist() + assert Read1DResult_xsid in Sub.xs_hydrograph.columns.tolist() + assert Read1DResult_xsid in Sub.xs_water_level.columns.tolist() + assert Read1DResult_xsid in Sub.xs_water_depth.columns.tolist() def test_Read_zip( self, @@ -190,33 +190,33 @@ def test_Read_zip( ): River = R.River("HM", version=version) - River.onedresultpath = Read1DResult_path - # River.results_paths = {"onedresultpath": Read1DResult_path} + River.one_d_result_path = Read1DResult_path + # River.results_paths = {"one_d_result_path": Read1DResult_path} River.readXS(river_cross_section_path) Sub = R.Reach(segment1, River) Sub.read1DResult(extension=".zip") assert ( - len(Sub.Result1D) - == test_time_series_length * 24 * (len(Sub.crosssections) + 1) - and len(Sub.Result1D.columns) == 6 + len(Sub.results_1d) + == test_time_series_length * 24 * (len(Sub.cross_sections) + 1) + and len(Sub.results_1d.columns) == 6 ) assert ( - len(Sub.XSHydrographs) == test_time_series_length * 24 - and len(Sub.XSHydrographs.columns) == 2 + len(Sub.xs_hydrograph) == test_time_series_length * 24 + and len(Sub.xs_hydrograph.columns) == 2 ) assert ( - len(Sub.XSWaterLevel) == test_time_series_length * 24 - and len(Sub.XSWaterLevel.columns) == 2 + len(Sub.xs_water_level) == test_time_series_length * 24 + and len(Sub.xs_water_level.columns) == 2 ) assert ( - len(Sub.XSWaterDepth) == test_time_series_length * 24 - and len(Sub.XSWaterDepth.columns) == 2 + len(Sub.xs_water_depth) == test_time_series_length * 24 + and len(Sub.xs_water_depth.columns) == 2 ) Sub.read1DResult(xsid=Read1DResult_xsid) - assert Read1DResult_xsid in Sub.XSHydrographs.columns.tolist() - assert Read1DResult_xsid in Sub.XSWaterLevel.columns.tolist() - assert Read1DResult_xsid in Sub.XSWaterDepth.columns.tolist() + assert Read1DResult_xsid in Sub.xs_hydrograph.columns.tolist() + assert Read1DResult_xsid in Sub.xs_water_level.columns.tolist() + assert Read1DResult_xsid in Sub.xs_water_depth.columns.tolist() def test_Read_zip_chunks( self, @@ -229,33 +229,33 @@ def test_Read_zip_chunks( ): River = 
R.River("HM", version=version) - River.onedresultpath = Read1DResult_path - # River.results_paths = {"onedresultpath": Read1DResult_path} + River.one_d_result_path = Read1DResult_path + # River.results_paths = {"one_d_result_path": Read1DResult_path} River.readXS(river_cross_section_path) Sub = R.Reach(segment1, River) Sub.read1DResult(chunk_size=10000, extension=".zip") assert ( - len(Sub.Result1D) - == test_time_series_length * 24 * (len(Sub.crosssections) + 1) - and len(Sub.Result1D.columns) == 6 + len(Sub.results_1d) + == test_time_series_length * 24 * (len(Sub.cross_sections) + 1) + and len(Sub.results_1d.columns) == 6 ) assert ( - len(Sub.XSHydrographs) == test_time_series_length * 24 - and len(Sub.XSHydrographs.columns) == 2 + len(Sub.xs_hydrograph) == test_time_series_length * 24 + and len(Sub.xs_hydrograph.columns) == 2 ) assert ( - len(Sub.XSWaterLevel) == test_time_series_length * 24 - and len(Sub.XSWaterLevel.columns) == 2 + len(Sub.xs_water_level) == test_time_series_length * 24 + and len(Sub.xs_water_level.columns) == 2 ) assert ( - len(Sub.XSWaterDepth) == test_time_series_length * 24 - and len(Sub.XSWaterDepth.columns) == 2 + len(Sub.xs_water_depth) == test_time_series_length * 24 + and len(Sub.xs_water_depth.columns) == 2 ) Sub.read1DResult(xsid=Read1DResult_xsid) - assert Read1DResult_xsid in Sub.XSHydrographs.columns.tolist() - assert Read1DResult_xsid in Sub.XSWaterLevel.columns.tolist() - assert Read1DResult_xsid in Sub.XSWaterDepth.columns.tolist() + assert Read1DResult_xsid in Sub.xs_hydrograph.columns.tolist() + assert Read1DResult_xsid in Sub.xs_water_level.columns.tolist() + assert Read1DResult_xsid in Sub.xs_water_depth.columns.tolist() def test_Sub_GetLaterals( @@ -301,7 +301,7 @@ def test_Sub_GetLaterals( and len(Sub.BC.columns) == 1 and len(Sub.Laterals.columns) == len(sub_GetFlow_lateralTable) + 1 ) - assert all(elem in Sub.LateralsTable for elem in sub_GetFlow_lateralTable) + assert all(elem in Sub.laterals_table for elem in sub_GetFlow_lateralTable) def test_ReadRRMHydrograph_one_location( @@ -314,7 +314,7 @@ def test_ReadRRMHydrograph_one_location( test_time_series_length: int, ): River = R.River("HM", version=version) - River.rrmpath = ReadRRMHydrograph_path + River.rrm_path = ReadRRMHydrograph_path River.readXS(river_cross_section_path) Sub = R.Reach(segment1, River) Sub.readRRMHydrograph( @@ -338,7 +338,7 @@ def test_ReadRRMHydrograph_two_location( test_time_series_length: int, ): River = R.River("HM", version=version) - River.rrmpath = ReadRRMHydrograph_path + River.rrm_path = ReadRRMHydrograph_path River.readXS(river_cross_section_path) Sub = R.Reach(segment1, River) Sub.readRRMHydrograph( @@ -370,10 +370,10 @@ def test_ReadUSHydrograph( Sub = R.Reach(segment3, River) Sub.readUSHydrograph() assert ( - len(Sub.USHydrographs) == test_time_series_length - and len(Sub.USHydrographs.columns) == len(segment3_us_subs) + 1 + len(Sub.us_hydrographs) == test_time_series_length + and len(Sub.us_hydrographs.columns) == len(segment3_us_subs) + 1 ) - assert all(elem in Sub.USHydrographs.columns.tolist() for elem in segment3_us_subs) + assert all(elem in Sub.us_hydrographs.columns.tolist() for elem in segment3_us_subs) class TestGetTotalFlow: @@ -513,7 +513,7 @@ def test_PlotQ( River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) River.customized_runs_path = CustomizedRunspath - River.onedresultpath = Read1DResult_path + River.one_d_result_path = Read1DResult_path IF = Interface("Rhine", start=dates[0]) 
IF.readBoundaryConditionsTable(interface_bc_path) @@ -575,7 +575,7 @@ def test_CalculateQMetrics( stationname = gauges.loc[gaugei, "oid"] River = R.River("HM", version=version, start=dates[0]) - River.onedresultpath = Read1DResult_path + River.one_d_result_path = Read1DResult_path River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) @@ -583,10 +583,10 @@ def test_CalculateQMetrics( Sub.read1DResult() # without filter - Sub.calculateQMetrics(Calib, stationname, Sub.lastxs) + Sub.calculateQMetrics(Calib, stationname, Sub.last_xs) Sub.calculateQMetrics( - Calib, stationname, Sub.lastxs, Filter=True, start=dates[0], end=dates[1] + Calib, stationname, Sub.last_xs, Filter=True, start=dates[0], end=dates[1] ) @@ -599,7 +599,7 @@ def test_PlotHydrographProgression( segment3: int, ): River = R.River("HM", version=version, start=dates[0]) - River.onedresultpath = Read1DResult_path + River.one_d_result_path = Read1DResult_path River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) @@ -609,17 +609,15 @@ def test_PlotHydrographProgression( xss = [] start = dates[0] end = dates[1] - fromxs = "" - toxs = "" fig, ax = Sub.plotHydrographProgression( xss, start, end, - fromxs=fromxs, - toxs=toxs, - linewidth=2, + from_xs=None, + to_xs=None, + line_width=2, spacing=20, - figsize=(6, 4), + fig_size=(6, 4), xlabels=5, ) plt.close() @@ -658,7 +656,7 @@ def test_PlotWL( River = R.River("HM", version=version) River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) - River.onedresultpath = Read1DResult_path + River.one_d_result_path = Read1DResult_path Sub = R.Reach(segment3, River) Sub.read1DResult() @@ -697,7 +695,7 @@ def test_CalculateWLMetrics( stationname = gauges.loc[gaugei, "oid"] River = R.River("HM", version=version, start=dates[0]) - River.onedresultpath = Read1DResult_path + River.one_d_result_path = Read1DResult_path River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) @@ -705,10 +703,10 @@ def test_CalculateWLMetrics( Sub.read1DResult() # without filter - Sub.calculateWLMetrics(Calib, stationname, Sub.lastxs) + Sub.calculateWLMetrics(Calib, stationname, Sub.last_xs) Sub.calculateWLMetrics( - Calib, stationname, Sub.lastxs, Filter=True, start=dates[0], end=dates[1] + Calib, stationname, Sub.last_xs, Filter=True, start=dates[0], end=dates[1] ) @@ -724,12 +722,12 @@ def test_SaveHydrograph( River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) River.customized_runs_path = CustomizedRunspath - River.onedresultpath = Read1DResult_path + River.one_d_result_path = Read1DResult_path Sub = R.Reach(segment3, River) Sub.read1DResult() - Sub.saveHydrograph(Sub.lastxs) + Sub.saveHydrograph(Sub.last_xs) # option 2 - Sub.saveHydrograph(Sub.lastxs, Option=2) + Sub.saveHydrograph(Sub.last_xs, Option=2) def test_ReadBoundaryConditions( @@ -747,8 +745,8 @@ def test_ReadBoundaryConditions( River = R.River("HM", version=version, start=dates[0]) River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) - River.onedresultpath = Read1DResult_path - River.usbcpath = usbc_path + River.one_d_result_path = Read1DResult_path + River.us_bc_path = usbc_path Sub = R.Reach(segment3, River) Sub.read1DResult() # read only 10 days @@ -783,23 +781,23 @@ def test_ReadSubDailyResults( River = R.River("HM", version=version, start=dates[0]) River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) - River.usbcpath = usbc_path - River.oneminresultpath = 
subdailyresults_path + River.us_bc_path = usbc_path + River.one_min_result_path = subdailyresults_path Sub = R.Reach(segment3, River) Sub.readSubDailyResults( - onemin_results_dates[0], onemin_results_dates[1], Lastsegment=lastsegment + onemin_results_dates[0], onemin_results_dates[1], last_river_reach=lastsegment ) assert len(Sub.h) == onemin_results_len * subdaily_no_timesteps assert all(elem in Sub.h.columns.tolist() for elem in segment3_xs_ids_list) assert len(Sub.q) == onemin_results_len * subdaily_no_timesteps assert all(elem in Sub.q.columns.tolist() for elem in segment3_xs_ids_list) assert ( - len(Sub.QBCmin.columns) == subdaily_no_timesteps - and len(Sub.QBCmin) == onemin_results_len + len(Sub.q_bc_1min.columns) == subdaily_no_timesteps + and len(Sub.q_bc_1min) == onemin_results_len ) assert ( - len(Sub.HBCmin.columns) == subdaily_no_timesteps - and len(Sub.HBCmin) == onemin_results_len + len(Sub.h_bc_1min.columns) == subdaily_no_timesteps + and len(Sub.h_bc_1min) == onemin_results_len ) @@ -817,11 +815,11 @@ def test_PlotBC( River = R.River("HM", version=version, start=dates[0]) River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) - River.usbcpath = usbc_path - River.oneminresultpath = subdailyresults_path + River.us_bc_path = usbc_path + River.one_min_result_path = subdailyresults_path Sub = R.Reach(segment3, River) Sub.readSubDailyResults( - onemin_results_dates[0], onemin_results_dates[1], Lastsegment=lastsegment + onemin_results_dates[0], onemin_results_dates[1], last_river_reach=lastsegment ) Sub.plotBC(dates[0]) @@ -861,7 +859,7 @@ def test_GetCapacity( River.statisticalProperties(distribution_properties_hm_results_fpath) River.getCapacity("Qbkf") River.getCapacity("Qc2", Option=2) - cols = River.crosssections.columns.tolist() + cols = River.cross_sections.columns.tolist() assert "Slope" in cols assert "Qbkf" in cols assert "QbkfRP" in cols diff --git a/tests/plot/conftest.py b/tests/plot/conftest.py index 1e8c1683..f78edc6e 100644 --- a/tests/plot/conftest.py +++ b/tests/plot/conftest.py @@ -1 +1,27 @@ -from tests.plot.map.conftest import * +import pytest + +# xs in segment 3 +fromxs = 230 +toxs = 235 +start = "1955-02-10" +end = "1955-02-11" + + +@pytest.fixture(scope="module") +def plot_xs_seg3_fromxs() -> int: + return fromxs + + +@pytest.fixture(scope="module") +def plot_xs_seg3_toxs() -> int: + return toxs + + +@pytest.fixture(scope="module") +def animate_start() -> str: + return start + + +@pytest.fixture(scope="module") +def animate_end() -> str: + return end diff --git a/tests/plot/map/__init__.py b/tests/plot/map/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/plot/map/conftest.py b/tests/plot/map/conftest.py deleted file mode 100644 index 96b22256..00000000 --- a/tests/plot/map/conftest.py +++ /dev/null @@ -1,97 +0,0 @@ -from typing import List - -import numpy as np -import pandas as pd -import pytest -from osgeo import gdal -from osgeo.gdal import Dataset - - -@pytest.fixture(scope="module") -def src() -> Dataset: - return gdal.Open("examples/GIS/data/acc4000.tif") - - -@pytest.fixture(scope="module") -def src_arr(src: Dataset) -> np.ndarray: - return src.ReadAsArray() - - -@pytest.fixture(scope="module") -def src_no_data_value(src: Dataset) -> float: - return src.GetRasterBand(1).GetNoDataValue() - - -@pytest.fixture(scope="module") -def cmap() -> str: - return "terrain" - - -@pytest.fixture(scope="module") -def ColorScale() -> List[int]: - return [1, 2, 3, 4, 5] - - -@pytest.fixture(scope="module") 
-def TicksSpacing() -> int: - return 500 - - -@pytest.fixture(scope="module") -def color_scale_2_gamma() -> float: - return 0.5 - - -@pytest.fixture(scope="module") -def color_scale_3_linscale() -> float: - return 0.001 - - -@pytest.fixture(scope="module") -def color_scale_3_linthresh() -> float: - return 0.0001 - - -@pytest.fixture(scope="module") -def midpoint() -> int: - return 20 - - -@pytest.fixture(scope="module") -def display_cellvalue() -> bool: - return True - - -@pytest.fixture(scope="module") -def NumSize() -> int: - return 8 - - -@pytest.fixture(scope="module") -def Backgroundcolorthreshold(): - return None - - -@pytest.fixture(scope="module") -def points() -> pd.DataFrame: - return pd.read_csv("examples/GIS/data/points.csv") - - -@pytest.fixture(scope="module") -def IDsize() -> int: - return 20 - - -@pytest.fixture(scope="module") -def IDcolor() -> str: - return "green" - - -@pytest.fixture(scope="module") -def Gaugesize() -> int: - return 100 - - -@pytest.fixture(scope="module") -def Gaugecolor() -> str: - return "blue" diff --git a/tests/visualiser/test_visualiser.py b/tests/plot/test_visualiser.py similarity index 81% rename from tests/visualiser/test_visualiser.py rename to tests/plot/test_visualiser.py index 2efd813c..bca9436c 100644 --- a/tests/visualiser/test_visualiser.py +++ b/tests/plot/test_visualiser.py @@ -1,5 +1,3 @@ -from collections import OrderedDict - import matplotlib.pyplot as plt from matplotlib.figure import Figure @@ -8,13 +6,6 @@ from Hapi.plot.visualizer import Visualize as V -def test_create_visualize_instance(): - Vis = V(resolution="Hourly") - assert isinstance(Vis.MarkerStyleList, list) - assert isinstance(Vis.FigureDefaultOptions, dict) - assert isinstance(Vis.linestyles, OrderedDict) - - def test_GroundSurface( version: int, river_cross_section_path: str, @@ -32,7 +23,7 @@ def test_GroundSurface( River = R.River("HM", version=version, start=dates[0]) River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) - River.onedresultpath = Read1DResult_path + River.one_d_result_path = Read1DResult_path Sub = R.Reach(segment3, River) IF = Interface("Rhine", start=dates[0]) @@ -51,9 +42,13 @@ def test_GroundSurface( Sub.read1DResult() Vis = V(resolution="Hourly") - Vis.GroundSurface(Sub, floodplain=True, plotlateral=True, nxlabels=20, option=2) + Vis.plotGroundSurface( + Sub, floodplain=True, plot_lateral=True, xlabels_number=20, option=2 + ) plt.close() - Vis.GroundSurface(Sub, floodplain=True, plotlateral=True, nxlabels=20, option=1) + Vis.plotGroundSurface( + Sub, floodplain=True, plot_lateral=True, xlabels_number=20, option=1 + ) plt.close() @@ -76,18 +71,18 @@ def test_CrossSections( River = R.River("HM", version=version, start=dates[0]) River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) - River.onedresultpath = Read1DResult_path + River.one_d_result_path = Read1DResult_path Sub = R.Reach(segment3, River) Vis = V(resolution="Hourly") - fig, ax = Vis.CrossSections( + fig, ax = Vis.plotCrossSections( Sub, bedlevel=True, - fromxs=plot_xs_seg3_fromxs, - toxs=plot_xs_seg3_toxs, - samescale=True, - textspacing=[(1, 1), (1, 4)], - plottingoption=3, + from_xs=plot_xs_seg3_fromxs, + to_xs=plot_xs_seg3_toxs, + same_scale=True, + text_spacing=[(1, 1), (1, 4)], + plotting_option=3, ) plt.close() assert isinstance(fig, Figure) @@ -115,8 +110,8 @@ def test_CrossSections( # River = R.River('HM', version=version, start=dates[0]) # River.ReadCrossSections(river_cross_section_path) # 
River.RiverNetwork(river_network_path) -# River.onedresultpath = Read1DResult_path -# River.usbcpath = usbc_path +# River.one_d_result_path = Read1DResult_path +# River.us_bc_path = usbc_path # Reach = R.Reach(segment3, River) # # IF = Interface('Rhine', start=dates[0]) @@ -132,8 +127,8 @@ def test_CrossSections( # Reach.ReadBoundaryConditions(start=dates[0], end=dates[1]) # # Vis = V(resolution="Hourly") -# Anim = Vis.WaterSurfaceProfile(Reach, animate_start, animate_end, fps=2, nxlabels=5, -# xaxislabelsize=10, textlocation=(-1, -2),repeat=False) +# Anim = Vis.WaterSurfaceProfile(Reach, animate_start, animate_end, fps=2, xlabels_number=5, +# x_axis_label_size=10, text_location=(-1, -2),repeat=False) # # rc('animation', html='jshtml') # plt.close() # assert isinstance(Anim, FuncAnimation) @@ -162,9 +157,9 @@ def test_CrossSections( # River = R.River('HM', version=version, start=dates[0]) # River.ReadCrossSections(river_cross_section_path) # River.RiverNetwork(river_network_path) -# River.onedresultpath = Read1DResult_path -# River.oneminresultpath = subdailyresults_path -# River.usbcpath = usbc_path +# River.one_d_result_path = Read1DResult_path +# River.one_min_result_path = subdailyresults_path +# River.us_bc_path = usbc_path # Reach = R.Reach(segment3, River) # # IF = Interface('Rhine', start=dates[0]) @@ -177,7 +172,7 @@ def test_CrossSections( # # Reach.GetFlow(IF) # Reach.Read1DResult() -# Reach.ReadSubDailyResults(animate_start, animate_end, Lastsegment=lastsegment) +# Reach.ReadSubDailyResults(animate_start, animate_end, last_river_reach=lastsegment) # Reach.ReadBoundaryConditions(start=animate_start, end=animate_end) # # Vis = V(resolution="Hourly") @@ -204,12 +199,12 @@ def test_Plot1minProfile( River = R.River("HM", version=version, start=dates[0]) River.readXS(river_cross_section_path) River.readRiverNetwork(river_network_path) - River.usbcpath = usbc_path - River.oneminresultpath = subdailyresults_path + River.us_bc_path = usbc_path + River.one_min_result_path = subdailyresults_path Sub = R.Reach(segment3, River) Sub.readSubDailyResults( - onemin_results_dates[0], onemin_results_dates[1], Lastsegment=lastsegment + onemin_results_dates[0], onemin_results_dates[1], last_river_reach=lastsegment ) Vis = V(resolution="Hourly") - Vis.plot1minProfile(Sub, dates[0], nxlabels=20) + Vis.plot1minProfile(Sub, dates[0], xlabels_number=20) diff --git a/tests/visualiser/__init__.py b/tests/visualiser/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/visualiser/conftest.py b/tests/visualiser/conftest.py deleted file mode 100644 index f78edc6e..00000000 --- a/tests/visualiser/conftest.py +++ /dev/null @@ -1,27 +0,0 @@ -import pytest - -# xs in segment 3 -fromxs = 230 -toxs = 235 -start = "1955-02-10" -end = "1955-02-11" - - -@pytest.fixture(scope="module") -def plot_xs_seg3_fromxs() -> int: - return fromxs - - -@pytest.fixture(scope="module") -def plot_xs_seg3_toxs() -> int: - return toxs - - -@pytest.fixture(scope="module") -def animate_start() -> str: - return start - - -@pytest.fixture(scope="module") -def animate_end() -> str: - return end
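The consolidated tests/plot/conftest.py above exposes the segment-3 cross-section window and the animation dates as module-scoped fixtures, so pytest builds each value once per test module and injects it by argument name. A minimal sketch of a test consuming them; the test itself is hypothetical:

    def test_uses_shared_window(
        plot_xs_seg3_fromxs: int,
        plot_xs_seg3_toxs: int,
        animate_start: str,
        animate_end: str,
    ):
        # pytest matches each parameter name to the fixture of the same name
        assert plot_xs_seg3_fromxs < plot_xs_seg3_toxs
        # ISO "YYYY-MM-DD" strings order correctly under plain string comparison
        assert animate_start < animate_end

With scope="module", each fixture function runs once per module rather than once per test, which is why the values are defined once at module level and simply returned.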