Fixes for new openmmtools 0.23.0 (#1203)

* change default to none

* Changing default in template yaml file

* Test null/none atom selection

* null atom selection should be empty list

* Document how to control the log level in the CLI

* Update examples/new-cli/README.md

Co-authored-by: Iván Pulido <2949729+ijpulidos@users.noreply.github.com>

* Add solvent_model kwarg to RelativeFEPSetup

* Specify solvent model input YAML/CLI

* Test openmmtools_dev

* Update CI.yaml

* Update CI.yaml

* make sure we pull in openmmtools

* forgot to add new env file

* Online analysis multiple of checkpoint interval. Fix tests.

* Using dask import inside functions

* dask import inside functions

* Pinning dask version

* set CI to just pull from normal channels since we pushed update

* Drop 3.8 testing

* Testing with ambertools < 23

* Revert "Merge branch '1192-add-solvent-model-cli' into mikemhenry-patch-3"

This reverts commit cd0e440, reversing
changes made to 1a7f712.

* Revert "Merge remote-tracking branch 'origin/mikemhenry-patch-2' into mikemhenry-patch-3"

This reverts commit 1a7f712, reversing
changes made to 1505004.

* Revert "Merge branch 'feat/set_default_to_none_for_atom_selection' into mikemhenry-patch-3"

This reverts commit 1505004, reversing
changes made to 0474f9a.

---------

Co-authored-by: Iván Pulido <2949729+ijpulidos@users.noreply.github.com>
Authored by mikemhenry and ijpulidos on Jun 15, 2023
Parent: 53c61b8 · Commit: 72eac20
Showing 6 changed files with 20 additions and 9 deletions.

.github/workflows/CI.yaml (8 changes: 5 additions & 3 deletions)

@@ -23,7 +23,7 @@ jobs:
 strategy:
 fail-fast: false
 matrix:
-python-version: [3.8, 3.9, "3.10"]
+python-version: [3.9, "3.10"]
 openmm: ["8.0", "7.7"]
 include:
 - openmm: "dev"
@@ -49,12 +49,13 @@ jobs:
 with:
 environment-file: devtools/conda-envs/test_env.yaml
 environment-name: test
-channels: jaimergp/label/unsupported-cudatoolkit-shim,conda-forge/label/openmm_dev/linux-64,conda-forge,openeye
+channels: jaimergp/label/unsupported-cudatoolkit-shim,conda-forge/label/openmm_dev,conda-forge,openeye
 channel-priority: flexible
 cache-env: true
 cache-downloads: true
 extra-specs: |
 python==${{ matrix.python-version }}
+openmmtools==0.23.0
 openmm==8.0.0dev3
 - name: Setup micromamba
@@ -68,6 +69,7 @@ jobs:
 extra-specs: |
 python==${{ matrix.python-version }}
 openmm==${{ matrix.openmm }}
+openmmtools==0.23.0
 - name: Install package
 shell: bash -l {0}
@@ -103,4 +105,4 @@ jobs:
 uses: codecov/codecov-action@v1
 with:
 file: ./coverage.xml
-fail_ci_if_error: true
+fail_ci_if_error: true

devtools/conda-envs/test_env.yaml (5 changes: 3 additions & 2 deletions)

@@ -3,12 +3,13 @@ channels:
 - conda-forge
 - openeye
 dependencies:
+- ambertools <23
 - arsenic
 - autograd
 - click
 - cloudpathlib-s3 >=0.13.0
 - coverage
-- dask
+- dask <2023.5
 - dask-jobqueue
 - dicttoxml
 - distributed
@@ -27,7 +28,7 @@ dependencies:
 - openff-units >=0.1.8
 - openmm >=7.7
 - openmmforcefields >=0.9.0
-- openmmtools >=0.21.5 # may need to sort out ambermini/ambertools/parmed dependencies
+- openmmtools # may need to sort out ambermini/ambertools/parmed dependencies
 - openmoltools # may need to sort out ambermini/ambertools/parmed dependencies (we don't want ambertools)
 - parmed # may need to sort out ambermini/ambertools/parmed dependencies
 - pdbfixer

perses/app/relative_setup.py (4 changes: 2 additions & 2 deletions)

@@ -21,8 +21,6 @@
 from openmmtools.constants import kB
 import logging
 import os
-import dask.distributed as distributed
-from collections import namedtuple
 from collections import namedtuple
 import random
 from scipy.special import logsumexp
@@ -971,6 +969,7 @@ def activate_client(self,
 LSF = True,
 num_processes = 2,
 adapt = False):
+import dask.distributed as distributed

 if LSF:
 from dask_jobqueue import LSFCluster
@@ -1056,6 +1055,7 @@ def wait(self, futures):
 """
 wrapper to wait until futures are complete.
 """
+import dask.distributed as distributed
 if self.client is None:
 pass
 else:
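
The change in this file, repeated in perses/dispersed/parallel.py and perses/dispersed/utils.py below, moves the module-level `import dask.distributed as distributed` into the functions that actually use dask, so importing perses no longer requires a working dask installation. A minimal sketch of the deferred-import pattern, using a hypothetical `activate_client` signature rather than the real perses method:

    import logging

    _logger = logging.getLogger("parallelism")


    def activate_client(scheduler_address=None):
        """Create a dask client only when one is requested.

        The import lives inside the function, so the module imports cleanly
        even if dask.distributed is missing; an ImportError only surfaces if
        this function is actually called.
        """
        import dask.distributed as distributed  # deferred import

        if scheduler_address is None:
            _logger.debug("no scheduler address given; running serially")
            return None
        return distributed.Client(scheduler_address)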

perses/dispersed/parallel.py (7 changes: 6 additions & 1 deletion)

@@ -1,6 +1,6 @@
 import logging
 import time
-import dask.distributed as distributed
+

 # Instantiate logger
 _logger = logging.getLogger("parallelism")
@@ -35,6 +35,7 @@ def activate_client(self,
 timeout : int
 number of seconds to wait to fulfill the workers order
 """
+import dask.distributed as distributed
 self.library = library
 if library is not None:
 _logger.debug(f"library is not None")
@@ -231,6 +232,7 @@ def gather_actor_result(self, future):
 future : <generalized> future
 the future object to be collected from an actor
 """
+import dask.distributed as distributed
 if self.client is None:
 return future
 else:
@@ -254,6 +256,7 @@ class to put on a worker
 -------
 actor : dask.distributed.Actor pointer (future)
 """
+import dask.distributed as distributed
 if self.client is not None:
 if self.library[0] == 'dask':
 future = self.client.submit(_class, workers = [self.workers[self.worker_counter]], actor=True) # Create a _class on a worker
@@ -275,6 +278,7 @@ def progress(self, futures):
 futures : list of <generalized> futures
 futures that are to be gathered
 """
+import dask.distributed as distributed
 if self.client is None:
 pass
 else:
@@ -293,6 +297,7 @@ def wait(self, futures):
 futures : list of <generalized> futures
 futures that are to be gathered
 """
+import dask.distributed as distributed
 if self.client is None:
 pass
 else:

perses/dispersed/utils.py (4 changes: 3 additions & 1 deletion)

@@ -17,7 +17,6 @@
 import time
 from collections import namedtuple
 from perses.annihilation.lambda_protocol import LambdaProtocol
-import dask.distributed as distributed
 from scipy.special import logsumexp
 import openmmtools.cache as cache

@@ -1117,6 +1116,7 @@ def activate_LocallyOptimalAnnealing(thermodynamic_state,
 """
 Function to set worker attributes for annealing.
 """
+import dask.distributed as distributed
 supported_integrators = ['langevin', 'hmc']

 if remote_worker == 'remote':
@@ -1142,6 +1142,7 @@ def deactivate_worker_attributes(remote_worker):
 """
 Function to remove worker attributes for annealing
 """
+import dask.distributed as distributed
 if remote_worker == 'remote':
 _logger.debug(f"\t\tremote_worker is True, getting worker")
 _class = distributed.get_worker()
@@ -1168,6 +1169,7 @@ def call_anneal_method(remote_worker,
 since we can only map functions with parallelisms (no actors), we need to submit a function that calls
 the LocallyOptimalAnnealing.anneal method.
 """
+import dask.distributed as distributed
 if remote_worker == 'remote':
 _class = distributed.get_worker()
 else:

perses/tests/test_cli.py (1 change: 1 addition & 0 deletions)

@@ -28,6 +28,7 @@
 n_steps_per_move_application: 1
 fe_type: repex
 checkpoint_interval: 50
+offline-freq: 50
 n_cycles: 1
 n_states: 3
 n_equilibration_iterations: 0
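
The added `offline-freq: 50` keeps the analysis frequency aligned with `checkpoint_interval: 50`, matching the "Online analysis multiple of checkpoint interval. Fix tests." entry in the commit message: the analysis interval is kept a multiple of the checkpoint interval. A small illustrative check of that relationship (hypothetical helper, not the openmmtools or perses API):

    def check_analysis_interval(checkpoint_interval: int, analysis_interval: int) -> None:
        """Raise if the analysis interval is not a positive multiple of the
        checkpoint interval (the constraint the test YAML above satisfies)."""
        if analysis_interval <= 0 or analysis_interval % checkpoint_interval != 0:
            raise ValueError(
                f"analysis interval {analysis_interval} must be a positive "
                f"multiple of checkpoint_interval {checkpoint_interval}"
            )


    check_analysis_interval(50, 50)    # passes: the values used in the test YAML
    check_analysis_interval(50, 100)   # passes: 100 is a multiple of 50
    # check_analysis_interval(50, 75)  # would raise ValueError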
