Drop the support for PyTorch<2.0 #3272

Merged 5 commits on Oct 4, 2023
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
@@ -44,7 +44,7 @@ To run a single test from the command line
 ```sh
 pytest -vs {path_to_test}::{test_name}
 # or in cuda mode
-CUDA_TEST=1 PYRO_TENSOR_TYPE=torch.cuda.DoubleTensor pytest -vs {path_to_test}::{test_name}
+CUDA_TEST=1 PYRO_DTYPE=float64 PYRO_DEVICE=cuda pytest -vs {path_to_test}::{test_name}
 ```

 To ensure documentation builds correctly, run
4 changes: 2 additions & 2 deletions Makefile
@@ -69,11 +69,11 @@ test-all: lint FORCE
 	| xargs pytest -vx --nbval-lax

 test-cuda: lint FORCE
-	CUDA_TEST=1 PYRO_TENSOR_TYPE=torch.cuda.DoubleTensor pytest -vx --stage unit
+	CUDA_TEST=1 PYRO_DTYPE=float64 PYRO_DEVICE=cuda pytest -vx --stage unit
 	CUDA_TEST=1 pytest -vx tests/test_examples.py::test_cuda

 test-cuda-lax: lint FORCE
-	CUDA_TEST=1 PYRO_TENSOR_TYPE=torch.cuda.DoubleTensor pytest -vx --stage unit --lax
+	CUDA_TEST=1 PYRO_DTYPE=float64 PYRO_DEVICE=cuda pytest -vx --stage unit --lax
 	CUDA_TEST=1 pytest -vx tests/test_examples.py::test_cuda

 test-jit: FORCE
2 changes: 1 addition & 1 deletion examples/baseball.py
@@ -418,6 +418,6 @@ def main(args):
     torch.multiprocessing.set_sharing_strategy("file_system")

     if args.cuda:
-        torch.set_default_tensor_type(torch.cuda.FloatTensor)
+        torch.set_default_device("cuda")

     main(args)
2 changes: 1 addition & 1 deletion examples/contrib/cevae/synthetic.py
@@ -45,7 +45,7 @@ def generate_data(args):

 def main(args):
     if args.cuda:
-        torch.set_default_tensor_type("torch.cuda.FloatTensor")
+        torch.set_default_device("cuda")

     # Generate synthetic data.
     pyro.set_rng_seed(args.seed)
9 changes: 3 additions & 6 deletions examples/contrib/epidemiology/regional.py
@@ -205,12 +205,9 @@ def main(args):
     if args.warmup_steps is None:
         args.warmup_steps = args.num_samples
     if args.double:
-        if args.cuda:
-            torch.set_default_tensor_type(torch.cuda.DoubleTensor)
-        else:
-            torch.set_default_dtype(torch.float64)
-    elif args.cuda:
-        torch.set_default_tensor_type(torch.cuda.FloatTensor)
+        torch.set_default_dtype(torch.float64)
+    if args.cuda:
+        torch.set_default_device("cuda")

     main(args)

9 changes: 3 additions & 6 deletions examples/contrib/epidemiology/sir.py
@@ -391,12 +391,9 @@ def main(args):
     if args.warmup_steps is None:
         args.warmup_steps = args.num_samples
     if args.double:
-        if args.cuda:
-            torch.set_default_tensor_type(torch.cuda.DoubleTensor)
-        else:
-            torch.set_default_dtype(torch.float64)
-    elif args.cuda:
-        torch.set_default_tensor_type(torch.cuda.FloatTensor)
+        torch.set_default_dtype(torch.float64)
+    if args.cuda:
+        torch.set_default_device("cuda")

     main(args)

2 changes: 1 addition & 1 deletion examples/contrib/funsor/hmm.py
@@ -670,7 +670,7 @@ def model_7(sequences, lengths, args, batch_size=None, include_prior=True):

 def main(args):
     if args.cuda:
-        torch.set_default_tensor_type("torch.cuda.FloatTensor")
+        torch.set_default_device("cuda")

     logging.info("Loading data")
     data = poly.load_data(poly.JSB_CHORALES)
5 changes: 2 additions & 3 deletions examples/contrib/mue/FactorMuE.py
Original file line number Diff line number Diff line change
Expand Up @@ -427,9 +427,8 @@ def main(args):
)
args = parser.parse_args()

torch.set_default_dtype(torch.float64)
if args.cuda:
torch.set_default_tensor_type(torch.cuda.DoubleTensor)
else:
torch.set_default_dtype(torch.float64)
torch.set_default_device("cuda")

main(args)
5 changes: 2 additions & 3 deletions examples/contrib/mue/ProfileHMM.py
@@ -316,9 +316,8 @@ def main(args):
     )
     args = parser.parse_args()

+    torch.set_default_dtype(torch.float64)
     if args.cuda:
-        torch.set_default_tensor_type(torch.cuda.DoubleTensor)
-    else:
-        torch.set_default_dtype(torch.float64)
+        torch.set_default_device("cuda")

     main(args)
4 changes: 1 addition & 3 deletions examples/einsum.py
@@ -174,9 +174,7 @@ def time_fn(fn, equation, *operands, **kwargs):

 def main(args):
     if args.cuda:
-        torch.set_default_tensor_type("torch.cuda.FloatTensor")
-    else:
-        torch.set_default_tensor_type("torch.FloatTensor")
+        torch.set_default_device("cuda")

     if args.method == "all":
         for method in ["prob", "logprob", "gradient", "marginal", "map", "sample"]:
2 changes: 1 addition & 1 deletion examples/hmm.py
@@ -620,7 +620,7 @@ def model_7(sequences, lengths, args, batch_size=None, include_prior=True):

 def main(args):
     if args.cuda:
-        torch.set_default_tensor_type("torch.cuda.FloatTensor")
+        torch.set_default_device("cuda")

     logging.info("Loading data")
     data = poly.load_data(poly.JSB_CHORALES)
9 changes: 3 additions & 6 deletions examples/sir_hmc.py
@@ -663,12 +663,9 @@ def main(args):
     args = parser.parse_args()

     if args.double:
-        if args.cuda:
-            torch.set_default_tensor_type(torch.cuda.DoubleTensor)
-        else:
-            torch.set_default_tensor_type(torch.DoubleTensor)
-    elif args.cuda:
-        torch.set_default_tensor_type(torch.cuda.FloatTensor)
+        torch.set_default_dtype(torch.float64)
+    if args.cuda:
+        torch.set_default_device("cuda")

     main(args)

2 changes: 1 addition & 1 deletion examples/sparse_gamma_def.py
@@ -31,7 +31,7 @@
 from pyro.infer import SVI, TraceMeanField_ELBO
 from pyro.infer.autoguide import AutoDiagonalNormal, init_to_feasible

-torch.set_default_tensor_type("torch.FloatTensor")
+torch.set_default_dtype(torch.float32)
 pyro.util.set_rng_seed(0)

2 changes: 1 addition & 1 deletion examples/sparse_regression.py
@@ -41,7 +41,7 @@
 """


-torch.set_default_tensor_type("torch.FloatTensor")
+torch.set_default_dtype(torch.float32)


 def dot(X, Z):
2 changes: 1 addition & 1 deletion examples/svi_horovod.py
@@ -78,7 +78,7 @@ def main(args):
         if args.cuda:
             torch.cuda.set_device(hvd.local_rank())
     if args.cuda:
-        torch.set_default_tensor_type("torch.cuda.FloatTensor")
+        torch.set_default_device("cuda")
     device = torch.tensor(0).device

     if args.horovod:
3 changes: 2 additions & 1 deletion profiler/gaussianhmm.py
@@ -21,7 +21,8 @@ def random_mvn(batch_shape, dim, requires_grad=False):

 def main(args):
     if args.cuda:
-        torch.set_default_tensor_type("torch.cuda.FloatTensor")
+        torch.set_default_device("cuda")
+        torch.set_default_dtype(torch.float32)
Review comment (Member):
Is the set_default_dtype needed here? I see it omitted in other changes.


     hidden_dim = args.hidden_dim
     obs_dim = args.obs_dim
2 changes: 1 addition & 1 deletion pyro/contrib/gp/parameterized.py
@@ -82,7 +82,7 @@ class Parameterized(PyroModule):
     >>> assert "b_scale_unconstrained" in dict(linear.named_parameters())

     Note that by default, data of a parameter is a float :class:`torch.Tensor`
-    (unless we use :func:`torch.set_default_tensor_type` to change default
+    (unless we use :func:`torch.set_default_dtype` to change default
     tensor type). To cast these parameters to a correct data type or GPU device,
     we can call methods such as :meth:`~torch.nn.Module.double` or
     :meth:`~torch.nn.Module.cuda`. See :class:`torch.nn.Module` for more
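A tiny generic illustration of the casting behavior the docstring describes (plain torch.nn, not Pyro-specific; a sketch only):

```python
import torch.nn as nn

layer = nn.Linear(2, 3)
print(next(layer.parameters()).dtype)  # torch.float32, the global default dtype
layer.double()                         # cast all parameters to float64
print(next(layer.parameters()).dtype)  # torch.float64
# layer.cuda() would likewise move the parameters to the GPU, if one is available.
```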
6 changes: 4 additions & 2 deletions pyro/infer/mcmc/api.py
@@ -107,13 +107,15 @@ def __init__(
         self.rng_seed = (torch.initial_seed() + chain_id) % MAX_SEED
         self.log_queue = log_queue
         self.result_queue = result_queue
-        self.default_tensor_type = torch.Tensor().type()
+        self.default_dtype = torch.Tensor().dtype
+        self.default_device = torch.Tensor().device
         self.hook = hook
         self.event = event

     def run(self, *args, **kwargs):
         pyro.set_rng_seed(self.rng_seed)
-        torch.set_default_tensor_type(self.default_tensor_type)
+        torch.set_default_dtype(self.default_dtype)
+        torch.set_default_device(self.default_device)
         kwargs = kwargs
         logger = logging.getLogger("pyro.infer.mcmc")
         logger_id = "CHAIN:{}".format(self.chain_id)
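Context on the change above: the worker captures the parent process's current defaults at construction time and re-applies them in run(), since a chain may execute in a separate process whose global dtype/device settings are independent of the parent's. A minimal sketch of that capture-and-restore pattern (the restore_defaults helper is illustrative only, not part of the diff):

```python
import torch

# Capture the current global defaults, as the patched _Worker.__init__ does.
default_dtype = torch.Tensor().dtype    # e.g. torch.float64 if the parent changed it
default_device = torch.Tensor().device  # e.g. device(type="cuda", index=0)

def restore_defaults() -> None:
    # Re-apply the captured defaults, as the patched _Worker.run() does.
    torch.set_default_dtype(default_dtype)
    torch.set_default_device(default_device)

restore_defaults()
assert torch.zeros(()).dtype == default_dtype
```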
7 changes: 3 additions & 4 deletions tests/common.py
@@ -101,13 +101,12 @@ def tensors_default_to(host):
     :param str host: Either "cuda" or "cpu".
     """
     assert host in ("cpu", "cuda"), host
-    old_module, name = torch.Tensor().type().rsplit(".", 1)
-    new_module = "torch.cuda" if host == "cuda" else "torch"
-    torch.set_default_tensor_type("{}.{}".format(new_module, name))
+    old_host = torch.Tensor().device
+    torch.set_default_device(host)
Review comment (Member):
nit: Could we move the torch.set_default_device(host) into the try block? I realize this was wrong before your PR, but seems like a good time to fix it.

Reply (Member Author):
Do we need this context manager at all? How is it different from with torch.device(device)? https://pytorch.org/docs/stable/generated/torch.set_default_device.html

> To only temporarily change the default device instead of setting it globally, use with torch.device(device): instead.

fritzo (Member), Oct 4, 2023:
Oh it would be great to replace this custom context manager with torch.device, as long as torch.device can be used as a context manager in our earliest supported torch version 1.11.0.

Reply (Member):
ok, well let's keep the polyfill until we drop support for torch==1.11.

ordabayevy (Member Author), Oct 4, 2023:
To clarify: this also applies to torch.set_default_device throughout this PR, and if we want to keep torch 1.11 support then there will be a lot of if/else statements between torch.set_default_device and torch.set_default_tensor_type to set the device.

Reply (Member Author):
Hmm, it looks like set_default_device is not exposed or not available even in torch 1.13:
https://pytorch.org/docs/1.13/search.html?q=set_default&check_keywords=yes&area=default

fritzo (Member), Oct 4, 2023:
Pyro has always aimed at being more stable than torch, and we have historically implemented polyfills in Pyro to smooth over PyTorch's move-fast-and-break-things attitude. If I had time, I'd implement polyfills like a pyro.util.device() context manager, a pyro.util.set_default_device() helper, etc. But I don't have time, and maybe it's time to drop this aim as our maintenance resources dwindle. 🤷

The motivation for being very stable is to avoid giving people headaches. Every time we drop support for some version of some underlying library, some grad student wastes a day trying to install an old repo whose author has graduated and didn't pin versions. Every time we pin a library version, some software engineer at BigCo wastes a week solving dependency issues between conflicting libraries with non-overlapping version pins, spending a day committing to an upstream repo maintained by an overcommitted professor, building polyfills around another dependency (which doesn't explicitly pin versions but actually depends on a version outside our range, which took half a day to figure out), and replacing a third library that is no longer maintained.

If you do decide to drop torch 1.11 support, could you update the version pins everywhere and update the supported Python versions? And we'll bump the minor version in our next release.
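A rough sketch of the kind of polyfill being floated here, assuming a hypothetical pyro.util-style helper with a hasattr-based fallback (illustrative only; this PR does not add it):

```python
import contextlib

import torch

# Hypothetical polyfill along the lines sketched above; NOT part of this PR.
# On torch >= 2.0 it uses set_default_device; on older torch it falls back to
# the legacy default-tensor-type machinery.
@contextlib.contextmanager
def device(dev):
    """Temporarily make `dev` ("cpu" or "cuda") the default device."""
    if hasattr(torch, "set_default_device"):  # torch >= 2.0
        old = torch.Tensor().device
        torch.set_default_device(dev)
        try:
            yield
        finally:
            torch.set_default_device(old)
    else:  # torch < 2.0: switch the default tensor type instead
        old_type = torch.Tensor().type()  # e.g. "torch.DoubleTensor"
        name = old_type.rsplit(".", 1)[-1]
        prefix = "torch.cuda" if str(dev).startswith("cuda") else "torch"
        torch.set_default_tensor_type("{}.{}".format(prefix, name))
        try:
            yield
        finally:
            torch.set_default_tensor_type(old_type)
```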

Reply (Member Author):
> Hmm, it looks like set_default_device is not exposed or not available even in torch 1.13
> https://pytorch.org/docs/1.13/search.html?q=set_default&check_keywords=yes&area=default

I confirmed this by installing it locally:

>>> import torch
>>> torch.__version__
'1.13.0+cu117'
>>> torch.set_default_device
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
AttributeError: module 'torch' has no attribute 'set_default_device'

Can you confirm that it's ok to drop the support for all torch 1.11, 1.12, and 1.13?

Reply (Member):
Sure, let's just be sure to announce in our release notes and bump the minor version.

     try:
         yield
     finally:
-        torch.set_default_tensor_type("{}.{}".format(old_module, name))
+        torch.set_default_device(old_host)


 @contextlib.contextmanager
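For reference, a minimal sketch of the torch.device alternative raised in the thread above (assumes torch >= 2.0, where torch.device doubles as a context manager; not part of this commit's diff):

```python
import torch

# torch >= 2.0: torch.device can be used as a context manager, temporarily
# changing the device that factory functions allocate on.
with torch.device("cuda" if torch.cuda.is_available() else "cpu"):
    inside = torch.zeros(3)   # allocated on the context's device

outside = torch.zeros(3)      # back to the previous default (typically cpu)
print(inside.device, outside.device)
```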
4 changes: 3 additions & 1 deletion tests/conftest.py
@@ -9,7 +9,9 @@

 import pyro

-torch.set_default_tensor_type(os.environ.get("PYRO_TENSOR_TYPE", "torch.DoubleTensor"))
+DTYPE = getattr(torch, os.environ.get("PYRO_DTYPE", "float64"))
+torch.set_default_dtype(DTYPE)
+torch.set_default_device(os.environ.get("PYRO_DEVICE", "cpu"))


 def pytest_configure(config):
2 changes: 1 addition & 1 deletion tests/contrib/timeseries/test_gp.py
@@ -40,7 +40,7 @@
 )
 @pytest.mark.parametrize("T", [11, 37])
 def test_timeseries_models(model, nu_statedim, obs_dim, T):
-    torch.set_default_tensor_type("torch.DoubleTensor")
+    torch.set_default_dtype(torch.float64)
     dt = 0.1 + torch.rand(1).item()

     if model == "lcmgp":
2 changes: 1 addition & 1 deletion tests/contrib/timeseries/test_lgssm.py
@@ -13,7 +13,7 @@
 @pytest.mark.parametrize("obs_dim", [2, 4])
 @pytest.mark.parametrize("T", [11, 17])
 def test_generic_lgssm_forecast(model_class, state_dim, obs_dim, T):
-    torch.set_default_tensor_type("torch.DoubleTensor")
+    torch.set_default_dtype(torch.float64)

     if model_class == "lgssm":
         model = GenericLGSSM(
6 changes: 3 additions & 3 deletions tests/infer/autoguide/test_gaussian.py
@@ -856,10 +856,10 @@ def test_profile(backend, jit, n=1, num_steps=1, log_every=1):
     args = parser.parse_args()

     torch.set_default_dtype(torch.double if args.double else torch.float)
+    if args.double:
+        torch.set_default_dtype(torch.float64)
Review comment (Member):
delete these two new lines (they are unnecessary given line 858 above)

     if args.cuda:
-        torch.set_default_tensor_type(
-            torch.cuda.DoubleTensor if args.double else torch.cuda.FloatTensor
-        )
+        torch.set_default_device("cuda")

     if args.profile:
         p = cProfile.Profile()
2 changes: 1 addition & 1 deletion tutorial/source/logistic-growth.ipynb
@@ -62,7 +62,7 @@
 "\n",
 "if torch.cuda.is_available():\n",
 "    print(\"Using GPU\")\n",
-"    torch.set_default_tensor_type(\"torch.cuda.FloatTensor\")\n",
+"    torch.set_default_device(\"cuda\")\n",
 "else:\n",
 "    print(\"Using CPU\")\n",
 "\n",