Merge pull request #708 from nipy/rel/2.3.2
REL: 2.3.2
effigies authored Jan 2, 2019
2 parents c57662e + e551af8 commit e2b5a90
Showing 41 changed files with 299 additions and 102 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -96,7 +96,7 @@ before_install:
- virtualenv --python=python venv
- source venv/bin/activate
- python --version # just to check
- pip install -U pip wheel # needed at one point
- pip install -U pip setuptools>=27.0 wheel
- retry pip install nose flake8 mock # always
- pip install $EXTRA_PIP_FLAGS $DEPENDS $OPTIONAL_DEPENDS
- if [ "${COVERAGE}" == "1" ]; then
39 changes: 39 additions & 0 deletions Changelog
@@ -24,6 +24,45 @@ Gerhard (SG), Eric Larson (EL), Yaroslav Halchenko (YOH) and Chris Cheng (CC).

References like "pr/298" refer to github pull request numbers.

2.3.2 (Wednesday 2 January 2019)
================================

Enhancements
------------
* Enable toggling crosshair with ``Ctrl-x`` in ``OrthoSlicer3D`` viewer (pr/701)
(Miguel Estevan Moreno, reviewed by CM)
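
  As a quick illustration of the new binding, a minimal viewer session might
  look like this (a sketch: the toy volume and identity affine are
  illustrative, not from the PR)::

      import numpy as np
      from nibabel.viewers import OrthoSlicer3D

      data = np.random.rand(32, 32, 32)    # toy volume, illustrative only
      viewer = OrthoSlicer3D(data, affine=np.eye(4))
      viewer.show()   # with the window focused, Ctrl-x toggles the crosshair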

Bug fixes
---------
* Read .PAR files corresponding to ADC maps (pr/685) (Gregory R. Lee, reviewed
by CM)
* Increase maximum number of items read from Siemens CSA format (Igor Solovey,
reviewed by CM, MB)
* Check boolean dtypes with ``numpy.issubdtype(..., np.bool_)`` (pr/707)
(Jon Haitz Legarreta Gorroño, reviewed by CM)
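
  For the ``numpy.issubdtype`` entry above, the pattern is roughly as follows
  (a sketch, not nibabel's exact call sites)::

      import numpy as np

      arr = np.zeros(3, dtype=bool)
      # ``np.bool_`` is numpy's boolean scalar type; checking against it
      # avoids relying on the deprecated ``np.bool`` alias.
      if np.issubdtype(arr.dtype, np.bool_):
          print('boolean image data')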

Maintenance
-----------
* Fix small typos in parrec2nii help text (pr/682) (Thomas Roos, reviewed by
MB)
* Remove deprecated calls to ``numpy.asscalar`` (pr/686) (CM, reviewed by
Gregory R. Lee)
* Update QA directives to accommodate Flake8 3.6 (pr/695) (CM)
* Update DOI links to use ``https://doi.org`` (pr/703) (Katrin Leinweber,
reviewed by CM)
* Remove deprecated calls to ``numpy.fromstring`` (pr/700) (Ariel Rokem,
reviewed by CM, MB)
* Drop ``distutils`` support, require ``bz2file`` for Python 2.7 (pr/700)
(CM, reviewed by MB)
* Replace mutable ``bytes`` hack, disabled in numpy pre-release, with
``bytearray``/``readinto`` strategy (pr/700) (Ariel Rokem, CM, reviewed by
CM, MB)

API changes and deprecations
----------------------------
* Add ``Opener.readinto`` method to read file contents into pre-allocated buffers
(pr/700) (Ariel Rokem, reviewed by CM, MB)
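
  A minimal sketch of the ``bytearray``/``readinto`` pattern from pr/700; the
  file name is hypothetical, and 348 bytes is just an Analyze-sized header::

      import numpy as np
      from nibabel.openers import Opener

      buf = bytearray(348)                  # pre-allocated, mutable buffer
      with Opener('example.nii') as fobj:   # hypothetical file
          n_read = fobj.readinto(buf)       # fills ``buf`` in place
      arr = np.frombuffer(buf, dtype=np.uint8, count=n_read)

  Reading into a pre-allocated ``bytearray`` avoids constructing a mutable
  ``bytes`` object, which numpy pre-releases rejected.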

2.3.1 (Tuesday 16 October 2018)
===============================

1 change: 1 addition & 0 deletions appveyor.yml
@@ -22,6 +22,7 @@ install:
- SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH%

# Install the dependencies of the project.
- python -m pip install --upgrade pip setuptools wheel
- pip install numpy scipy matplotlib nose h5py mock pydicom
- pip install .
- SET NIBABEL_DATA_DIR=%CD%\nibabel-data
2 changes: 1 addition & 1 deletion doc/source/conf.py
@@ -87,7 +87,7 @@

# General information about the project.
project = u'NiBabel'
copyright = u'2006-2018, %(MAINTAINER)s <%(AUTHOR_EMAIL)s>' % rel
copyright = u'2006-2019, %(MAINTAINER)s <%(AUTHOR_EMAIL)s>' % rel

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
2 changes: 1 addition & 1 deletion doc/source/devel/make_release.rst
@@ -301,7 +301,7 @@ Release checklist
text for adding a DOI badge in various formats. Copy the DOI Markdown text.
The markdown will look something like this::

[![DOI](https://zenodo.org/badge/doi/10.5281/zenodo.60847.svg)](http://dx.doi.org/10.5281/zenodo.60847)
[![DOI](https://zenodo.org/badge/doi/10.5281/zenodo.60847.svg)](https://doi.org/10.5281/zenodo.60847)

Go back to the Github release page for this release, click "Edit release".
and copy the DOI into the release notes. Click "Update release".
5 changes: 5 additions & 0 deletions doc/source/index.rst
@@ -87,6 +87,11 @@ contributed code and discussion (in rough order of appearance):
* Mathias Goncalves
* Jakub Kaczmarzyk
* Dimitri Papadopoulos Orfanos
* Miguel Estevan Moreno
* Thomas Roos
* Igor Solovey
* Jon Haitz Legarreta Gorroño
* Katrin Leinweber

License reprise
===============
2 changes: 1 addition & 1 deletion nibabel/affines.py
@@ -6,7 +6,7 @@
import numpy as np

from six.moves import reduce
from .testing import setup_test # flake8: noqa F401
from .testing import setup_test # noqa


class AffineError(ValueError):
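
The ``# flake8: noqa F401`` to ``# noqa`` changes here and in ``casting.py``,
``dwiparams.py``, and ``nifti1.py`` below track Flake8 3.6, which as I
understand it treats ``# flake8: noqa`` strictly as a whole-file skip and no
longer accepts codes appended to it, so per-line suppression uses the bare
form. A sketch, with ``os`` standing in for the nibabel import::

    import os  # noqa
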
6 changes: 2 additions & 4 deletions nibabel/analyze.py
@@ -1057,10 +1057,8 @@ def to_file_map(self, file_map=None):
# Store consumable values for later restore
offset = hdr.get_data_offset()
# Scalars of slope, offset to get immutable values
slope = (np.asscalar(hdr['scl_slope']) if hdr.has_data_slope
else np.nan)
inter = (np.asscalar(hdr['scl_inter']) if hdr.has_data_intercept
else np.nan)
slope = hdr['scl_slope'].item() if hdr.has_data_slope else np.nan
inter = hdr['scl_inter'].item() if hdr.has_data_intercept else np.nan
# Check whether to calculate slope / inter
scale_me = np.all(np.isnan((slope, inter)))
if scale_me:
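
The ``np.asscalar`` to ``.item()`` swap here (and in ``ecat.py`` and
``nifti1.py`` below) follows numpy's deprecation of ``asscalar``;
``ndarray.item()`` is the documented equivalent. A standalone sketch with a
stand-in value, not nibabel's actual header::

    import numpy as np

    field = np.array(2.5, dtype=np.float32)  # stand-in for hdr['scl_slope']
    slope = field.item()                     # plain, immutable Python float
    assert isinstance(slope, float)
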
4 changes: 2 additions & 2 deletions nibabel/casting.py
@@ -8,7 +8,7 @@
from platform import processor, machine

import numpy as np
from .testing import setup_test # flake8: noqa F401
from .testing import setup_test # noqa


class CastingError(Exception):
@@ -268,7 +268,7 @@ def type_info(np_type):
# 80) but in calculations nexp in fact appears to be 11 as for float64
ret.update(dict(width=width))
return ret
if vals == (105, 11, 16): # correctly detected double double
if vals == (105, 11, 16): # correctly detected double double
ret.update(dict(nmant=nmant, nexp=nexp, width=width))
return ret
# Oh dear, we don't recognize the type information. Try some known types
4 changes: 2 additions & 2 deletions nibabel/cifti2/tests/test_cifti2.py
@@ -58,7 +58,7 @@ def test_cifti2_metadata():
assert_equal(md.data, dict(metadata_test))

assert_equal(list(iter(md)), list(iter(collections.OrderedDict(metadata_test))))

md.update({'a': 'aval', 'b': 'bval'})
assert_equal(md.data, dict(metadata_test))

@@ -310,7 +310,7 @@ def test_matrix():

assert_raises(ci.Cifti2HeaderError, m.insert, 0, mim_none)
assert_equal(m.mapped_indices, [])

h = ci.Cifti2Header(matrix=m)
assert_equal(m.mapped_indices, [])
m.insert(0, mim_0)
4 changes: 2 additions & 2 deletions nibabel/cmdline/parrec2nii.py
@@ -89,7 +89,7 @@ def get_opt_parser():
p.add_option(
Option("--minmax", action="store", nargs=2, dest="minmax",
help=one_line(
"""Mininum and maximum settings to be stored in the NIfTI
"""Minimum and maximum settings to be stored in the NIfTI
header. If any of them is set to 'parse', the scaled data is
scanned for the actual minimum and maximum. To bypass this
potentially slow and memory intensive step (the data has to
@@ -103,7 +103,7 @@ def get_opt_parser():
default=False,
help=one_line(
"""If set, all information from the PAR header is stored in
an extension ofthe NIfTI file header. Default: off""")))
an extension of the NIfTI file header. Default: off""")))
p.add_option(
Option("--scaling", action="store", dest="scaling", default='dv',
help=one_line(
4 changes: 2 additions & 2 deletions nibabel/ecat.py
@@ -657,8 +657,8 @@ def data_from_fileobj(self, frame=0, orientation=None):
subhdr = self.subheaders[frame]
raw_data = self.raw_data_from_fileobj(frame, orientation)
# Scale factors have to be set to scalars to force scalar upcasting
data = raw_data * np.asscalar(header['ecat_calibration_factor'])
data = data * np.asscalar(subhdr['scale_factor'])
data = raw_data * header['ecat_calibration_factor'].item()
data = data * subhdr['scale_factor'].item()
return data


16 changes: 8 additions & 8 deletions nibabel/externals/netcdf.py
@@ -37,7 +37,7 @@

import numpy as np # noqa
from ..py3k import asbytes, asstr
from numpy import fromstring, ndarray, dtype, empty, array, asarray
from numpy import frombuffer, ndarray, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce

@@ -519,7 +519,7 @@ def _read(self):
if not magic == b'CDF':
raise TypeError("Error: %s is not a valid NetCDF 3 file" %
self.filename)
self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]
self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]

# Read file headers and set data.
self._read_numrecs()
@@ -608,7 +608,7 @@ def _read_var_array(self):
# Calculate size to avoid problems with vsize (above)
a_size = reduce(mul, shape, 1) * size
if self.file_bytes >= 0 and begin_ + a_size > self.file_bytes:
data = fromstring(b'\x00'*a_size, dtype=dtype_)
data = frombuffer(b'\x00'*a_size, dtype=dtype_)
elif self.use_mmap:
mm = mmap(self.fp.fileno(), begin_+a_size, access=ACCESS_READ)
data = ndarray.__new__(ndarray, shape, dtype=dtype_,
@@ -622,7 +622,7 @@ def _read_var_array(self):
buf = self.fp.read(a_size)
if len(buf) < a_size:
buf = b'\x00'*a_size
data = fromstring(buf, dtype=dtype_)
data = frombuffer(buf, dtype=dtype_)
data.shape = shape
self.fp.seek(pos)

@@ -644,7 +644,7 @@ def _read_var_array(self):
else:
pos = self.fp.tell()
self.fp.seek(begin)
rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
rec_array = frombuffer(self.fp.read(self._recs*self._recsize), dtype=dtypes)
rec_array.shape = (self._recs,)
self.fp.seek(pos)

@@ -687,7 +687,7 @@ def _read_values(self):
self.fp.read(-count % 4) # read padding

if typecode is not 'c':
values = fromstring(values, dtype='>%s' % typecode)
values = frombuffer(values, dtype='>%s' % typecode)
if values.shape == (1,):
values = values[0]
else:
@@ -705,14 +705,14 @@ def _pack_int(self, value):
_pack_int32 = _pack_int

def _unpack_int(self):
return int(fromstring(self.fp.read(4), '>i')[0])
return int(frombuffer(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int

def _pack_int64(self, value):
self.fp.write(array(value, '>q').tostring())

def _unpack_int64(self):
return fromstring(self.fp.read(8), '>q')[0]
return frombuffer(self.fp.read(8), '>q')[0]

def _pack_string(self, s):
count = len(s)
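
The ``fromstring`` to ``frombuffer`` swaps above (and in the GIFTI parser and
``nifti1.py`` below) follow numpy's deprecation of ``fromstring`` for binary
input. One behavioral difference worth noting in a sketch: ``frombuffer``
returns a read-only view sharing memory with its input, where ``fromstring``
returned a copy::

    import numpy as np

    raw = b'\x01\x00\x00\x00\x02\x00\x00\x00'
    ints = np.frombuffer(raw, dtype='<i4')  # array([1, 2]), read-only view
    writable = ints.copy()                  # copy first if mutation is needed
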
4 changes: 2 additions & 2 deletions nibabel/gifti/parse_gifti_fast.py
@@ -47,7 +47,7 @@ def read_data_block(encoding, endian, ordering, datatype, shape, data):
dec = base64.b64decode(data.encode('ascii'))
dt = data_type_codes.type[datatype]
sh = tuple(shape)
newarr = np.fromstring(dec, dtype=dt)
newarr = np.frombuffer(dec, dtype=dt)
if len(newarr.shape) != len(sh):
newarr = newarr.reshape(sh, order=ord)

@@ -59,7 +59,7 @@ def read_data_block(encoding, endian, ordering, datatype, shape, data):
zdec = zlib.decompress(dec)
dt = data_type_codes.type[datatype]
sh = tuple(shape)
newarr = np.fromstring(zdec, dtype=dt)
newarr = np.frombuffer(zdec, dtype=dt)
if len(newarr.shape) != len(sh):
newarr = newarr.reshape(sh, order=ord)

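End to end, the zlib branch above decodes roughly like this (a self-contained
sketch with a toy payload; little-endian float32 assumed for illustration)::

    import base64
    import zlib

    import numpy as np

    payload = np.arange(6, dtype='<f4').tobytes()            # toy data block
    data = base64.b64encode(zlib.compress(payload)).decode('ascii')

    dec = base64.b64decode(data.encode('ascii'))             # as in the parser
    newarr = np.frombuffer(zlib.decompress(dec), dtype='<f4').reshape((2, 3))
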
7 changes: 4 additions & 3 deletions nibabel/info.py
@@ -19,8 +19,8 @@
_version_major = 2
_version_minor = 3
_version_micro = 2
_version_extra = 'dev'
# _version_extra = ''
# _version_extra = 'dev'
_version_extra = ''

# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
__version__ = "%s.%s.%s%s" % (_version_major,
@@ -209,4 +209,5 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__):
ISRELEASE = _version_extra == ''
VERSION = __version__
PROVIDES = ["nibabel", 'nisext']
REQUIRES = ["numpy (>=%s)" % NUMPY_MIN_VERSION]
REQUIRES = ["numpy>=%s" % NUMPY_MIN_VERSION,
'bz2file; python_version < "3.0"']
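
The new ``bz2file`` requirement carries a PEP 508 environment marker, so pip
installs it only under Python 2; this is presumably also why the Travis and
AppVeyor configs above now upgrade ``setuptools``, since older versions do not
handle markers in ``install_requires``. Illustratively (version numbers here
are placeholders, not nibabel's actual pins)::

    REQUIRES = [
        "numpy>=1.7",                        # placeholder minimum
        'bz2file; python_version < "3.0"',   # marker: Python 2 only
    ]
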
2 changes: 1 addition & 1 deletion nibabel/nicom/csareader.py
@@ -18,7 +18,7 @@
'IS': int, # integer string
}

MAX_CSA_ITEMS = 199
MAX_CSA_ITEMS = 1000


class CSAError(Exception):
2 changes: 1 addition & 1 deletion nibabel/nicom/dwiparams.py
@@ -21,7 +21,7 @@
'''
import numpy as np
import numpy.linalg as npl
from ..testing import setup_test as setup_module # flake8: noqa F401
from ..testing import setup_test as setup_module # noqa


def B2q(B, tol=None):
Binary file added nibabel/nicom/tests/data/csa_str_1001n_items.bin
Binary file not shown.
Binary file removed nibabel/nicom/tests/data/csa_str_200n_items.bin
Binary file not shown.
10 changes: 5 additions & 5 deletions nibabel/nicom/tests/test_csareader.py
@@ -21,7 +21,7 @@
CSA2_B1000 = open(pjoin(IO_DATA_PATH, 'csa2_b1000.bin'), 'rb').read()
CSA2_0len = gzip.open(pjoin(IO_DATA_PATH, 'csa2_zero_len.bin.gz'), 'rb').read()
CSA_STR_valid = open(pjoin(IO_DATA_PATH, 'csa_str_valid.bin'), 'rb').read()
CSA_STR_200n_items = open(pjoin(IO_DATA_PATH, 'csa_str_200n_items.bin'), 'rb').read()
CSA_STR_1001n_items = open(pjoin(IO_DATA_PATH, 'csa_str_1001n_items.bin'), 'rb').read()


@dicom_test
@@ -70,15 +70,15 @@ def test_csa_len0():

def test_csa_nitem():
# testing csa.read's ability to raise an error when n_items >= 200
assert_raises(csa.CSAReadError, csa.read, CSA_STR_200n_items)
# OK when < 200
assert_raises(csa.CSAReadError, csa.read, CSA_STR_1001n_items)
# OK when < 1000
csa_info = csa.read(CSA_STR_valid)
assert_equal(len(csa_info['tags']), 1)
# OK after changing module global
n_items_thresh = csa.MAX_CSA_ITEMS
try:
csa.MAX_CSA_ITEMS = 1000
csa_info = csa.read(CSA_STR_200n_items)
csa.MAX_CSA_ITEMS = 2000
csa_info = csa.read(CSA_STR_1001n_items)
assert_equal(len(csa_info['tags']), 1)
finally:
csa.MAX_CSA_ITEMS = n_items_thresh
13 changes: 6 additions & 7 deletions nibabel/nifti1.py
@@ -27,7 +27,7 @@
from .spm99analyze import SpmAnalyzeHeader
from .casting import have_binary128
from .pydicom_compat import have_dicom, pydicom as pdcm
from .testing import setup_test # flake8: noqa F401
from .testing import setup_test # noqa

# nifti1 flat header definition for Analyze-like first 348 bytes
# first number in comments indicates offset in file header in bytes
@@ -579,7 +579,7 @@ def from_fileobj(klass, fileobj, size, byteswap):
# otherwise there should be a full extension header
if not len(ext_def) == 8:
raise HeaderDataError('failed to read extension header')
ext_def = np.fromstring(ext_def, dtype=np.int32)
ext_def = np.frombuffer(ext_def, dtype=np.int32)
if byteswap:
ext_def = ext_def.byteswap()
# be extra verbose
@@ -1330,7 +1330,7 @@ def get_intent(self, code_repr='label'):
raise TypeError('repr can be "label" or "code"')
n_params = len(recoder.parameters[code]) if known_intent else 0
params = (float(hdr['intent_p%d' % (i + 1)]) for i in range(n_params))
name = asstr(np.asscalar(hdr['intent_name']))
name = asstr(hdr['intent_name'].item())
return label, tuple(params), name

def set_intent(self, code, params=(), name='', allow_unknown=False):
@@ -1679,7 +1679,7 @@ def _chk_qfac(hdr, fix=False):
@staticmethod
def _chk_magic(hdr, fix=False):
rep = Report(HeaderDataError)
magic = np.asscalar(hdr['magic'])
magic = hdr['magic'].item()
if magic in (hdr.pair_magic, hdr.single_magic):
return hdr, rep
rep.problem_msg = ('magic string "%s" is not valid' %
@@ -1693,8 +1693,8 @@ def _chk_magic(hdr, fix=False):
def _chk_offset(hdr, fix=False):
rep = Report(HeaderDataError)
# for ease of later string formatting, use scalar of byte string
magic = np.asscalar(hdr['magic'])
offset = np.asscalar(hdr['vox_offset'])
magic = hdr['magic'].item()
offset = hdr['vox_offset'].item()
if offset == 0:
return hdr, rep
if magic == hdr.single_magic and offset < hdr.single_vox_offset:
@@ -1788,7 +1788,6 @@ def __init__(self, dataobj, affine, header=None,
has been created - see those methods, and the :ref:`manual
<default-sform-qform-codes>` for more details. '''


def update_header(self):
''' Harmonize header with image data and affine
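
To close the loop on the ``from_fileobj`` hunk above: a NIfTI1 extension
header is two ``int32`` values, ``esize`` and ``ecode``, which ``frombuffer``
now parses without the deprecated ``fromstring``. A self-contained sketch with
fabricated bytes::

    import numpy as np

    ext_def = np.array([16, 4], dtype=np.int32).tobytes()  # fabricated: esize=16, ecode=4
    esize, ecode = np.frombuffer(ext_def, dtype=np.int32)  # read-only view, no copy
    # On byte-swapped files the code calls ``.byteswap()``, which returns a
    # swapped copy, so the read-only view from ``frombuffer`` is still fine.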
