Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Sampling using njoy #22

Merged
merged 23 commits into from
May 13, 2019
17 changes: 11 additions & 6 deletions .travis.yml
Original file line number Diff line number Diff line change
@@ -1,12 +1,19 @@
language: python

dist: xenial

git:
depth: 3

addons:
apt:
packages:
- gfortran
- cmake
- git

env:
- NJOY=${TRAVIS_BUILD_DIR}/NJOY2016/build/njoy

python:
- "3.6"
Expand All @@ -15,25 +22,23 @@ branches:
except: gh-pages

before_install:
- git clone https://github.com/njoy/NJOY2016.git && (cd NJOY2016 && mkdir build && cd build && cmake .. && make) # install NJOY
- rm -rf NJOY2016/tests
- pip install sphinx sphinx_rtd_theme codecov numpydoc

install:
- python setup.py install

script:
- pytest
- (git clone https://gist.github.com/luca-fiorito-11/ad352e80b01f81fe12599c079f7cb9d7 && cd ad352e80b01f81fe12599c079f7cb9d7 && python sandy_get_eigenvalues.py ../sandy/data/U5/u235.endf)
# - (git clone https://gist.github.com/luca-fiorito-11/ad352e80b01f81fe12599c079f7cb9d7 && cd ad352e80b01f81fe12599c079f7cb9d7 && python sandy_get_eigenvalues.py ../sandy/data/U5/u235.endf)

after_success:
- codecov --file coverage/lcov.info --disable search

# Set up sphinx configuration file and produce SANDY documentation in html.
before_deploy:
- sphinx-quickstart --sep --dot _ --language en --suffix .rst --master index --makefile --batchfile --use-make-mode --author "Luca Fiorito" --project "SANDY API" -v 1 --release 0 --ext-autodoc --ext-doctest --ext-githubpages --ext-mathjax --extensions numpydoc api_docs
- sed -i '13i\ sandy' api_docs/source/index.rst
- sed -i "s/alabaster/sphinx_rtd_theme/" api_docs/source/conf.py
- sphinx-apidoc --separate --force --module-first -o api_docs/source sandy
- cd api_docs && make html >/dev/null 2>&1
- bash make_docs.sh html

# Deploy SANDY docs to GitHub pages
deploy:
Expand Down
14 changes: 14 additions & 0 deletions make_docs.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
#!/bin/bash
# Build the SANDY API documentation with Sphinx.
#
# Usage: bash make_docs.sh [MAKE_TARGET]
#   e.g. bash make_docs.sh html
# When no target is given, the docs tree is generated but `make` is not run.
set -e

# Set up docs folder
sphinx-quickstart --sep --dot _ --language en --suffix .rst --master index --makefile --batchfile --use-make-mode --author "Luca Fiorito" --project "SANDY API" -v 1 --release 0 --ext-autodoc --ext-doctest --ext-githubpages --ext-mathjax --extensions numpydoc api_docs

# Change configuration file
sed -i '13i\ sandy' api_docs/source/index.rst
sed -i "s/alabaster/sphinx_rtd_theme/" api_docs/source/conf.py

# Create rst files
sphinx-apidoc --separate --force --module-first -o api_docs/source sandy

# Run make only when a target was requested.
# NOTE: an `if` is used instead of `[[ -n $1 ]] && (...)` because with the
# `&&` list the script would exit with status 1 when $1 is empty (the failed
# test becomes the exit status of the last command), breaking CI runs that
# call the script without a target.
if [[ -n "$1" ]]; then
    (cd api_docs && make "$1") >/dev/null 2>&1
fi
4 changes: 3 additions & 1 deletion pytest.ini
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[pytest]
python_files =
tests/*.py
addopts = -s -v --basetemp=sandy_tests -m "not slow and not plot and not njoy_exe"
addopts = -s -v --basetemp=sandy_tests -m "not slow and not plot"
filterwarnings =
ignore::UserWarning
ignore::DeprecationWarning
Expand All @@ -26,3 +26,5 @@ markers =
errorr:
sampling:
slow:
njoy:
njoy_exe:
4 changes: 3 additions & 1 deletion sandy/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,4 +30,6 @@ def filter(self, record):
logging.basicConfig(format=FORMAT)
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(ShutdownHandler(level=40))
#logging.getLogger().addFilter(DuplicateFilter())
#logging.getLogger().addFilter(DuplicateFilter())

__version__ = '0.9.0'
9 changes: 6 additions & 3 deletions sandy/formats/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from .endf6 import *
from .errorr import *
# from .errorr import *
from .groupr import *
from .utils import *
from .records import *
Expand Down Expand Up @@ -33,7 +33,10 @@ def get_file_format(file):
elif flag == -1:
ftype = "gendf"
else:
ftype = "endf6"
if C.L1 == 2:
ftype = "pendf"
else:
ftype = "endf6"
return ftype


Expand All @@ -51,7 +54,7 @@ def read_formatted_file(file, listmat=None, listmf=None, listmt=None):
return Errorr.from_file(file).filter_by(listmat=listmat, listmf=listmf, listmt=listmt)
elif ftype is "gendf":
return Gendf.from_file(file).filter_by(listmat=listmat, listmf=listmf, listmt=listmt)
elif ftype is "endf6":
elif ftype is "endf6" or ftype is "pendf":
return Endf6.from_file(file).filter_by(listmat=listmat, listmf=listmf, listmt=listmt)
else:
raise SandyError("file '{}' not in a known format".format(file))
60 changes: 56 additions & 4 deletions sandy/formats/endf6.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
import numpy as np
import pandas as pd

from sandy.formats.records import read_cont
from sandy.formats import (mf1,
mf3,
mf4,
Expand Down Expand Up @@ -74,6 +75,11 @@ class _BaseFile(pd.DataFrame):

- TEXT : (`string`) MAT/MF/MT section reported as a single string

Attributes
----------
labels : `list` of `str`
index labels MAT, MF and MT

Methods
-------
add_sections
Expand All @@ -86,6 +92,13 @@ class _BaseFile(pd.DataFrame):
Create dataframe by reading a endf6 file
from_text
Create dataframe from endf6 text in string

Raises
------
`SandyError`
if the tape is empty
`SandyError`
if the same combination MAT/MF/MT is found more than once
"""

labels = ['MAT', 'MF', 'MT']
Expand Down Expand Up @@ -175,7 +188,7 @@ def delete_sections(self, *tuples):
----------
tuples : sequence of `tuple`
each tuple should have the format (MAT, MF, MT)
To delete, say a given MF, independentently from the MAT and MT, assign `None`
To delete, say, a given MF independently from the MAT and MT, assign `None`
to the MAT and MT position in the tuple.

Returns
Expand Down Expand Up @@ -209,11 +222,11 @@ def filter_by(self, listmat=None, listmf=None, listmt=None):
Parameters
----------
listmat : `list` or `None`
list of requested MAT values
list of requested MAT values (default is `None`: use all MAT)
listmf : `list` or `None`
list of requested MF values
list of requested MF values (default is `None`: use all MF)
listmt : `list` or `None`
list of requested MT values
list of requested MT values (default is `None`: use all MT)

Returns
-------
Expand All @@ -240,6 +253,32 @@ def mf(self):
@property
def mt(self):
return sorted(self.index.get_level_values("MT").unique())

def get_file_format(self):
    """Infer the ENDF-6 format type of the tape by parsing the first
    CONT record of section MF=1/MT=451 of the first MAT in the file and
    inspecting flags "NLIB" (field N1) and "LRP" (field L1):

    * `NLIB = -11` or `NLIB = -12` : errorr
    * `NLIB = -1` : gendf
    * `LRP = 2` : pendf
    * otherwise : endf6

    Returns
    -------
    `str`
        type of ENDF-6 format
    """
    header_lines = self.TEXT.loc[self.mat[0], 1, 451].splitlines()
    C, _ = read_cont(header_lines, 0)
    if C.N1 in (-11, -12):
        return "errorr"
    if C.N1 == -1:
        return "gendf"
    # Not a NJOY-processed multigroup file: distinguish PENDF from ENDF-6
    # by the LRP resonance flag.
    return "pendf" if C.L1 == 2 else "endf6"



Expand All @@ -260,6 +299,19 @@ class Endf6(_BaseFile):
-------
"""

def get_nsub(self):
    """Determine the ENDF-6 sub-library type by reading flag "NSUB" from
    section MF=1/MT=451 of the first MAT in the file:

    * `NSUB = 10` : Incident-Neutron Data
    * `NSUB = 11` : Neutron-Induced Fission Product Yields

    Returns
    -------
    `int`
        NSUB value
    """
    first_mat = self.mat[0]
    info = self.read_section(first_mat, 1, 451)
    return info["NSUB"]

def read_section(self, mat, mf, mt):
"""Parse MAT/MF/MT section.
"""
Expand Down
6 changes: 3 additions & 3 deletions sandy/formats/mf1.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def read_errorr(text):
out = {"MAT" : MAT, "MF" : MF, "MT" : MT}
i = 0
C, i = read_cont(str_list, i)
out.update({"ZA" : C.C1, "AWR" : C.C2, "ERRFLAG" :C.N1})
out.update({"ZA" : C.C1, "AWR" : C.C2, "LRP" :C.N1})
L, i = read_list(str_list, i)
out.update({"EG" : L.B})
return out
Expand All @@ -44,7 +44,7 @@ def read_groupr(text):
out = {"MAT" : MAT, "MF" : MF, "MT" : MT}
i = 0
C, i = read_cont(str_list, i)
out.update({"ZA" : C.C1, "AWR" : C.C2, "NZ" : C.L2, "GROUPRFLAG" : C.N1, "NTW" : C.N2})
out.update({"ZA" : C.C1, "AWR" : C.C2, "NZ" : C.L2, "LRP" : C.N1, "NTW" : C.N2})
L, i = read_list(str_list, i)
out.update({"TEMPIN" : L.C1, "NGN" : L.L1, "NGG" : L.L2})
out["TITLE"] = L.B[:out["NTW"]]; del L.B[:out["NTW"]]
Expand Down Expand Up @@ -77,7 +77,7 @@ def read_info(text):
TEXT.append(T)
out.update({ "TEXT" : TEXT })
# This part is not given in PENDF files
if out["LRP"] != 2:
if out["LRP"] != 2 and len(TEXT) > 0:
# groups = TEXT[0][:11].split("-")
# out["Z"] = int(groups[0])
# out["SYM"] = groups[1].strip()
Expand Down
68 changes: 33 additions & 35 deletions sandy/formats/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -619,10 +619,6 @@ def perturb(self, pert, method=2, **kwargs):



###############################################################################
# Nuclear Data Covariance Objects (NDCO)
###############################################################################

class BaseCov(pd.DataFrame):
"""Base covariance class inheriting from `pandas.DataFrame`.
Must be used as superclass by all other Nuclear Data Covariance Objects.
Expand Down Expand Up @@ -667,7 +663,7 @@ def to_matrix(self):
covariance matrix as a `numpy` array
"""
return Cov(self.values)

def eig(self):
"""Extract eigenvalues in descending order.

Expand Down Expand Up @@ -781,8 +777,8 @@ def _from_list(cls, iterable):

Returns
-------
`XsCov`
global cross section/nubar covariance matrix
`XsCov` or `pandas.DataFrame`
global cross section/nubar covariance matrix (empty dataframe if no covariance matrix was found)
"""
columns = ["KEYS_ROWS", "KEYS_COLS", "COV"]
# Reindex the cross-reaction matrices
Expand All @@ -804,6 +800,9 @@ def _from_list(cls, iterable):
ey = covs[keys_cols,keys_cols].columns.values
covs[keys_rows,keys_cols] = cov.change_grid(ex, ey)
covs.dropna(inplace=True)
if covs.empty:
logging.warn("covariance matrix is empty")
return pd.DataFrame()
# Create index for global matrix
rows_levels = covs.index.levels[0]
indexlist = [(*keys,e) for keys in rows_levels for e in covs[(keys,keys)].index.values]
Expand All @@ -816,7 +815,6 @@ def _from_list(cls, iterable):
matrix[ix.start:ix.stop,ix1.start:ix1.stop] = cov
if keys_rows != keys_cols:
matrix[ix1.start:ix1.stop,ix.start:ix.stop] = cov.T
# pdb.set_trace()
return cls(matrix, index=index, columns=index)


Expand Down Expand Up @@ -867,11 +865,11 @@ def __init__(self, *args, **kwargs):
self.index.names = self.labels
self.columns.names = self.labels

def get_samples(self, nsmp, eig=0):
def get_samples(self, nsmp, eig=0, seed=None):
cov = self.to_matrix()
frame = pd.DataFrame(cov.sampling(nsmp) + 1, index=self.index, columns=range(1,nsmp+1))
frame = pd.DataFrame(cov.sampling(nsmp, seed=seed) + 1, index=self.index, columns=range(1,nsmp+1))
frame.columns.name = 'SMP'
if eig > 0:
if eig > 0 and nsmp > 1:
eigs = cov.eig()[0]
idxs = np.abs(eigs).argsort()[::-1]
dim = min(len(eigs), eig)
Expand Down Expand Up @@ -920,11 +918,11 @@ def _change_energy_grid(self, mat, mt, new_grid):

@classmethod
def from_endf6(cls, endf6):
"""Extract cross section/nubar covariance from ```Endf6``` instance.
"""Extract cross section/nubar covariance from `Endf6` instance.

Parameters
----------
endf6 : `Endf6`
endf6 : `sandy.formats.endf6.Endf6`
`Endf6` instance containing covariance sections

Returns
Expand Down Expand Up @@ -985,35 +983,35 @@ def from_errorr(cls, errorr):

Parameters
----------
errorr : `Errorr`
errorr : `sandy.formats.endf6.Errorr`
`Errorr` instance containing covariance sections

Returns
-------
`XsCov`
global xs/nubar covariance matrix from ERRORR file
"""
mat = errorr.mat[0]
eg = errorr.read_section(mat,1,451)["EG"]
List = []
for (mat,mf,mt),text in errorr.TEXT.iteritems():
if mf not in [31, 33]:
continue
X = errorr.read_section(mat,mf,mt)
for mt1,y in X["RP"].items():
List.append([mat, X["MT"], mat, mt1, y])
frame = pd.DataFrame(List, columns=('MAT', 'MT','MAT1', 'MT1', 'COV'))
mi = [(mat,mt,e) for mat,mt in sorted(set(zip(frame.MAT, frame.MT))) for e in eg]
index = pd.MultiIndex.from_tuples(mi, names=("MAT", "MT", "E"))
# initialize union matrix
matrix = np.zeros((len(index),len(index)))
for i,row in frame.iterrows():
ix = index.get_loc((row.MAT,row.MT))
ix1 = index.get_loc((row.MAT1,row.MT1))
matrix[ix.start:ix.stop-1,ix1.start:ix1.stop-1] = row.COV
i_lower = np.tril_indices(len(index), -1)
matrix[i_lower] = matrix.T[i_lower] # make the matrix symmetric
return XsCov(matrix, index=index, columns=index)
tape = errorr.filter_by(listmf=[31,33])
eg = errorr.energy_grid
data = []
# Loop MF/MT
logging.debug("found {} covariance sections".format(len(tape)))
for (mat,mf,mt), text in tape.TEXT.iteritems():
X = tape.read_section(mat, mf, mt)
# Loop subsections
logging.debug("reading section MAT={}/MF={}/MT={}".format(mat, mf, mt))
logging.debug("found {} subsections".format(len(X["RP"])))
for mt1,cov in X["RP"].items():
logging.debug("\treading subsection MAT1={}/MT1={}".format(mat, mt1))
# add zero row and column at the end of the matrix (this must be done for ERRORR covariance matrices)
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
cov = EnergyCov(cov, index=eg, columns=eg)
data.append([(mat, mt), (mat, mt1), cov])
if not data:
logging.warn("no xs covariance was found")
return pd.DataFrame()
return cls._from_list(data)



Expand Down
Loading