fix zbound bug in MAP rec (niv_geomlib didnt agree with truncated obs maps), small refactors
Sebastian-Belkner committed Mar 6, 2024
1 parent ace7c81 commit 3f57735
Showing 4 changed files with 63 additions and 30 deletions.
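A note on the zbounds handling this commit moves around: zbounds are limits in z = cos(theta), so the colatitude interval handed to the geometry restriction comes from arccos with the two entries swapped, since arccos is decreasing on [-1, 1]. A minimal numpy sketch of that relation, with hypothetical values:

import numpy as np

zbounds = (-0.2, 0.8)  # hypothetical band in z = cos(theta)
# the smaller z maps to the larger colatitude, hence the swapped order
thtbounds = (np.arccos(zbounds[1]), np.arccos(zbounds[0]))
assert 0 <= thtbounds[0] < thtbounds[1] <= np.pi
print(thtbounds)  # approx (0.64, 1.77) radians

This is the thtbounds tuple passed to lenjob_geomlib.restrict(...) in the hunks below.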
22 changes: 12 additions & 10 deletions delensalot/config/transformer/lerepi2dlensalot.py
@@ -368,8 +368,9 @@ def _process_Itrec(dl, it):

dl.lenjob_geominfo = it.lenjob_geominfo
dl.lenjob_geomlib = get_geom(it.lenjob_geominfo)
thtbounds = (np.arccos(dl.zbounds[1]), np.arccos(dl.zbounds[0]))
dl.lenjob_geomlib.restrict(*thtbounds, northsouth_sym=False, update_ringstart=True)
# FIXME not sure if we should restrict here
# thtbounds = (np.arccos(dl.zbounds[1]), np.arccos(dl.zbounds[0]))
# dl.lenjob_geomlib.restrict(*thtbounds, northsouth_sym=False, update_ringstart=True)

# TODO this needs cleaner implementation
if dl.version == '' or dl.version == None:
@@ -558,6 +559,8 @@ def _process_Itrec(dl, it):

dl.lenjob_geominfo = it.lenjob_geominfo
dl.lenjob_geomlib = get_geom(it.lenjob_geominfo)
thtbounds = (np.arccos(dl.zbounds[1]), np.arccos(dl.zbounds[0]))
dl.lenjob_geomlib.restrict(*thtbounds, northsouth_sym=False, update_ringstart=True)

if dl.version == '' or dl.version == None:
dl.mf_dirname = opj(dl.TEMP, l2T_Transformer.ofj('mf', {'Nmf': dl.Nmf}))
@@ -796,14 +799,13 @@ def _process_Madel(dl, ma):
dl.masks[maskflavour][maskid] = dl.masks_fromfn[maskid]
dl.binmasks[maskflavour][maskid] = np.where(dl.masks[maskflavour][maskid]>0,1,0)

## Binning and power spectrum calculator specific preparation
if ma.Cl_fid == 'ffp10':
dl.cls_unl = camb_clfile(cf.analysis.cls_unl)
dl.cls_len = camb_clfile(cf.analysis.cls_len)
dl.clg_templ = dl.cls_len['ee']
dl.clc_templ = dl.cls_len['bb']
dl.clg_templ[0] = 1e-32
dl.clg_templ[1] = 1e-32
## Binning and power spectrum calculator specific preparation.
dl.cls_unl = camb_clfile(cf.analysis.cls_unl)
dl.cls_len = camb_clfile(cf.analysis.cls_len)
dl.clg_templ = dl.cls_len['ee']
dl.clc_templ = dl.cls_len['bb']
dl.clg_templ[0] = 1e-32
dl.clg_templ[1] = 1e-32

dl.binning = ma.binning
if dl.binning == 'binned':
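Side note on the hunk above: the ffp10 guard is dropped, so the lensed E/B templates are always built from the CAMB files. A rough self-contained sketch of the same pattern, assuming plancklens' camb_clfile reader (delensalot may import it from its own utils) and a placeholder file path:

from plancklens.utils import camb_clfile

cls_len = camb_clfile('FFP10_wdipole_lensedCls.dat')  # placeholder path to a CAMB-formatted cls file
clg_templ = cls_len['ee']   # gradient-mode (E) template
clc_templ = cls_len['bb']   # curl-mode (B) template
# tiny non-zero monopole/dipole so later divisions by clg_templ stay finite
clg_templ[0] = 1e-32
clg_templ[1] = 1e-32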
39 changes: 26 additions & 13 deletions delensalot/core/handler.py
@@ -498,7 +498,9 @@ def postrun_sky(self):


class Noise_modeller(Basejob):

'''
CURRENTLY NOT USED
'''
def __init__(self, dlensalot_model):
super().__init__(dlensalot_model)

@@ -547,13 +549,15 @@ def __init__(self, dlensalot_model):
marge_monopole=True, marge_dipole=True, marge_maps=[])
transf_elm_loc = gauss_beam(self.beam / 180 / 60 * np.pi, lmax=self.lm_max_ivf[0])
if self.OBD == 'OBD':
nivjob_geomlib_ = get_geom(self.nivjob_geominfo)
# FIXME not sure which nivjob_geomlib to pass here, restricted or not?
# nivjob_geomlib_ = get_geom(self.nivjob_geominfo)
self.cinv_p = cinv_p_OBD.cinv_p(opj(self.libdir_QE, 'cinv_p'),
self.lm_max_ivf[0], self.nivjob_geominfo[1]['nside'], self.cls_len,
transf_elm_loc[:self.lm_max_ivf[0]+1], self.nivp_desc, geom=nivjob_geomlib_, #self.nivjob_geomlib,
transf_elm_loc[:self.lm_max_ivf[0]+1], self.nivp_desc, geom=self.nivjob_geomlib, #self.nivjob_geomlib,
chain_descr=self.chain_descr(self.lm_max_ivf[0], self.cg_tol), bmarg_lmax=self.lmin_teb[2],
zbounds=(-1,1), _bmarg_lib_dir=self.obd_libdir, _bmarg_rescal=self.obd_rescale,
zbounds=self.zbounds, _bmarg_lib_dir=self.obd_libdir, _bmarg_rescal=self.obd_rescale,
sht_threads=self.tr)
# (-1,1)
else:
self.cinv_p = filt_cinv.cinv_p(opj(self.TEMP, 'cinv_p'),
self.lm_max_ivf[0], self.nivjob_geominfo[1]['nside'], self.cls_len,
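Regarding transf_elm_loc = gauss_beam(self.beam / 180 / 60 * np.pi, ...) in the hunk above: the factor converts a beam FWHM quoted in arcminutes to radians, which is what the Gaussian beam window expects. A small sketch with healpy's gauss_beam and a hypothetical 2.3 arcmin beam:

import numpy as np
import healpy as hp

beam_fwhm_amin = 2.3                          # hypothetical FWHM in arcminutes
fwhm_rad = beam_fwhm_amin / 180 / 60 * np.pi  # arcmin -> deg -> rad
bl = hp.gauss_beam(fwhm_rad, lmax=4096)       # beam transfer function b_ell, bl[0] == 1
print(bl[[0, 1000, 4096]])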
@@ -749,19 +753,21 @@ def run(self, task=None):

if task == 'calc_phi':
for idx in self.jobs[taski][mpi.rank::mpi.size]:
log.info("get_sim_qlm..")
self.qlms_dd.get_sim_qlm(self.k, int(idx))
log.info("get_sim_qlm done.")
if np.all(self.simulationdata.obs_lib.maps == DEFAULT_NotAValue):
self.simulationdata.purgecache()
mpi.barrier()

for idx in self.jobs[taski][mpi.rank::mpi.size]:
## If meanfield subtraction is done, only one task must calculate the meanfield first before get_plm() is called, otherwise read-errors because all tasks try calculating/accessing it at once.
## If meanfield subtraction is requested, only one task must calculate the meanfield first before get_plm() is called, otherwise read-errors because all tasks try calculating/accessing it at once.
## The way I fix this (the next two lines) is a bit unclean.
if self.QE_subtract_meanfield:
self.qlms_dd.get_sim_qlm_mf(self.k, [int(simidx_mf) for simidx_mf in self.simidxs_mf])
self.get_plm(idx, self.QE_subtract_meanfield)
if np.all(self.simulationdata.obs_lib.maps == DEFAULT_NotAValue):
self.simulationdata.purgecache()

self.simulationdata.purgecache()

if task == 'calc_meanfield':
if len(self.jobs[taski])>0:
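The self.jobs[taski][mpi.rank::mpi.size] loops above distribute simulation indices round-robin over MPI ranks. A minimal stand-alone sketch of the same pattern with plain mpi4py (delensalot's mpi module is assumed to expose rank, size and barrier in the same spirit):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

jobs = list(range(8))            # e.g. simulation indices
for idx in jobs[rank::size]:     # rank r gets indices r, r+size, r+2*size, ...
    print(f"rank {rank} processes sim {idx}")
comm.Barrier()                   # synchronize before the next task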
@@ -824,6 +830,7 @@ def get_mchain(self, simidx, key, it=0):
def get_meanfield(self, simidx):
# Either return MC MF, filter.qlms_mf, or mfvar
ret = np.zeros_like(self.qlms_dd.get_sim_qlm(self.k, 0))
fn_mf = opj(self.libdir_QE, 'mf_allsims.npy')
if np.dtype(self.mfvar) == str:
if self.mfvar == 'qlms_mf':
# calculate MF estimate using Lewis&Carron trick
@@ -833,13 +840,15 @@ def get_meanfield(self, simidx):
if self.mfvar == None:
# MC MF, and exclude the current simidx
ret = self.qlms_dd.get_sim_qlm_mf(self.k, [int(simidx_mf) for simidx_mf in self.simidxs_mf])
np.save(fn_mf, ret) # plancklens already stores that in qlms_dd/ but I want to have this more conveniently without the naming gibberish
if simidx in self.simidxs_mf:
ret = (ret - self.qlms_dd.get_sim_qlm(self.k, int(simidx)) / self.Nmf) * (self.Nmf / (self.Nmf - 1))
else:
# mfvar, and exclude the current simidx from the mfvar simset (qlms_dd_mfvar)
ret = hp.read_alm(self.mfvar)
if simidx in self.simidxs_mf:
ret = (ret - self.qlms_dd_mfvar.get_sim_qlm(self.k, int(simidx)) / self.Nmf) * (self.Nmf / (self.Nmf - 1))

return ret

return ret
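On the (ret - qlm_i / Nmf) * Nmf / (Nmf - 1) step above: if the mean field is the average over all Nmf simulations including simidx, this rescaling is exactly the leave-one-out average over the remaining Nmf - 1 simulations. A quick numerical check of that identity with toy numbers:

import numpy as np

Nmf = 5
qlms = np.random.standard_normal((Nmf, 16))        # toy per-simulation qlm estimates
mf_all = qlms.mean(axis=0)                         # mean field over all Nmf sims

i = 2                                              # simulation index to exclude
mf_loo = (mf_all - qlms[i] / Nmf) * (Nmf / (Nmf - 1))
assert np.allclose(mf_loo, np.delete(qlms, i, axis=0).mean(axis=0))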
@@ -993,11 +1002,10 @@ def __init__(self, dlensalot_model):
self.simulationdata = self.simgen.simulationdata
self.qe = QE_lr(dlensalot_model, caller=self)
self.qe.simulationdata = self.simgen.simulationdata # just to be sure, so we have a single truth in MAP_lr.


if self.OBD == 'OBD':
nivjob_geomlib_ = get_geom(self.nivjob_geominfo)
self.tpl = template_dense(self.lmin_teb[2], nivjob_geomlib_, self.tr, _lib_dir=self.obd_libdir, rescal=self.obd_rescale)
# FIXME not sure why this was here.. that caused mismatch for calc_gradlik niv_job geom and qumaps when truncated
# nivjob_geomlib_ = get_geom(self.nivjob_geominfo)
self.tpl = template_dense(self.lmin_teb[2], self.nivjob_geomlib, self.tr, _lib_dir=self.obd_libdir, rescal=self.obd_rescale)
else:
self.tpl = None

@@ -1017,6 +1025,7 @@ def __init__(self, dlensalot_model):
elif self.it_filter_directional == 'isotropic':
self.sims_MAP = self.simulationdata
self.filter = self.get_filter()
log.info('------ init done ----')

# # @base_exception_handler
@log_on_start(logging.DEBUG, "MAP.map.collect_jobs() started")
@@ -1250,11 +1259,16 @@ def _prepare_job(self):
# @log_on_start(logging.DEBUG, "get_basemap() started")
# @log_on_end(logging.DEBUG, "get_basemap() finished")
def get_basemap(self, simidx):
'''
Return a B-map to be delensed. Can be the map handled in the sims_lib library (basemap='lens'), 'lens_ffp10' (this potentially is the same as sims_lib currently uses ffp10 as baseline),
or the observed map itself, in which case the residual foregrounds and noise will still be in there.
'''
# TODO depends if data comes from delensalot simulations or from external.. needs cleaner implementation
if self.basemap == 'lens':
return almxfl(alm_copy(self.simulationdata.get_sim_sky(simidx, space='alm', spin=0, field='polarization')[1], self.simulationdata.lmax, *self.lm_max_blt), self.ttebl['e'], self.lm_max_blt[0], inplace=False)
elif self.basemap == 'lens_ffp10':
return almxfl(alm_copy(planck2018_sims.cmb_len_ffp10.get_sim_blm(simidx), None, lmaxout=self.lm_max_blt[0], mmaxout=self.lm_max_blt[1]), gauss_beam(2.3 / 180 / 60 * np.pi, lmax=self.lm_max_blt[1]))
return almxfl(alm_copy(planck2018_sims.cmb_len_ffp10.get_sim_blm(simidx), None, lmaxout=self.lm_max_blt[0], mmaxout=self.lm_max_blt[1]), gauss_beam(self.beam / 180 / 60 * np.pi, lmax=self.lm_max_blt[1]))
else:
# only checking for map to save some memory..
if np.all(self.simulationdata.maps == DEFAULT_NotAValue):
@@ -1263,7 +1277,6 @@ def get_basemap(self, simidx):
return hp.map2alm_spin(self.simulationdata.get_sim_obs(simidx, space='map', spin=2, field='polarization'), spin=2, lmax=self.lm_max_blt[0], mmax=self.lm_max_blt[1])[1]



@log_on_start(logging.DEBUG, "_delens() started")
@log_on_end(logging.DEBUG, "_delens() finished")
def delens(self, simidx, outputdata):
1 change: 0 additions & 1 deletion delensalot/core/iterator/cs_iterator.py
@@ -478,7 +478,6 @@ def calc_gradlik(self, itr, key, iwantit=False):
soltn, it_soltn = self.load_soltn(itr, key)
if it_soltn < itr - 1:
soltn *= self.soltn_cond

mchain.solve(soltn, self.dat_maps, dot_op=self.filter.dot_op())
fn_wf = 'wflm_%s_it%s' % (key.lower(), itr - 1)
log.info("caching " + fn_wf)
31 changes: 25 additions & 6 deletions delensalot/core/opfilt/bmodes_ninv.py
@@ -305,12 +305,31 @@ def __init__(self, geom, n_inv, b_transf, lmax_marg=0, zbounds=(-1., 1.), blm_ra
self.templates.append(template_bfilt(lmax_marg=lmax_marg, geom=geom, sht_threads=sht_threads, _lib_dir=_bmarg_lib_dir))
if len(self.templates) > 0:
if _bmarg_lib_dir is not None and os.path.exists( os.path.join(_bmarg_lib_dir, 'tniti.npy')):
if mpi.rank == 0:
log.info("Loading " + os.path.join(_bmarg_lib_dir, 'tniti.npy'))
self.tniti = np.load(os.path.join(_bmarg_lib_dir, 'tniti.npy'))
else:
pass
self.tniti = mpi.bcast(self.tniti, root=0)
nmodes_ = int((lmax_marg + 1) * lmax_marg + lmax_marg + 1 - 4)
log.info("Loading " + os.path.join(_bmarg_lib_dir, 'tniti.npy'))
self.tniti = np.load(os.path.join(_bmarg_lib_dir, 'tniti.npy'))
log.info("done")
# from mpi4py import MPI
# # np.frombuffer(buf, dtype=np.double, count=data_size)
# if mpi.rank == 0:
# win = MPI.Win.Allocate_shared(nmodes_*nmodes_, MPI.DOUBLE, comm=MPI.COMM_WORLD)
# self.tniti, _ = win.Shared_query(0)
# log.info("Loading " + os.path.join(_bmarg_lib_dir, 'tniti.npy'))
# self.tniti[:] = np.load(os.path.join(_bmarg_lib_dir, 'tniti.npy'))
# print("tniti shape: {}".format(self.tniti.shape))
# log.info('sharing tniti across slurm workers..')
# win.Fence()
# else:
# self.tniti = np.zeros(shape=(nmodes_,nmodes_))
# win = MPI.Win.Allocate_shared(0, MPI.DOUBLE, comm=MPI.COMM_WORLD)
# size = win.Shared_query(0)[0]
# buf, _ = win.Shared_query(0)
# win.Fence()
# self.tniti[:] = buf[:]

# for rowi, row in enumerate(self.tniti):
# self.tniti[rowi] = mpi.bcast(self.tniti[rowi], root=0)
# log.info('sharing tniti across slurm workers.. DONE.')
if _bmarg_rescal != 1.:
log.info("**** RESCALING tiniti with %.4f"%_bmarg_rescal)
self.tniti *= _bmarg_rescal
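If the commented-out shared-memory idea above is picked up again: the usual mpi4py pattern allocates the window in bytes on one rank, wraps the queried buffer in a numpy array on every rank, and fills it only on the allocating rank. A self-contained sketch of that pattern (not what this commit does, which simply loads tniti.npy on every rank; the matrix size below is a placeholder):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD              # on multi-node jobs, first split with MPI.COMM_TYPE_SHARED
nmodes = 1000                      # placeholder for the number of marginalized modes
itemsize = MPI.DOUBLE.Get_size()

nbytes = nmodes * nmodes * itemsize if comm.rank == 0 else 0
win = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)
buf, _ = win.Shared_query(0)       # every rank gets a view of rank 0's allocation
tniti = np.ndarray(buffer=buf, dtype=np.float64, shape=(nmodes, nmodes))

if comm.rank == 0:
    tniti[:] = np.random.standard_normal((nmodes, nmodes))  # stand-in for np.load('tniti.npy')
comm.Barrier()                     # all ranks now read the same shared matrix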
