
Commit

Add files via upload
CharlotteRosenstroem authored Jun 11, 2019
1 parent 8973544 commit 82265f4
Showing 8 changed files with 1,306 additions and 0 deletions.
106 changes: 106 additions & 0 deletions Likelihood/Full_likelihood.py
@@ -0,0 +1,106 @@
import os

import numpy as np

from Prob_dist_astro import *
from Prob_dist_atm import *



# Pre-compute the neutrino-nucleon and neutrino-electron cross-section interpolations used below
# (kx, ky, k, s appear to be the spline orders and smoothing passed to the interpolators).
Initialize_All_Cross_Sections(prefix_dsdy_nu_nucleon='dsdy_ct14nn',
                              prefix_dsdy_nu_electron='dsdy_electron',
                              prefix_cs_nu_electron='cs_electron', kx=1, ky=1, k=1, s=0,
                              verbose=0)


def Partial_likelihood_showers(N_a, N_conv, N_pr, N_mu, g, M, z_min, z_max, E_min, E_max, E_npts, gamma, nu_energy_min, nu_energy_max, nu_energy_num_nodes,
                               costhz_val, costhz_npts, energy_dep, log10_energy_dep_int_min, log10_energy_dep_int_max, log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts,
                               time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose):

    pdastro_sh = Prob_dist_astro(g, M, z_min, z_max, E_min, E_max, E_npts, gamma, nu_energy_min, nu_energy_max, nu_energy_num_nodes, costhz_val, costhz_npts, energy_dep,
                                 log10_energy_dep_int_min, log10_energy_dep_int_max, log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts,
                                 time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose, flag_compute_shower_rate=True, flag_compute_track_rate=False)

    pdatm_conv_sh = Prob_dist_atm_conv_pr(nu_energy_min, nu_energy_max, nu_energy_num_nodes, costhz_val, costhz_npts, energy_dep, log10_energy_dep_int_min, log10_energy_dep_int_max,
                                          log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts, time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose,
                                          flag_use_atm_fluxes_conv=True, flag_use_atm_fluxes_pr=False, flag_apply_self_veto=True, flag_compute_shower_rate=True, flag_compute_track_rate=False)

    pdatm_pr_sh = Prob_dist_atm_conv_pr(nu_energy_min, nu_energy_max, nu_energy_num_nodes, costhz_val, costhz_npts, energy_dep, log10_energy_dep_int_min, log10_energy_dep_int_max,
                                        log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts, time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose,
                                        flag_use_atm_fluxes_conv=False, flag_use_atm_fluxes_pr=True, flag_apply_self_veto=True, flag_compute_shower_rate=True, flag_compute_track_rate=False)

    pdatm_muon_sh = 0  # Probability distribution of atmospheric muons (showers)

    # Per-event shower likelihood: weighted sum of the astrophysical, conventional atm., prompt atm., and atm.-muon distributions
    likelihood = N_a * pdastro_sh + N_conv * pdatm_conv_sh + N_pr * pdatm_pr_sh + N_mu * pdatm_muon_sh

    return likelihood



def Partial_likelihood_tracks(N_a, N_conv, N_pr, N_mu, g, M, z_min, z_max, E_min, E_max, E_npts, gamma, nu_energy_min, nu_energy_max, nu_energy_num_nodes,
                              costhz_val, costhz_npts, energy_dep, log10_energy_dep_int_min, log10_energy_dep_int_max, log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts,
                              time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose):

    pdastro_tr = Prob_dist_astro(g, M, z_min, z_max, E_min, E_max, E_npts, gamma, nu_energy_min, nu_energy_max, nu_energy_num_nodes, costhz_val, costhz_npts, energy_dep,
                                 log10_energy_dep_int_min, log10_energy_dep_int_max, log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts,
                                 time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose, flag_compute_shower_rate=False, flag_compute_track_rate=True)

    pdatm_conv_tr = Prob_dist_atm_conv_pr(nu_energy_min, nu_energy_max, nu_energy_num_nodes, costhz_val, costhz_npts, energy_dep, log10_energy_dep_int_min, log10_energy_dep_int_max,
                                          log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts, time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose,
                                          flag_use_atm_fluxes_conv=True, flag_use_atm_fluxes_pr=False, flag_apply_self_veto=True, flag_compute_shower_rate=False, flag_compute_track_rate=True)

    pdatm_pr_tr = Prob_dist_atm_conv_pr(nu_energy_min, nu_energy_max, nu_energy_num_nodes, costhz_val, costhz_npts, energy_dep, log10_energy_dep_int_min, log10_energy_dep_int_max,
                                        log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts, time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose,
                                        flag_use_atm_fluxes_conv=False, flag_use_atm_fluxes_pr=True, flag_apply_self_veto=True, flag_compute_shower_rate=False, flag_compute_track_rate=True)

    pdatm_muon_tr = Prob_dist_atm_muon(energy_dep, log10_energy_dep_int_min, log10_energy_dep_int_max, log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts, epsabs, epsrel, verbose)

    likelihood = N_a * pdastro_tr + N_conv * pdatm_conv_tr + N_pr * pdatm_pr_tr + N_mu * pdatm_muon_tr

    return likelihood


def Full_likelihood(N_a, N_conv, N_pr, N_mu, g, M, gamma, nu_energy_min, nu_energy_max, z_min, z_max, E_min, E_max, E_npts, nu_energy_num_nodes,
                    costhz_npts, log10_energy_dep_int_min, log10_energy_dep_int_max, log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts,
                    time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose):

    ID_sh, lst_energy_sh, uncertainty_minus_sh, uncertainty_plus_sh, time_sh, declination_sh, RA_sh, Med_sh = Read_Data_File(os.getcwd()+'/'+'data_shower.txt')

    ID_tr, lst_energy_tr, uncertainty_minus_tr, uncertainty_plus_tr, time_tr, declination_tr, RA_tr, Med_tr = Read_Data_File(os.getcwd()+'/'+'data_track.txt')

    FL_sh = 1
    for i in range(3):  # testing subset; use range(len(lst_energy_sh)) to include all shower events
        costhz_val = np.cos((declination_sh[i] + 90)*np.pi/180)  # declination [deg] -> cos(zenith); at the South Pole theta_z = dec + 90 deg
        energy_dep = lst_energy_sh[i]*1000  # deposited energy: TeV -> GeV

        FL_sh = FL_sh * Partial_likelihood_showers(N_a, N_conv, N_pr, N_mu, g, M, z_min, z_max, E_min, E_max, E_npts, gamma, nu_energy_min, nu_energy_max, nu_energy_num_nodes,
                                                   costhz_val, costhz_npts, energy_dep, log10_energy_dep_int_min, log10_energy_dep_int_max, log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts,
                                                   time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose)

    FL_tr = 1
    for i in range(3):  # testing subset; use range(len(lst_energy_tr)) to include all track events
        costhz_val = np.cos((declination_tr[i] + 90)*np.pi/180)  # declination [deg] -> cos(zenith)
        energy_dep = lst_energy_tr[i]*1000  # deposited energy: TeV -> GeV

        FL_tr = FL_tr * Partial_likelihood_tracks(N_a, N_conv, N_pr, N_mu, g, M, z_min, z_max, E_min, E_max, E_npts, gamma, nu_energy_min, nu_energy_max, nu_energy_num_nodes,
                                                  costhz_val, costhz_npts, energy_dep, log10_energy_dep_int_min, log10_energy_dep_int_max, log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts,
                                                  time_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose)

    # Extended (unbinned) likelihood: Poisson factor for the total expected counts times the product of per-event likelihoods
    FL = np.exp(- N_a - N_conv - N_pr - N_mu) * FL_sh * FL_tr

    return FL

"""
log10_nu_energy_min = 2.8
log10_nu_energy_max = 9.2
test = Full_likelihood(20, 20, 20, 20, 0.03, 0.01, 2, nu_energy_min = 10**log10_nu_energy_min, nu_energy_max = 10**log10_nu_energy_max,
z_min = 0, z_max = 4, E_min = 3, E_max = 8, E_npts = 10, nu_energy_num_nodes = 150,
costhz_npts = 2, log10_energy_dep_int_min = 4, log10_energy_dep_int_max = 7, log10_energy_dep_min = 3.8, log10_energy_dep_max = 7.2, log10_energy_dep_npts = 50,
time_det_yr = 8, volume_total = 6.44e14, energy_nu_max = 1e8, epsabs =1e-3, epsrel = 1e-3, verbose=1)
print(test)
"""
23 changes: 23 additions & 0 deletions Likelihood/Likelihood_analysis.sh
@@ -0,0 +1,23 @@
#!/bin/bash

#SBATCH --job-name=likelihood # shows up in the output of 'squeue'
#SBATCH --time=4-23:59:59 # specify the requested wall-time
#SBATCH --partition=astro_long # specify the partition to run on
#SBATCH --nodes=4 # number of nodes allocated for this job
#SBATCH --ntasks-per-node=20 # number of MPI ranks per node
#SBATCH --cpus-per-task=1 # number of OpenMP threads per MPI rank
#SBATCH --mail-type=ALL,TIME_LIMIT_90,TIME_LIMIT,ARRAY_TASKS
#SBATCH --mail-user=vkc652@alumni.ku.dk
#SBATCH -o %A_%a.out # Standard output
#SBATCH -e %A_%a.err # Standard error
##SBATCH --exclude=<node list> # avoid nodes (e.g. --exclude=node786)


# Move to directory job was submitted from
cd $SLURM_SUBMIT_DIR

# Command to run
mpiexec -n 4 python Likelihood_analysis_parser.py --E_npts=2 --nu_energy_num_nodes=10 --costhz_npts=2 --log10_energy_dep_npts=5 --epsabs=1.e-1 --epsrel=1.e-1 --n_live_points=5 --evidence_tolerance=0.5



146 changes: 146 additions & 0 deletions Likelihood/Likelihood_analysis_parser.py
@@ -0,0 +1,146 @@
import json
import os
import argparse

import numpy as np
import scipy.stats, scipy
import pymultinest

from Full_likelihood import *

parser = argparse.ArgumentParser(description='Likelihood Analysis')

parser.add_argument("--z_min", help="Final redshift of the propagation (today). Default: 0", type=int, default=0)

parser.add_argument("--z_max", help="Initial (maximum) source redshift of the propagation. Default: 4", type=int, default=4)

parser.add_argument("--E_min", help="log10 of the minimum neutrino energy [GeV] in the propagation grid. Default: 3", type=int, default=3)

parser.add_argument("--E_max", help="log10 of the maximum neutrino energy [GeV] in the propagation grid. Default: 8", type=int, default=8)

parser.add_argument("--E_npts", help="Number of points in the propagation energy grid. Default: 200", type=int, default=200)

parser.add_argument("--log10_nu_energy_min", help="Default: 2.8", type=float, default=2.8)

parser.add_argument("--log10_nu_energy_max", help="Default: 9.2", type=float, default=9.2)

parser.add_argument("--nu_energy_num_nodes", help="Default: 150", type=int, default=150)

parser.add_argument("--costhz_npts", help="Default: 50", type=int, default=50)

parser.add_argument("--log10_energy_dep_min", help="Default: 3.8", type=float, default=3.8)

parser.add_argument("--log10_energy_dep_max", help="Default: 7.2", type=float, default=7.2)

parser.add_argument("--log10_energy_dep_npts", help="Default: 50", type=int, default=50)

parser.add_argument("--log10_energy_dep_int_min", help="Default: 4.0", type=float, default=4.0)

parser.add_argument("--log10_energy_dep_int_max", help="Default: 7.0", type=float, default=7.0)

parser.add_argument("--time_det_yr", help="Default: 8.0", type=float, default=8.0)

parser.add_argument("--volume_total", help="Default: 6.440e14", type=float, default=6.440e14)

parser.add_argument("--energy_nu_max", help="Default: 1.e8", type=float, default=1.e8)

parser.add_argument("--epsabs", help="Default: 1.e-3", type=float, default=1.e-3)

parser.add_argument("--epsrel", help="Default: 1.e-3", type=float, default=1.e-3)

parser.add_argument("--verbose", help="Default: 0", type=int, default=0)

parser.add_argument("--n_live_points", help="Default: 100", type=int, default=100)

parser.add_argument("--evidence_tolerance", help="Default: 0.1", type=float, default=0.1)


args = parser.parse_args()

z_min = args.z_min
z_max = args.z_max
E_min = args.E_min
E_max = args.E_max
E_npts = args.E_npts
log10_nu_energy_min = args.log10_nu_energy_min
log10_nu_energy_max = args.log10_nu_energy_max
nu_energy_num_nodes = args.nu_energy_num_nodes
costhz_npts = args.costhz_npts
log10_energy_dep_min = args.log10_energy_dep_min
log10_energy_dep_max = args.log10_energy_dep_max
log10_energy_dep_npts = args.log10_energy_dep_npts
log10_energy_dep_int_min = args.log10_energy_dep_int_min
log10_energy_dep_int_max = args.log10_energy_dep_int_max
time_det_yr = args.time_det_yr
volume_total = args.volume_total
energy_nu_max = args.energy_nu_max
epsabs = args.epsabs
epsrel = args.epsrel
verbose = args.verbose
n_live_points = args.n_live_points
evidence_tolerance = args.evidence_tolerance



def Prior(cube, ndim, nparams):

    #Spectral index. Uniform prior between 2 and 3.
    cube[0] = cube[0] + 2

    """
    #Mass of mediator. Log uniform prior between 10^-5 and 10^2
    cube[1] = 10**(cube[1]*7 - 5)
    #Coupling constant. Log uniform prior between 10^-3 and 1.
    cube[2] = 10**(cube[2]*3 - 3)
    #Expected number of astrophysical neutrinos. Uniform prior between 0 and 80.
    cube[3] = cube[3] * 80
    #Expected number of conv. atm. neutrinos. Uniform prior between 0 and 80.
    cube[4] = cube[4] * 80
    #Expected number of prompt atm. neutrinos. Uniform prior between 0 and 80.
    cube[5] = cube[5] * 80
    #Expected number of atm. muons. Uniform prior between 0 and 80.
    cube[6] = cube[6] * 80
    """
    return 0


def Log_Like(cube, ndim, nparams):

    gamma = cube[0]
    M = 0.01  #cube[1]
    g = 0.03  #cube[2]
    N_a = 20  #cube[3]
    N_conv = 20  #cube[4]
    N_pr = 20  #cube[5]
    N_mu = 20  #cube[6]

    nu_energy_min = 10**log10_nu_energy_min
    nu_energy_max = 10**log10_nu_energy_max

    likelihood = Full_likelihood(N_a, N_conv, N_pr, N_mu, g, M, gamma, nu_energy_min, nu_energy_max, z_min=z_min, z_max=z_max, E_min=E_min, E_max=E_max, E_npts=E_npts,
                                 nu_energy_num_nodes=nu_energy_num_nodes, costhz_npts=costhz_npts, log10_energy_dep_int_min=log10_energy_dep_int_min, log10_energy_dep_int_max=log10_energy_dep_int_max,
                                 log10_energy_dep_min=log10_energy_dep_min, log10_energy_dep_max=log10_energy_dep_max, log10_energy_dep_npts=log10_energy_dep_npts,
                                 time_det_yr=time_det_yr, volume_total=volume_total, energy_nu_max=energy_nu_max, epsabs=epsabs, epsrel=epsrel, verbose=verbose)

    # MultiNest expects the natural log of the likelihood
    log_l = np.log(likelihood)

    return log_l




parameters = ["gamma"]#, "M", "g", "N_a", "N_conv", "N_pr", "N_mu"]
n_params = len(parameters)
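
# Added safeguard (assumption: MultiNest does not create missing output directories itself).
# The directory name matches the outputfiles_basename used in pymultinest.run below.
if not os.path.exists('Likelihood_out_1D'):
    os.makedirs('Likelihood_out_1D')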




# Run MultiNest
pymultinest.run(Log_Like, Prior, n_params, outputfiles_basename='Likelihood_out_1D/',
resume=True, verbose=True, n_live_points=n_live_points, seed=1,
evidence_tolerance=evidence_tolerance, importance_nested_sampling=True)

json.dump(parameters, open('Likelihood_out_1D/params.json', 'w')) # Save parameter names
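
# Optional post-processing sketch (not part of the original script): read the finished MultiNest
# run back in with pymultinest.Analyzer and inspect the parameter summary. Uncomment to use.
#a = pymultinest.Analyzer(n_params=n_params, outputfiles_basename='Likelihood_out_1D/')
#print(a.get_stats()['marginals'])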
99 changes: 99 additions & 0 deletions Likelihood/Neutrino_Flux_Earth.py
@@ -0,0 +1,99 @@
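# Propagation of the diffuse astrophysical neutrino flux from the sources (z_max) to Earth (z_min),
# including adiabatic redshift losses, continuous source injection, and attenuation/regeneration
# from neutrino self-interactions on the cosmic neutrino background, mediated by a new boson of
# mass M [GeV] and coupling g.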
from __future__ import division
import numpy as np
from scipy.integrate import ode

c = 299792458*100  # speed of light [cm s^-1]

def nt(z):
    # Cosmic neutrino background number density [cm^-3] at redshift z (~56 cm^-3 per species today)
    return 56*(1+z)**3

def L0(energy_nu, z, gamma, k=1, E_max=1.0e7):
    # Source emission spectrum: power law with spectral index gamma and exponential cutoff at E_max [GeV]
    return k*np.power(energy_nu, -gamma)*np.exp(-energy_nu/E_max)


def W(z, a=3.4, b=-0.3, c1=-3.5, B=5000, C=9, eta=-10):
    # Redshift evolution of the sources (star-formation-rate-like parameterization)
    return ((1+z)**(a*eta)+((1+z)/B)**(b*eta)+((1+z)/C)**(c1*eta))**(1/eta)

def L(z, energy_nu, gamma):
    return W(z)*L0(energy_nu, z, gamma)

def H(z, H0=0.678/(9.777752*3.16*1e16), OM=0.308, OL=0.692):
    # Hubble rate [s^-1] for flat LambdaCDM with h = 0.678 (1/H0 = 9.777752/h Gyr, converted to seconds)
    return H0*np.sqrt(OM*(1.+z)**3. + OL)


def sigma(energy_nu, g, M, m=1.e-10):
    # Neutrino-neutrino scattering cross section [cm^2] on a relic neutrino of mass m [GeV], for a
    # mediator of mass M [GeV] and coupling g; s = 2*energy_nu*m, and 0.389379e-27 converts GeV^-2 to cm^2
    return (g**4/(16*np.pi))*(2*energy_nu*m)/((2*energy_nu*m-M**2)**2+((M**4*g**4)/(16*np.pi**2)))*0.389379e-27


def Adiabatic_Energy_Losses(z, energy_nu, nu_density, lst_energy_nu, lst_nu_density):
    # Adiabatic (redshift) energy-loss term H(z)*d(E*n)/dE, with dn/dE from a forward difference on the energy grid
    index = list(lst_energy_nu).index(energy_nu)

    if index < len(lst_energy_nu)-1:
        diff = (lst_nu_density[index+1]-lst_nu_density[index])/(lst_energy_nu[index+1]-lst_energy_nu[index])
    else:
        diff = 0
    return H(z)*(nu_density + energy_nu*diff)

def Attenuation(z, energy_nu, nu_density, g, M, m=1.e-10):
    # Attenuation on the cosmic neutrino background: -c * nt(z) * sigma(E) * n(E)
    return -c*nt(z)*sigma(energy_nu, g, M, m)*nu_density

def Regeneration(z, energy_nu, lst_energy_nu, lst_nu_density, g, M, m=1.e-10):
    # Regeneration: neutrinos scattered down from energies above energy_nu repopulate this node;
    # the integral of sigma(E')*n(E') above energy_nu is approximated by a rectangle sum on the grid.
    regen = 0
    index = list(lst_energy_nu).index(energy_nu)

    for j in range(index, len(lst_energy_nu)-1):
        regen += sigma(lst_energy_nu[j], g, M, m)*lst_nu_density[j]*(lst_energy_nu[j+1]-lst_energy_nu[j])

    regen = c*nt(z)*regen/(energy_nu)

    return regen

def Propagation_Eq(z, nu_density, energy_nu, lst_energy_nu, lst_nu_density, g, M, gamma, m=1.e-10):
    # Right-hand side of the transport equation in redshift:
    # dn/dz = -(adiabatic losses + source injection + attenuation + regeneration) / ((1+z)*H(z))
    rhs = 0

    rhs += Adiabatic_Energy_Losses(z, energy_nu, nu_density, lst_energy_nu, lst_nu_density)
    rhs += L(z, energy_nu, gamma)
    rhs += Attenuation(z, energy_nu, nu_density, g, M, m=m)
    rhs += Regeneration(z, energy_nu, lst_energy_nu, lst_nu_density, g, M, m=m)

    rhs = rhs/(-(1+z)*H(z))

    return rhs

def Neutrino_Flux(g, M, z_min, z_max, E_min, E_max, E_npts, gamma, m=1.e-10):

    def Integrand(z, nu_density, energy_nu):
        return Propagation_Eq(z, nu_density, energy_nu, lst_energy_nu, lst_nu_density, g, M, gamma, m=m)

    solver = ode(Integrand, jac=None).set_integrator('dop853', atol=1.e-4, rtol=1.e-4, nsteps=500, max_step=1.e-3, verbosity=1)

    # Log-spaced energy grid [GeV]; E_min and E_max are log10(E/GeV)
    lst_energy_nu = np.power(10, np.linspace(E_min, E_max, E_npts))

    lst_nu_density = [0.0]*len(lst_energy_nu)

    dz = 1.e-1

    z = z_max

    # March the density from z_max down to z_min in steps of dz, updating every energy node
    while (z > z_min):
        lst_nu_density_new = np.zeros(lst_energy_nu.size)

        for i in range(len(lst_energy_nu)):

            solver.set_initial_value(lst_nu_density[i], z)
            solver.set_f_params(lst_energy_nu[i])
            sol = solver.integrate(solver.t-dz)

            lst_nu_density_new[i] = sol

        lst_nu_density = [x for x in lst_nu_density_new]

        z = z-dz

    save_array = np.zeros([E_npts, 2])
    save_array[:,0] = lst_energy_nu
    save_array[:,1] = lst_nu_density

    #np.savetxt(external_flux_filename, save_array)

    return save_array
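
"""
# Example usage sketch (parameter values for illustration only; g, M, gamma, z and E ranges taken
# from the commented test in Full_likelihood.py, E_npts from the parser default):
# flux = Neutrino_Flux(g=0.03, M=0.01, z_min=0, z_max=4, E_min=3, E_max=8, E_npts=200, gamma=2)
# flux[:, 0] holds the neutrino energies [GeV]; flux[:, 1] the propagated densities at z_min.
# print(flux)
"""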
