Skip to content

Commit

Permalink
Add support for a fragmentation scale
Browse files (browse the repository at this point in the history)
  • Branch information:
t7phy committed Oct 4, 2024
1 parent 2a4f79c commit 7569391
Show file tree
Hide file tree
Showing 2 changed files with 78 additions and 21 deletions.
74 changes: 64 additions & 10 deletions pineappl/src/convolutions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,11 @@ pub struct ConvolutionCache<'a> {
alphas_cache: Vec<f64>,
mur2_grid: Vec<f64>,
muf2_grid: Vec<f64>,
mua2_grid: Vec<f64>,
x_grid: Vec<f64>,
imur2: Vec<usize>,
imuf2: Vec<usize>,
imua2: Vec<usize>,
ix: Vec<Vec<usize>>,
pdg: Vec<Convolution>,
perm: Vec<Option<(usize, bool)>>,
Expand All @@ -38,16 +40,18 @@ impl<'a> ConvolutionCache<'a> {
alphas_cache: Vec::new(),
mur2_grid: Vec::new(),
muf2_grid: Vec::new(),
mua2_grid: Vec::new(),
x_grid: Vec::new(),
imur2: Vec::new(),
imuf2: Vec::new(),
imua2: Vec::new(),
ix: Vec::new(),
pdg,
perm: Vec::new(),
}
}

pub(crate) fn setup(&mut self, grid: &Grid, xi: &[(f64, f64)]) -> Result<(), ()> {
pub(crate) fn setup(&mut self, grid: &Grid, xi: &[(f64, f64, f64)]) -> Result<(), ()> {
let convolutions = grid.convolutions();

// TODO: the following code only works with exactly two convolutions
Expand Down Expand Up @@ -129,7 +133,7 @@ impl<'a> ConvolutionCache<'a> {
.flatten()
.flat_map(|ren| {
xi.iter()
.map(|(xir, _)| xir * xir * ren)
.map(|(xir, _, _)| xir * xir * ren)
.collect::<Vec<_>>()
})
.collect();
Expand Down Expand Up @@ -161,17 +165,50 @@ impl<'a> ConvolutionCache<'a> {
.flatten()
.flat_map(|fac| {
xi.iter()
.map(|(_, xif)| xif * xif * fac)
.map(|(_, xif, _)| xif * xif * fac)
.collect::<Vec<_>>()
})
.collect();
muf2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!()));
muf2_grid.dedup();

let mut mua2_grid: Vec<_> = grid
.subgrids()
.iter()
.filter_map(|subgrid| {
if subgrid.is_empty() {
None
} else {
Some(
grid.kinematics()
.iter()
.zip(subgrid.node_values())
.find_map(|(kin, node_values)| {
// TODO: generalize this for arbitrary scales
matches!(kin, &Kinematics::Scale(idx) if idx == 0)
.then_some(node_values)
})
// TODO: convert this into an error
.unwrap()
.values(),
)
}
})
.flatten()
.flat_map(|frg| {
xi.iter()
.map(|(_, _, xia)| xia * xia * frg)
.collect::<Vec<_>>()
})
.collect();
mua2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!()));
mua2_grid.dedup();

self.alphas_cache = mur2_grid.iter().map(|&mur2| (self.alphas)(mur2)).collect();

self.mur2_grid = mur2_grid;
self.muf2_grid = muf2_grid;
self.mua2_grid = mua2_grid;
self.x_grid = x_grid;

Ok(())
Expand All @@ -191,18 +228,28 @@ impl<'a> ConvolutionCache<'a> {
.filter_map(|(index, (perm, &pdg_id))| {
perm.map(|(idx, cc)| {
let ix = self.ix[index][indices[index + 1]];
let imuf2 = self.imuf2[indices[0]];
let muf2 = self.muf2_grid[imuf2];

let pid = if cc {
pids::charge_conjugate_pdg_pid(pdg_id)
} else {
pdg_id
};
let xfx = &mut self.xfx[idx];
let xfx_cache = &mut self.xfx_cache[idx];
*xfx_cache.entry((pid, ix, imuf2)).or_insert_with(|| {
let (imu2, mu2) = match self.pdg[idx] {
Convolution::UnpolPDF(_) | Convolution::PolPDF(_) => {
let imuf2 = self.imuf2[indices[0]];
(imuf2, self.muf2_grid[imuf2])
}
Convolution::UnpolFF(_) | Convolution::PolFF(_) => {
let imua2 = self.imua2[indices[0]];
(imua2, self.mua2_grid[imua2])
}
Convolution::None => unreachable!(),
};
*xfx_cache.entry((pid, ix, imu2)).or_insert_with(|| {
let x = self.x_grid[ix];
xfx(pid, x, muf2) / x
xfx(pid, x, mu2) / x
})
})
})
Expand All @@ -224,6 +271,7 @@ impl<'a> ConvolutionCache<'a> {
}
self.mur2_grid.clear();
self.muf2_grid.clear();
self.mua2_grid.clear();
self.x_grid.clear();
}

Expand All @@ -237,9 +285,6 @@ impl<'a> ConvolutionCache<'a> {
xif: f64,
xia: f64,
) {
// TODO: generalize this for fragmentation functions
assert_eq!(xia, 1.0);

self.imur2 = mu2_grid
.iter()
.map(|ren| {
Expand All @@ -258,6 +303,15 @@ impl<'a> ConvolutionCache<'a> {
.unwrap_or_else(|| unreachable!())
})
.collect();
self.imua2 = mu2_grid
.iter()
.map(|frg| {
self.mua2_grid
.iter()
.position(|&mua2| mua2 == xia * xia * frg)
.unwrap_or_else(|| unreachable!())
})
.collect();

// TODO: generalize this for arbitrary orderings of x
self.ix = (0..grid.convolutions().len())
Expand Down
25 changes: 14 additions & 11 deletions pineappl/src/grid.rs
Original file line number Diff line number Diff line change
Expand Up @@ -232,13 +232,6 @@ impl Grid {
channel_mask: &[bool],
xi: &[(f64, f64, f64)],
) -> Vec<f64> {
assert!(xi
.iter()
.all(|&(_, _, xia)| approx_eq!(f64, xia, 1.0, ulps = 2)));
let xi = xi
.iter()
.map(|&(xir, xif, _)| (xir, xif))
.collect::<Vec<_>>();
convolution_cache.setup(self, &xi).unwrap();

let bin_indices = if bin_indices.is_empty() {
Expand All @@ -250,11 +243,14 @@ impl Grid {
let normalizations = self.bin_info().normalizations();
let pdg_channels = self.channels_pdg();

for (xi_index, &(xir, xif)) in xi.iter().enumerate() {
for (xi_index, &(xir, xif, xia)) in xi.iter().enumerate() {
for ((ord, bin, chan), subgrid) in self.subgrids.indexed_iter() {
let order = &self.orders[ord];

if ((order.logxir > 0) && (xir == 1.0)) || ((order.logxif > 0) && (xif == 1.0)) {
if ((order.logxir > 0) && (xir == 1.0))
|| ((order.logxif > 0) && (xif == 1.0))
|| ((order.logxia > 0) && (xia == 1.0))
{
continue;
}

Expand Down Expand Up @@ -316,6 +312,10 @@ impl Grid {
value *= (xif * xif).ln().powi(order.logxif.try_into().unwrap());
}

if order.logxia > 0 {
value *= (xia * xia).ln().powi(order.logxia.try_into().unwrap());
}

bins[xi_index + xi.len() * bin_index] += value / normalizations[bin];
}
}
Expand All @@ -339,8 +339,7 @@ impl Grid {
channel: usize,
(xir, xif, xia): (f64, f64, f64),
) -> ArrayD<f64> {
assert_eq!(xia, 1.0);
convolution_cache.setup(self, &[(xir, xif)]).unwrap();
convolution_cache.setup(self, &[(xir, xif, xia)]).unwrap();

let normalizations = self.bin_info().normalizations();
let pdg_channels = self.channels_pdg();
Expand Down Expand Up @@ -395,6 +394,10 @@ impl Grid {
array *= (xif * xif).ln().powi(order.logxif.try_into().unwrap());
}

if order.logxia > 0 {
array *= (xia * xia).ln().powi(order.logxia.try_into().unwrap());
}

array /= normalizations[bin];
array
}
Expand Down

0 comments on commit 7569391

Please sign in to comment.