
Removed obsolete code #21

Merged · 8 commits · Aug 23, 2024
3 changes: 2 additions & 1 deletion docs/tasks.md
@@ -1,7 +1,8 @@
### Open

- [ ] Perform a final execution of the project validating its integrity
- [ ] Investigate if the first `for` loop in `make()` of `nb.ipynb` is necessary
- [ ] Clean up all files by removing obsolete code and adding more and any missing comments
- [ ] Clean up all files by removing obsolete code and adding more and any missing comments (partially closed 23 August 2024 by #21)
- [ ] Double check all file headers and code comments for accuracy and consistency
- [ ] Add all Python packages and versions to the software credit in the article
- [ ] Determine the units of the Lyα maps and verify the units stated in the documentation and code comments
72 changes: 2 additions & 70 deletions esc.ipynb
@@ -15,7 +15,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 2,
"id": "9173d5ef",
"metadata": {},
"outputs": [],
@@ -36,7 +36,7 @@
"from astropy.convolution import convolve_fft, Gaussian2DKernel\n",
"\n",
"from astropy.stats import SigmaClip\n",
"from photutils.background import Background2D, MedianBackground, SExtractorBackground, ModeEstimatorBackground, MeanBackground, MMMBackground, BiweightLocationBackground\n",
"from photutils.background import Background2D, MedianBackground\n",
"\n",
"from reproject import reproject_interp\n",
"\n",
@@ -58,57 +58,6 @@
" Measure the LyC escape fractions of the MagE apertures\n",
" '''\n",
"\n",
" '''\n",
" def convolve_uncertainties(uncertainty, stdv, shape):\n",
"\n",
" Convolve the uncertainties of an image\n",
"\n",
" Parameters:\n",
" uncertainty : numpy.float64\n",
" The estimated uncertainty of the pixels in the image\n",
" stdv : numpy.float64\n",
" The standard deviation of the Gaussian convolution kernel in pixels\n",
" shape : tuple\n",
" The dimensions of the image\n",
"\n",
" Returns:\n",
" uncertainty_convolved : numpy.ndarray\n",
" The convolved estimated uncertainty of the pixels in the image, \n",
" broadcasted to the shape of the image\n",
" \n",
" # Set the kernel size to 3 standard deviations of the \n",
" # width of the time-averaged seeing conditions\n",
" kernel_size = int(3 * stdv)\n",
"\n",
" # Make a dummy array of the convolved uncertainties with the same shape as the image\n",
" uncertainty_convolved = np.ones(shape)\n",
"\n",
" # Set the squared sum of the weighted uncertainties to zero, to be added to in the double loop below\n",
" squared_sum = 0.0\n",
"\n",
" # The double loop below calculates the convolved uncertainty of the pixels in the image. \n",
" # It relies on some assumptions. First, since the original uncertainty is assumed true \n",
" # for the entire image, the loop does not calculate the convolved uncertainty at each \n",
" # pixel in the image. It just calculates one instance (since they will all be the same, \n",
" # ignoring edge effects), and then broadcasts that value to the entire image\n",
"\n",
" # For each pixel within the kernel size\n",
" for i in range(-kernel_size, kernel_size + 1):\n",
" for j in range(-kernel_size, kernel_size + 1):\n",
"\n",
" # Calculate the weight of the pixel\n",
" weight = np.exp(-(i**2 + j**2) / (2 * stdv**2))\n",
" weight /= 2 * np.pi * stdv**2\n",
"\n",
" # Add the weighted uncertainty to the total squared sum of uncertainties\n",
" squared_sum += (weight * uncertainty)**2\n",
"\n",
" # Multiply the dummy array of the convolved uncertainties by the convolved uncertainty\n",
" uncertainty_convolved = uncertainty_convolved * np.sqrt(squared_sum)\n",
"\n",
" return uncertainty_convolved\n",
" '''\n",
"\n",
" @njit\n",
" def convolve_uncertainties(uncertainty, mask, stdv, psf_stdv, shape):\n",
"\n",
@@ -164,23 +113,6 @@
"\n",
" return uncertainty_convolved\n",
"\n",
" # Dictionary of the slit IDs, containing, from left to right: the \n",
" # file name of the data, redshift, and FWHM of the time-averaged \n",
" # seeing conditions of the observation\n",
" '''\n",
" slits = {\n",
" 'M5' : ['sunburst_M-5-comb1_MWdr.txt', 2.37086, 0.97],\n",
" 'M4' : ['sunburst_M-4-comb1_MWdr.txt', 2.37073, 0.71],\n",
" 'M6' : ['sunburst_M-6-comb1_MWdr.txt', 2.37021, 0.76],\n",
" 'M3' : ['sunburst_M-3-comb1_MWdr.txt', 2.37025, 0.70],\n",
" 'M0' : ['sunburst_M-0-comb1_MWdr.txt', 2.37014, 1.34],\n",
" 'M2' : ['sunburst_M-2-comb1_MWdr.txt', 2.37017, 0.77],\n",
" 'M7' : ['sunburst_M-7-comb1_MWdr.txt', 2.37044, 0.73],\n",
" 'M8' : ['sunburst_M-8-comb1_MWdr.txt', 2.37024, 0.7],\n",
" 'M9' : ['sunburst_M-9-comb1_MWdr.txt', 2.37030, 0.68]\n",
" }\n",
" '''\n",
"\n",
" # Dictionary of the MagE slit aperture IDs, listing, from left to right: the FWHM of the seeing conditions \n",
" # in arcseconds, airmasses of each exposure, written as sec(z), and the exposure times of each exposure\n",
" slits = {\n",
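Note on the esc.ipynb change above: the deleted comment block documented the idea the retained `convolve_uncertainties` helper still relies on — a single per-pixel uncertainty is propagated through the Gaussian smoothing by summing the squared kernel weights once and broadcasting that one value over the image. A minimal standalone sketch of that scheme (not the notebook's retained `@njit` version, which additionally takes a mask and a PSF width; the function and argument names here are illustrative):

```python
import numpy as np

def convolved_uncertainty(uncertainty, stdv, shape):
    """Propagate one per-pixel uncertainty through a Gaussian convolution
    by summing the squared, normalized kernel weights (edge effects ignored)."""
    # Truncate the kernel at 3 standard deviations of the seeing width
    kernel_size = int(3 * stdv)

    # Accumulate the squared, weighted uncertainty over the kernel footprint
    squared_sum = 0.0
    for i in range(-kernel_size, kernel_size + 1):
        for j in range(-kernel_size, kernel_size + 1):
            # Normalized 2D Gaussian weight at pixel offset (i, j)
            weight = np.exp(-(i**2 + j**2) / (2 * stdv**2)) / (2 * np.pi * stdv**2)
            squared_sum += (weight * uncertainty) ** 2

    # Every pixel shares the same input uncertainty, so one convolved value
    # is broadcast to the full image shape
    return np.full(shape, np.sqrt(squared_sum))

# Example: uncertainty of 0.05 smoothed with a 2-pixel kernel on a 64x64 image
sigma_map = convolved_uncertainty(0.05, 2.0, (64, 64))
```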
5 changes: 1 addition & 4 deletions limit.ipynb
@@ -28,10 +28,7 @@
"from photutils.segmentation import SourceFinder\n",
"\n",
"from astropy.io import fits\n",
"from astropy.wcs import WCS\n",
"from astropy.stats import SigmaClip\n",
"\n",
"import matplotlib.pyplot as plt"
"from astropy.wcs import WCS"
]
},
{
74 changes: 1 addition & 73 deletions lya.ipynb
@@ -107,6 +107,7 @@
" locs : numpy.ndarray\n",
" The location of the maxima of the Lya peaks corresponding to the input parameters\n",
" '''\n",
"\n",
" def lin_interp(x, y, i):\n",
" return x[i] + (x[i+1] - x[i]) * y[i] / (y[i+1] - y[i])\n",
"\n",
@@ -239,18 +240,12 @@
" # Calculate the Lya peculiar velocities\n",
" v = 299792.458 * (w / 1215.67 - 1)\n",
"\n",
" # Instantiate a cosmology with 30% matter-based energy density and an expansion rate of 70 km/s/Mpc\n",
" #cosmology = FlatLambdaCDM(70,0.3)\n",
"\n",
" # Determine the luminosity distance to the redshift in units of centimeters (to match the flux density unit)\n",
" l_dist = cosmology.luminosity_distance(z).value * 3.086e24\n",
"\n",
" # Create a boolean mask designating the wavelength range used to compute the local continuum\n",
" cntm_mask = (w >= 1221) & (w <= 1225)\n",
"\n",
" # Create a boolean mask designating the integration range when computing the equivalent width\n",
" #ew_mask = (w >= 1212) & (w <= 1221)\n",
"\n",
" # Create a boolean mask designating the integration range when computing the Lya equivalent width and luminosity\n",
" lya_profile_mask = (w >= 1212) & (w <= 1221)\n",
" \n",
@@ -259,9 +254,6 @@
" v100_mask = (v >= -100) & (v <= 100)\n",
" v1000_mask = (v >= -1000) & (v <= 1000)\n",
"\n",
" # Mask for the observed-frame wavelength boundaries when computing the luminosity of the Lya profile\n",
" #l_mask = (w * (1 + z) >= 1212 * (1 + z)) & (w * (1 + z) <= 1221 * (1 + z))\n",
"\n",
" # Set a string for the flux density unit of the data\n",
" flux_density_unit = 'erg/s/cm^2/Å' if 'M' in slit_id else 'normalized wavelength-space flux density'\n",
"\n",
@@ -547,10 +539,8 @@
" iters = len(np.loadtxt(f'{results}/lya_fits/NL/NL_mc_sim_lya_measurements.txt', usecols=(0)))\n",
"\n",
" # Get the LyC escape fraction measurements and uncertainties from the results file\n",
" #f_esc = [np.nan, np.nan, 2.3, -0.6, 3, 2.3, 17, 18, 12, 15, 14]\n",
" f_escs, n_f_escs = np.loadtxt(f'{results}/f_esc_lyc_measurements.txt', delimiter=' ', usecols=(4,5), unpack=True)\n",
" f_escs, n_f_escs = np.insert(f_escs, 0, [np.nan, np.nan]), np.insert(n_f_escs, 0, [np.nan, np.nan])\n",
" #ne_esc = [np.nan, np.nan, 0.8, 0.2, 1, 0.8, 6, 7, 5, 6, 5]\n",
"\n",
" # Make an empty list that will contain the statistical correlation results\n",
" corr_coefs = []\n",
@@ -752,13 +742,6 @@
" # Array containing the padding between the plot and the row unit labels\n",
" labelpads = np.array([27,20,27,20,20,20,20,20], dtype=int)\n",
"\n",
" # The LyC escape fractions and associated uncertainties of each spectrum\n",
" #f_esc = [np.nan, np.nan, 2.3, -0.6, 3, 2.3, 17, 18, 12, 15, 14]\n",
" #ne_esc = [np.nan, np.nan, 0.8, 0.2, 1, 0.8, 6, 7, 5, 6, 5]\n",
"\n",
" #f_escs, n_f_escs = np.loadtxt(f'{results}/f_esc_lyc_measurements.txt', delimiter=' ', usecols=(4,5), unpack=True)\n",
" #f_escs, n_f_escs = np.insert(f_escs, 0, [np.nan, np.nan]), np.insert(n_f_escs, 0, [np.nan, np.nan])\n",
"\n",
" # Establish common directories\n",
" home = os.getcwd()\n",
" data = f'{home}/data'\n",
@@ -1180,61 +1163,6 @@
" # Round the median to the same digit as the smaller bound's only significant figure\n",
" median = sigfig.round(median, len(str(median).replace('-','').split('.')[0]) - len(ref) + 1, type=str)\n",
"\n",
" '''\n",
" # If the median or either bound is less than 0.0001 (this is Python's default limit to begin \n",
" # printing numbers in scientific notation)\n",
" if any('0.0000' in i for i in np.array([median, lower, upper], dtype=str)):\n",
"\n",
" # Get the smallest quantity of the median and bounds\n",
" ref = min(np.array([median, lower, upper], dtype=str), key=float)\n",
"\n",
" # Determine the necesessary scientific notation exponent so that the first significant \n",
" # figure of the smallest quantity is the first digit to the left of the decimal place\n",
" exp = -len(ref.split('.')[1])\n",
"\n",
" # Write the scientific notation factor as a string\n",
" factor = f'\\\\times 10^{{{exp}}}'\n",
"\n",
" if '.' in median:\n",
" if len(median.split('.')[1]) < abs(exp):\n",
" median += '0' * (abs(exp) - len(median.split('.')[1]))\n",
" median = median.split('.')[0] + median.split('.')[1]\n",
" median = median.lstrip('0')\n",
" elif len(median.split('.')[1]) == abs(exp):\n",
" median = median.split('.')[0] + median.split('.')[1]\n",
" median = median.lstrip('0')\n",
" elif len(median.split('.')[1]) > abs(exp): \n",
" median = median.split('.')[0] + median.split('.')[1][0:abs(exp)] + '.' + median.split('.')[1][abs(exp):]\n",
" elif '.' not in median:\n",
" median += '0' * abs(exp)\n",
"\n",
" if '.' in lower:\n",
" if len(lower.split('.')[1]) < abs(exp):\n",
" lower += '0' * (abs(exp) - len(lower.split('.')[1]))\n",
" lower = lower.split('.')[0] + lower.split('.')[1]\n",
" lower = lower.lstrip('0')\n",
" elif len(lower.split('.')[1]) == abs(exp):\n",
" lower = lower.split('.')[0] + lower.split('.')[1]\n",
" lower = lower.lstrip('0')\n",
" elif len(lower.split('.')[1]) > abs(exp): \n",
" lower = lower.split('.')[0] + lower.split('.')[1][0:abs(exp)] + '.' + lower.split('.')[1][abs(exp):]\n",
" elif '.' not in lower:\n",
" lower += '0' * abs(exp)\n",
" \n",
" if '.' in upper:\n",
" if len(upper.split('.')[1]) < abs(exp):\n",
" upper += '0' * (abs(exp) - len(upper.split('.')[1]))\n",
" upper = upper.split('.')[0] + upper.split('.')[1]\n",
" upper = upper.lstrip('0')\n",
" elif len(upper.split('.')[1]) == abs(exp):\n",
" upper = upper.split('.')[0] + upper.split('.')[1]\n",
" upper = upper.lstrip('0')\n",
" elif len(upper.split('.')[1]) > abs(exp): \n",
" upper = upper.split('.')[0] + upper.split('.')[1][0:abs(exp)] + '.' + upper.split('.')[1][abs(exp):]\n",
" elif '.' not in upper:\n",
" lower += '0' * abs(exp)\n",
" '''\n",
"\n",
" return median, lower, upper\n",
" \n",
" # Dictionary containing the spectral resolutions of the slits\n",
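Note on the retained lya.ipynb lines above: the velocity axis and the Lyα luminosity both follow directly from the rest-frame wavelength grid, using v = c(λ/1215.67 − 1) and the luminosity distance of the adopted cosmology (the removed comment cites an expansion rate of 70 km/s/Mpc and a matter density of 0.3). A self-contained sketch with a placeholder wavelength grid and redshift:

```python
import numpy as np
from astropy.cosmology import FlatLambdaCDM

# Placeholder rest-frame wavelength grid (Å) and source redshift
w = np.linspace(1210.0, 1226.0, 800)
z = 2.37

# Lya peculiar velocity in km/s: v = c * (lambda / lambda_Lya - 1)
v = 299792.458 * (w / 1215.67 - 1)

# Luminosity distance in cm (1 Mpc = 3.086e24 cm), matching an
# erg/s/cm^2/Å flux-density unit
cosmology = FlatLambdaCDM(H0=70, Om0=0.3)
l_dist = cosmology.luminosity_distance(z).value * 3.086e24

# Wavelength masks: local continuum window and Lya integration range
cntm_mask = (w >= 1221) & (w <= 1225)
lya_profile_mask = (w >= 1212) & (w <= 1221)

# Velocity windows around the systemic redshift
v100_mask = (v >= -100) & (v <= 100)
v1000_mask = (v >= -1000) & (v <= 1000)
```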
26 changes: 3 additions & 23 deletions nb.ipynb
@@ -26,10 +26,9 @@
},
"outputs": [],
"source": [
"#import jrr\n",
"import pandas \n",
"import stsynphot # new packages, v2\n",
"import synphot # new packages, v2\n",
"import stsynphot\n",
"import synphot\n",
"from synphot.models import Empirical1D\n",
"import os\n",
"import warnings\n",
@@ -41,7 +40,6 @@
"from astropy import units as u\n",
"from astropy import wcs\n",
"import extinction\n",
"#import pyregion\n",
"\n",
"from regions import Regions\n",
"\n",
@@ -278,9 +276,7 @@
" for i, filter in enumerate(['F390W', 'F555W']):\n",
"\n",
" data_off, header_off = fits.getdata(f'{data}/hst/V5.0_PSZ1G311.65-18.48_{filter}_0.03g0.6_crsc1.2_0.7crsn3.5_3.0_drc_sci.fits', header=True)\n",
" #wht_off = fits.getdata(f'{data}/hst/V5.0_PSZ1G311.65-18.48_{filter}_0.03g0.6_crsc1.2_0.7crsn3.5_3.0_drc_wht.fits')\n",
" data_on, header_on = fits.getdata(f'{data}/hst/V5.0_PSZ1G311.65-18.48_F410M_0.03g0.6_crsc1.2_0.7crsn3.5_3.0_drc_sci.fits', header=True)\n",
" #wht_on, header_wht = fits.getdata(f'{data}/hst/V5.0_PSZ1G311.65-18.48_F410M_0.03g0.6_crsc1.2_0.7crsn3.5_3.0_drc_wht.fits', header=True)\n",
"\n",
" # Compute the median values of the masked sky patch\n",
" med_val_off = np.nanmedian(data_off * mask.data)\n",
@@ -297,25 +293,13 @@
" ( (data_off - med_val_off) * cont_scale_mean[i] * np.sqrt( (np.sqrt((np.nanstd(data_off * mask.data))**2 + (np.nanstd(data_off * mask.data))**2) / (data_off - med_val_off))**2 + (cont_scale_std[i] / cont_scale_mean[i])**2 ) )**2\n",
" )\n",
"\n",
" #hdu_f = fits.PrimaryHDU(data_cont_sub, hdr_f, 'DATA')\n",
" #hdu_n = fits.ImageHDU(n, hdr_n, 'UNCERTAINTY')\n",
" #hdul = fits.HDUList([hdu_f, hdu_n])\n",
"\n",
" #header_on['CNTSCAL'] = (cont_scale_mean[i],'JR continuum subtraction factor')\n",
" #header_on['CNTSCALU'] = (cont_scale_std[i], 'JR stdev on CNTSCAL')\n",
" #header_on['CNTMATH'] = (cont_math, 'jrigby continuum math')\n",
"\n",
" #fits.writeto(cont_sub_filename, data_cont_sub, header_on, overwrite=True) \n",
"\n",
" # Initiate headers for the two extensions of the new file from the WCS\n",
" # it will use. We will then build on the headers from there.\n",
" header_f = wcs.WCS(header_on).to_header()\n",
" header_n = wcs.WCS(header_on).to_header()\n",
"\n",
" cd_matrix = wcs.WCS(header_on).wcs.cd\n",
"\n",
" #print(repr(header_f))\n",
"\n",
" # For the headers of the data and uncertainty extensions\n",
" for j, header in enumerate([header_f, header_n]):\n",
"\n",
@@ -344,11 +328,7 @@
" hdu_n = fits.ImageHDU(n, header_n, 'UNCERTAINTY')\n",
" hdul = fits.HDUList([hdu_f, hdu_n])\n",
"\n",
" hdul.writeto(f'{results}/lya_maps/Lya_cont_sub_{filter}.fits', overwrite=True)\n",
"\n",
" #wht_contsub = 1./(1./wht_on + scale_this_offband/wht_off)\n",
" #wht_filename = f'{home}/results/Lya_wht_{filter}.fits'\n",
" #fits.writeto(wht_filename, wht_contsub, header_wht, overwrite=True)"
" hdul.writeto(f'{results}/lya_maps/Lya_cont_sub_{filter}.fits', overwrite=True)"
]
},
{
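Note on the nb.ipynb change above: the retained code rebuilds both extension headers from the on-band image's WCS and writes the continuum-subtracted map and its propagated uncertainty as a two-extension FITS file. A self-contained sketch of that pattern (the arrays, WCS parameters, and output filename are placeholders, and the primary HDU is left unnamed rather than copying the notebook's exact calls):

```python
import numpy as np
from astropy.io import fits
from astropy import wcs

# Placeholder continuum-subtracted Lya map and its propagated uncertainty
data_cont_sub = np.zeros((100, 100))
n = np.ones((100, 100))

# Minimal WCS standing in for the on-band image astrometry
w_on = wcs.WCS(naxis=2)
w_on.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w_on.wcs.crval = [227.8, -78.2]           # placeholder sky coordinates (deg)
w_on.wcs.crpix = [50.0, 50.0]
w_on.wcs.cdelt = [-8.333e-6, 8.333e-6]    # ~0.03 arcsec per pixel

# Start each extension header from the same WCS so the output keeps the astrometry
header_f = w_on.to_header()
header_n = w_on.to_header()

# Primary HDU holds the data; the first image extension holds the uncertainty
hdu_f = fits.PrimaryHDU(data_cont_sub, header_f)
hdu_n = fits.ImageHDU(n, header_n, name='UNCERTAINTY')
hdul = fits.HDUList([hdu_f, hdu_n])
hdul.writeto('Lya_cont_sub_example.fits', overwrite=True)
```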