Improved folder structure and organization #11

Merged 9 commits on Aug 13, 2024
esc.ipynb: 4 changes (2 additions & 2 deletions)
@@ -266,10 +266,10 @@
" n_f814w_fluxes.append(n_f814w_flux)\n",
"\n",
" # Get the Starburst99 fit's wavelength bins and flux densities\n",
" w_sb99, f_sb99 = np.loadtxt(f'{data}/spectra/mage/cfit/planckarc-{slit_id}-sb99-fit.txt', comments='#', usecols=(0,3), unpack=True)\n",
" w_sb99, f_sb99 = np.loadtxt(f'{data}/mage/stellar_continuum_fits/planckarc-{slit_id}-sb99-fit.txt', comments='#', usecols=(0,3), unpack=True)\n",
"\n",
" # Get the wavelength bins and flux densities and associated uncertainties of the MagE spectrum\n",
" w, f, n = np.loadtxt(f'{data}/spectra/mage/{file}', delimiter='\\t', comments=('#','w'), usecols=(0,1,2), unpack=True)\n",
" w, f, n = np.loadtxt(f'{data}/mage/{file}', delimiter='\\t', comments=('#','w'), usecols=(0,1,2), unpack=True)\n",
" \n",
" # Create a mask excluding extreme flux density outliers\n",
" f_mask = f < 1e-20 \n",
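Aside from the two hunks above, the whole PR follows one rename scheme: data/spectra/mage/ becomes data/mage/, and the cfit/ subdirectory becomes stellar_continuum_fits/. A minimal sketch of that mapping, with a hypothetical migrate() helper that is not part of the repo:

OLD_TO_NEW = {
    'data/spectra/mage/': 'data/mage/',
    'data/spectra/mage/cfit/': 'data/mage/stellar_continuum_fits/',
}

def migrate(path: str) -> str:
    """Map an old repo-relative path to the reorganized layout."""
    # Try the longest (most specific) prefix first so the cfit/ rule wins
    for old, new in sorted(OLD_TO_NEW.items(), key=lambda kv: len(kv[0]), reverse=True):
        if path.startswith(old):
            return new + path[len(old):]
    return path

assert migrate('data/spectra/mage/cfit/planckarc-M5-sb99-fit.txt') \
    == 'data/mage/stellar_continuum_fits/planckarc-M5-sb99-fit.txt'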
lya.ipynb: 10 changes (5 additions & 5 deletions)
@@ -183,8 +183,8 @@
" # magnification of the slit, a tuple specifying a range of Lya peculiar velocities used to set \n",
" # initial parameter estimates of the central Lya peak, spectral resolution, and uncertainty in the spectral resolution\n",
" slits = {\n",
" 'NL' : ['rest_sba-nonleaker-no_m3_MWdr.txt', 0, 1, [60,140], 5400, 200],\n",
" 'L' : ['rest_sba-leaker-no_m0_MWdr.txt', 0, 1, [20,130], 5300, 200],\n",
" 'NL' : ['sunburst_arc_nonleaker_stack_mage.txt', 0, 1, [60,140], 5400, 200],\n",
" 'L' : ['sunburst_arc_leaker_stack_mage.txt', 0, 1, [20,130], 5300, 200],\n",
" 'M5' : ['sunburst_M-5-comb1_MWdr.txt', 2.37086, 51, [0,100], 5500, 400],\n",
" 'M4' : ['sunburst_M-4-comb1_MWdr.txt', 2.37073, 14.6, [0,85], 5400, 300],\n",
" 'M6' : ['sunburst_M-6-comb1_MWdr.txt', 2.37021, 147, [10,130], 5300, 300],\n",
@@ -224,7 +224,7 @@
" factor = 1e16 if 'M' in slit_id else 1\n",
"\n",
" # Extract the data from the .txt file\n",
" w, f, n = extract_data(f'{data}/spectra/mage/{file_name}')\n",
" w, f, n = extract_data(f'{data}/mage/{file_name}')\n",
"\n",
" # Cut the data to just the relevant portion of the spectrum\n",
" f = f[(w >= 1195 * (1 + z)) & (w <= 1235 * (1 + z))]\n",
@@ -784,7 +784,7 @@
" z, mag, c, model = slits[slit_id][1], slits[slit_id][2], slits[slit_id][3], slits[slit_id][4]\n",
"\n",
" # Get the data\n",
" w, f, n = extract_data(f'{data}/spectra/mage/{slits[slit_id][0]}')\n",
" w, f, n = extract_data(f'{data}/mage/{slits[slit_id][0]}')\n",
"\n",
" # Convert the data to the rest frame\n",
" w, f, n = w / (1 + z), f * (1 + z) / mag, n * (1 + z) / mag\n",
@@ -861,7 +861,7 @@
" z, c, model = slits[slit_id][1], slits[slit_id][3], slits[slit_id][4]\n",
"\n",
" # Get the data\n",
" w, f, n = extract_data(f'{data}/spectra/mage/{slits[slit_id][0]}')\n",
" w, f, n = extract_data(f'{data}/mage/{slits[slit_id][0]}')\n",
"\n",
" # Convert the data to the rest frame\n",
" w, f, n = w / (1 + z), f * (1 + z), n * (1 + z)\n",
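The rest-frame conversion in these hunks encodes two separate corrections: wavelengths compress by (1+z), so a flux density per unit wavelength picks up a factor of (1+z), and gravitational lensing magnifies the flux achromatically, so both flux and uncertainty divide by the magnification. A minimal sketch of that transformation; the function name is hypothetical and the values are illustrative:

import numpy as np

def to_rest_frame(w_obs, f_obs, n_obs, z, mu=1.0):
    """Shift a lensed spectrum to the rest frame.

    w_obs : observed wavelengths (angstroms)
    f_obs : flux density per unit wavelength (F_lambda)
    n_obs : 1-sigma uncertainty on f_obs
    z     : spectroscopic redshift
    mu    : lensing magnification (achromatic)
    """
    w_rest = w_obs / (1 + z)
    # dlambda_rest = dlambda_obs / (1+z), so F_lambda scales up by (1+z);
    # magnification is undone by dividing flux and noise alike
    f_rest = f_obs * (1 + z) / mu
    n_rest = n_obs * (1 + z) / mu
    return w_rest, f_rest, n_rest

# e.g. slit M5: z = 2.37086, magnification 51, from the slits table above
w, f, n = to_rest_frame(np.array([4100.0]), np.array([1e-17]), np.array([1e-18]), 2.37086, 51)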
lya_and_lyc_maps.ipynb: 6 changes (3 additions & 3 deletions)
@@ -188,8 +188,8 @@
" # Establish file paths to the necessary files\n",
" hst_f275w_file = f'{data}/hst/V5.0_PSZ1G311.65-18.48_F275W_0.03g0.6_crsc1.2_0.7crsn3.5_3.0_drc_sci.fits'\n",
" hst_f606w_file = f'{data}/hst/V5.0_PSZ1G311.65-18.48_F606W_0.03g0.6_crsc1.2_0.7crsn3.5_3.0_drc_sci.fits'\n",
" lya_nb_f390w_file = f'{results}/Lya_cont_sub_F390W.fits'\n",
" lya_nb_f555w_file = f'{results}/Lya_cont_sub_F555W.fits'\n",
" lya_nb_f390w_file = f'{results}/lya_maps/Lya_cont_sub_F390W.fits'\n",
" lya_nb_f555w_file = f'{results}/lya_maps/Lya_cont_sub_F555W.fits'\n",
"\n",
" # Put the files we will display data from into an array. We will loop over this array\n",
" # to make the figure\n",
@@ -406,7 +406,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
"version": "3.12.2"
}
},
"nbformat": 4,
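The Lya_cont_sub_* files this hunk relocates into lya_maps/ are continuum-subtracted narrowband maps. The subtraction itself happens in nb.ipynb; the basic operation is a scaled broadband subtraction. A minimal sketch under that assumption; the file names and scale value here are hypothetical stand-ins:

from astropy.io import fits

nb_img = fits.getdata('nb_f390w.fits')      # narrowband image containing Lya
cont_img = fits.getdata('cont_f606w.fits')  # line-free continuum image
scale = 0.8                                 # continuum scaling factor, e.g. from SED fits

# Line-only map: remove the stellar continuum under the narrowband
lya_map = nb_img - scale * cont_img
fits.writeto('Lya_cont_sub.fits', lya_map, overwrite=True)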
lya_nb_m3.ipynb: 39 changes (31 additions & 8 deletions)

Large diffs are not rendered by default.

masks.ipynb: 11 changes (5 additions & 6 deletions)
@@ -111,8 +111,8 @@
" # Save the mask with the new header\n",
" fits.writeto(f'{results}/masks/{slit_id}_mask_v5.fits', mask, header=slit_mask_header, overwrite=True)\n",
"\n",
" # The file path to the mask of the two largest arcs in the v4 HST reduction WCS\n",
" arc_mask_file = f'{data}/hst/masks/arcMask.fits'\n",
" # The file path to the mask of the two largest arcs in a VLT/MUSE IFU footprint\n",
" arc_mask_file = f'{data}/psz1550_1909_v2_mask.fits'\n",
"\n",
" # Reproject and interpolate the mask to the v5 HST reduction WCS\n",
" arc_mask, _ = reproject_interp(arc_mask_file, header, hdu_in=0)\n",
@@ -131,9 +131,8 @@
" arc_mask_header['DATE'] = (date.today().strftime('%d %B %Y'), 'The date this file was created')\n",
" arc_mask_header['FILENAME'] = (f'arc_mask_v5.fits', 'Name of file')\n",
" arc_mask_header['DESC'] = ('This file is a binary mask of the Sunburst Arc, aligned to the WCS of the \\\n",
" v5 reduction of the HST observations. The original mask was made for the VLT/MUSE footprint, and \\\n",
" then reprojected to the v4 HST reduction WCS. This file was created from the mask reprojected to \\\n",
" the v4 HST reduction WCS.', 'Description of file')\n",
" v5 reduction of the HST observations. The original mask was made for the VLT/MUSE footprint.', \n",
" 'Description of file')\n",
"\n",
" # Save the new arc mask\n",
" hdu = fits.PrimaryHDU(data=arc_mask, header=header)\n",
@@ -168,7 +167,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
"version": "3.12.2"
}
},
"nbformat": 4,
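One detail behind the reproject_interp call above: interpolation onto the v5 pixel grid means the output is no longer strictly 0/1, so a thresholding step is needed to recover a binary mask. A minimal sketch of that pattern, assuming the reproject package is installed and the target header carries NAXIS1/NAXIS2; the file names are hypothetical:

import numpy as np
from astropy.io import fits
from reproject import reproject_interp

# Target WCS: header of the v5 HST reduction
header = fits.getheader('v5_f275w_drc_sci.fits')

# Interpolate the MUSE-footprint arc mask onto the v5 pixel grid
arc_mask, footprint = reproject_interp('psz1550_1909_v2_mask.fits', header, hdu_in=0)

# Interpolation blurs the 0/1 edges and leaves NaNs outside the input
# footprint; threshold back to a clean binary mask
arc_mask = np.where(np.nan_to_num(arc_mask) > 0.5, 1.0, 0.0)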
nb.ipynb: 29 changes (19 additions & 10 deletions)
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 22,
"id": "a8da6247",
"metadata": {},
"outputs": [],
@@ -19,7 +19,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 23,
"id": "f6755dd9",
"metadata": {
"scrolled": true
@@ -52,7 +52,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 24,
"id": "0d3c414b",
"metadata": {},
"outputs": [],
@@ -64,7 +64,7 @@
" results = f'{home}/results'\n",
"\n",
" # The filepaths of the SEDs fitted to complete images of the entire Sunburst Arc galaxy\n",
" SEDs = np.array([i for i in sorted(glob.glob(f'{data}/bestfit_model_image*_spec_filter6_cont.txt'))], dtype=str)\n",
" SEDs = np.array([i for i in sorted(glob.glob(f'{data}/sed_fits/bestfit_model_image*_spec_filter6_cont.txt'))], dtype=str)\n",
"\n",
" # Create a blank dictionary\n",
" df = {}\n",
@@ -152,7 +152,7 @@
" for i, label in enumerate(['nonleaker', 'leaker']):\n",
"\n",
" # Get the file path\n",
" file = glob.glob(f'{data}/spectra/mage/sunburst_arc_{label}_rest_no_m*.txt')[0]\n",
" file = glob.glob(f'{data}/mage/sunburst_arc_{label}_stack_mage.txt')[0]\n",
"\n",
" # Get the wavelength bins and flux densities of the stacked spectrum\n",
" w, f = np.loadtxt(file, delimiter='\\t', comments='#', usecols=(0,1), unpack=True)\n",
@@ -271,7 +271,7 @@
"\n",
" # Make a mask of the region and save it\n",
" mask = pixel_region.to_mask().to_image((v5_header['NAXIS2'], v5_header['NAXIS1']))\n",
" fits.writeto(f'{home}/results/box_for_median_imcoords_mask_v5.fits', mask.data, header=v5_header, overwrite=True)\n",
" fits.writeto(f'{home}/results/masks/box_for_median_imcoords_mask_v5.fits', mask.data, header=v5_header, overwrite=True)\n",
"\n",
" # Subtract the continuum using both the F390W and F555W-based continuum estimates, and for all ...\n",
" #for i, linelabel in enumerate(linelabels):\n",
@@ -312,8 +312,17 @@
" header_f = wcs.WCS(header_on).to_header()\n",
" header_n = wcs.WCS(header_on).to_header()\n",
"\n",
" cd_matrix = wcs.WCS(header_on).wcs.cd\n",
"\n",
" #print(repr(header_f))\n",
"\n",
" # For the headers of the data and uncertainty extensions\n",
" for i, header in enumerate([header_f, header_n]):\n",
" for j, header in enumerate([header_f, header_n]):\n",
"\n",
" header['CD1_1'] = cd_matrix[0,0]\n",
" header['CD1_2'] = cd_matrix[0,1]\n",
" header['CD2_1'] = cd_matrix[1,0]\n",
" header['CD2_2'] = cd_matrix[1,1]\n",
"\n",
" header.insert(1, ('CREATOR', 'M. Riley Owens', 'Creator of this file'))\n",
" header.insert(2, ('CREATOR_EMAIL', 'm.riley.owens@gmail.com', 'Email of the creator of this file'))\n",
@@ -326,7 +335,7 @@
" header['SCALEU'] = (cont_scale_std[i], 'The standard deviation of the continuum scaling factor, as inferred from the distribution of scaling factors derived from the 4 SED fits and 2 stacked MagE spectra')\n",
" \n",
" # For each keyword in an array of useful keywords that the observations share\n",
" for j, keyword in enumerate(np.array(['FILETYPE', 'TELESCOP', 'INSTRUME', 'EQUINOX', 'IMAGETYP',\n",
" for k, keyword in enumerate(np.array(['FILETYPE', 'TELESCOP', 'INSTRUME', 'EQUINOX', 'IMAGETYP',\n",
" 'PRIMESI', 'OBSTYPE', 'BUNIT', ], dtype=str)):\n",
"\n",
" header[keyword] = (header_on[keyword], header_on.comments[keyword])\n",
@@ -344,7 +353,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 25,
"id": "b7314114",
"metadata": {},
"outputs": [],
@@ -369,7 +378,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
"version": "3.12.2"
}
},
"nbformat": 4,
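The new CD1_1 through CD2_2 lines in this file work around a quirk worth noting: astropy's WCS.to_header() writes the PC-matrix plus CDELT convention by default, so a CD matrix in the input header is silently dropped, and the hunk copies it back for downstream tools that expect CD keywords. A minimal sketch, assuming an input header that actually carries a CD matrix; the file name is hypothetical:

from astropy.io import fits
from astropy import wcs

header_on = fits.getheader('on_band_drc_sci.fits')  # hypothetical input
new_header = wcs.WCS(header_on).to_header()          # emits PCi_j + CDELTi, not CDi_j

# Restore the CD matrix, as the hunk above does
cd = wcs.WCS(header_on).wcs.cd  # raises AttributeError if the input lacks a CD matrix
for i in (0, 1):
    for j in (0, 1):
        new_header[f'CD{i+1}_{j+1}'] = cd[i, j]

# Optional, beyond what the hunk does: drop the PC/CDELT form so the two
# conventions don't coexist in one header
for key in [k for k in new_header if k.startswith(('PC', 'CDELT'))]:
    del new_header[key]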
seeing.ipynb: 6 changes (3 additions & 3 deletions)
@@ -184,7 +184,7 @@
"\n",
" # File path to the mask of the two largest arcs of the Sunburst Arc\n",
" # in the v5 HST data reduction WCS\n",
" arc_mask_file = f'{results}/arc_mask_v5.fits'\n",
" arc_mask_file = f'{results}/masks/arc_mask_v5.fits'\n",
"\n",
" # Get a cutout of the arc mask. This is necessary because we will\n",
" # convolve the Lya maps later, which is computationally expensive for\n",
@@ -212,7 +212,7 @@
" #table = table + f'{slit_id} & '\n",
" \n",
" # File path to the slit mask\n",
" slit_mask_file = f'{results}/{slit_id}_mask_v5.fits'\n",
" slit_mask_file = f'{results}/masks/{slit_id}_mask_v5.fits'\n",
"\n",
" # Get a cutout of the mask\n",
" slit_mask = fits.getdata(slit_mask_file)[4000:5600, 4400:5700]\n",
@@ -481,7 +481,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
"version": "3.12.2"
},
"orig_nbformat": 4
},
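The path updates aside, the [4000:5600, 4400:5700] cutouts in this notebook exist because the later convolution step is expensive on the full frame. A minimal sketch of that pattern, assuming the goal is degrading an HST image to a ground-based seeing; the file name and FWHM values are hypothetical:

import numpy as np
from astropy.io import fits
from astropy.convolution import Gaussian2DKernel, convolve_fft

# Work on a cutout: FFT convolution of the full frame is needlessly expensive
img = fits.getdata('lya_map.fits')[4000:5600, 4400:5700]

# Degrade to a target seeing: convolve with a Gaussian whose width is the
# quadrature difference between target and native FWHM (both in pixels)
fwhm_native, fwhm_target = 2.0, 10.0
sigma = np.sqrt(fwhm_target**2 - fwhm_native**2) / 2.355  # FWHM -> sigma
smoothed = convolve_fft(img, Gaussian2DKernel(x_stddev=sigma))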
source_plane.ipynb: 4 changes (2 additions & 2 deletions)
@@ -48,7 +48,7 @@
" slits = np.array(['M5', 'M4', 'M6', 'M3'], dtype=str)\n",
"\n",
" # Make an array of the filepaths to the source images\n",
" files = np.array([f'{home}/figs/artist_impression_sunburst_source_labels_and_slilts_Riley_{slit}.png' for slit in slits], dtype=str)\n",
" files = np.array([f'{home}/figs/source_plane_reconstructions/artist_impression_sunburst_source_labels_and_slilts_Riley_{slit}.png' for slit in slits], dtype=str)\n",
"\n",
" # Instantiate the figure and axes\n",
" fig, ax = plt.subplots(2,2, figsize=(5,5), constrained_layout=True)\n",
@@ -100,7 +100,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
"version": "3.12.2"
}
},
"nbformat": 4,
stack.ipynb: 8 changes (4 additions & 4 deletions)
@@ -80,10 +80,10 @@
"\n",
" # Establish basic directories\n",
" home = os.getcwd()\n",
" data = home + '/data/spectra/mage'\n",
" data = f'{home}/data/mage'\n",
"\n",
" # Get the individual MagE spectra\n",
" files = glob.glob(data + '/*.txt')\n",
" files = glob.glob(f'{data}/*.txt')\n",
"\n",
" # Dictionary of the spectroscopic redshifts of each spectrum,\n",
" # as measured by Mainali et al. (2022) (bibliographic information \n",
@@ -202,11 +202,11 @@
" n_nonleaker = n_nonleaker / 3\n",
"\n",
" # Save the stacked spectra to .txt files\n",
" np.savetxt(data + '/sunburst_arc_leaker_stack_mage.txt', np.array([w_leaker, f_leaker, n_leaker]).T, delimiter='\\t', encoding='utf-8',\n",
" np.savetxt(f'{data}/mage/sunburst_arc_leaker_stack_mage.txt', np.array([w_leaker, f_leaker, n_leaker]).T, delimiter='\\t', encoding='utf-8',\n",
" header=f'Stacked spectrum of the LyC-leaking region of the Sunburst Arc from the MagE slits M2, M7, M8, M9, created by stack.ipynb.' + '\\n' \n",
" + 'The stack excludes slit M0 because of poor observing conditions that compromised its flux calibration.' + '\\n' + '\\n'\n",
" + 'The columns below are, from left to right: rest wavelength (angstroms), flux density (arbitrary units in wavelength space), and the 1σ Gaussian uncertainty of the flux density.')\n",
" np.savetxt(data + '/sunburst_arc_nonleaker_stack_mage.txt', np.array([w_nonleaker, f_nonleaker, n_nonleaker]).T, delimiter='\\t', encoding='utf-8',\n",
" np.savetxt(f'{data}/mage/sunburst_arc_nonleaker_stack_mage.txt', np.array([w_nonleaker, f_nonleaker, n_nonleaker]).T, delimiter='\\t', encoding='utf-8',\n",
" header=f'Stacked spectrum of the non-LyC-leaking regions of the Sunburst Arc from the MagE slits M5, M4, M6, created by stack.ipynb.' + '\\n' \n",
" + 'The stack excludes slit M3 because it captures a highly compact region of the galaxy which may not be representative of its surrounding environment.' + '\\n'\n",
" + 'Consult Vanzella et al. (2020) (MNRAS:Letters, 499, 1, L67), Diego et al. (2022) (A&A, 665, A134), or Sharon et al. (2022) (ApJ, 941, 2, 203) for more information about the compact region.' + '\\n' + '\\n'\n",
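For the division by 3 above: the non-leaker stack averages three slit spectra, and if the per-slit 1σ arrays are first accumulated in quadrature, the mean stack's uncertainty is the quadrature sum divided by N. A minimal sketch of that convention, assuming spectra already interpolated onto a common rest-frame grid; the helper name is hypothetical:

import numpy as np

def mean_stack(fluxes, noises):
    """Mean-stack spectra that share a common wavelength grid.

    fluxes, noises : lists of equal-length 1-D arrays (F_lambda and 1-sigma)
    """
    f = np.vstack(fluxes)
    n = np.vstack(noises)
    f_stack = f.mean(axis=0)
    # Uncorrelated Gaussian errors add in quadrature; the mean divides by N
    n_stack = np.sqrt((n ** 2).sum(axis=0)) / len(fluxes)
    return f_stack, n_stack

# e.g. the non-leaker stack combines the slits M5, M4, M6
f_nl, n_nl = mean_stack([np.ones(5)] * 3, [np.full(5, 0.1)] * 3)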