This commit is contained in:
Yael-II
2025-01-02 22:43:21 +01:00
parent c8ae670ea7
commit ca4274fc55
7 changed files with 569 additions and 92 deletions

View File

@@ -4,7 +4,6 @@ import data_reduction_tools as tools
import astroalign as aa
TYPE = ["image", "spectroscopy"][1]
target = "NGC_40"
IN_DIR = "./Input/"
OUT_DIR = "./Output/"
LIGHT_DIR = IN_DIR + "Light/"
@@ -13,17 +12,20 @@ FLAT_DIR = IN_DIR + "Flat/"
BIAS_DIR = IN_DIR + "Bias/"
THAR_DIR = IN_DIR + "ThAr/"
def spectro_reduction():
def spectro_reduction(target: str):
# BIAS
filelist = tools.list_fits(BIAS_DIR)
tools.stack(filelist, BIAS_DIR, "master_bias.fits")
# DARK
#filelist = tools.list_fits(DARK_DIR)
#tools.debias(filelist, DARK_DIR)
#filelist = ["debiased_" + fname for fname in filelist \
# if not "debiased_" in fname]
#tools.stack(filelist, DARK_DIR, "master_debiased_dark.fits")
filelist = tools.list_fits(DARK_DIR)
has_dark = False
if len(filelist) > 0:
has_dark = True
tools.debias(filelist, DARK_DIR)
filelist = ["debiased_" + fname for fname in filelist \
if not "debiased_" in fname]
tools.stack(filelist, DARK_DIR, "master_debiased_dark.fits")
# FLAT
filelist = tools.list_fits(FLAT_DIR)
@@ -41,25 +43,33 @@ def spectro_reduction():
filelist = ["debiased_" + fname for fname in filelist \
if not "debiased_" in fname]
tools.stack(filelist, LIGHT_DIR, "master_debiased_light.fits")
tools.reduction_operation(target + ".fits",
master_light = "master_debiased_light.fits",
if has_dark:
tools.reduction_individual(["master_debiased_light.fits"],
master_dark = "master_debiased_dark.fits",
master_flat = "master_normalized_debiased_flat.fits")
else:
tools.reduction_individual(["master_debiased_light.fits"],
master_flat = "master_normalized_debiased_flat.fits")
# MAIN
# tools.plot_spectrum("master_debiased_flat.fits", FLAT_DIR)
# tools.plot_spectrum("reduced_" + target + ".fits")
return 0
def spectro_calibration():
def spectro_calibration(target: str):
# BIAS
filelist = tools.list_fits(BIAS_DIR)
tools.stack(filelist, BIAS_DIR, "master_bias.fits")
# DARK
#filelist = tools.list_fits(DARK_DIR)
#tools.debias(filelist, DARK_DIR)
#filelist = ["debiased_" + fname for fname in filelist \
# if not "debiased_" in fname]
#tools.stack(filelist, DARK_DIR, "master_debiased_dark.fits")
filelist = tools.list_fits(DARK_DIR)
has_dark = False
if len(filelist) > 0:
has_dark = True
tools.debias(filelist, DARK_DIR)
filelist = ["debiased_" + fname for fname in filelist \
if not "debiased_" in fname]
tools.stack(filelist, DARK_DIR, "master_debiased_dark.fits")
# FLAT
filelist = tools.list_fits(FLAT_DIR)
@@ -77,28 +87,55 @@ def spectro_calibration():
filelist = ["debiased_" + fname for fname in filelist \
if not "debiased_" in fname]
tools.stack(filelist, THAR_DIR, "master_debiased_ThAr.fits")
tools.reduction_operation("ThAr.fits",
if has_dark:
tools.reduction_individual(["master_debiased_ThAr.fits"],
light_directory = THAR_DIR,
master_dark = "master_debiased_dark.fits",
master_flat = "master_normalized_debiased_flat.fits")
else:
tools.reduction_individual(["master_debiased_ThAr.fits"],
light_directory = THAR_DIR,
master_light = "master_debiased_ThAr.fits",
master_flat = "master_normalized_debiased_flat.fits")
# MAIN
tools.plot_spectrum("reduced_ThAr.fits")
filename = "reduced_master_debiased_light.fits"
tools.wavelength_calibration(filename,
"calibrated_" + target + ".csv")
return 0
def image_reduction():
def image_reduction(target: str):
filter_list = [d for d in os.listdir(LIGHT_DIR) if not d[0] == "."]
# BIAS
filelist = tools.list_fits(BIAS_DIR)
tools.stack(filelist, BIAS_DIR, "master_bias.fits")
# DARK
dark_list = [d for d in os.listdir(DARK_DIR) if not d[0] == "."]
filelist = tools.list_fits(DARK_DIR)
tools.debias(filelist, DARK_DIR)
filelist = ["debiased_" + fname for fname in filelist \
if not "debiased_" in fname]
tools.stack(filelist, DARK_DIR, "master_debiased_dark.fits")
has_dark = False
has_filter_dark = False
if np.array_equal(np.sort(dark_list), np.sort(filter_list)):
has_dark = True
has_filter_dark = True
for filter_ in filter_list:
filter_dir = filter_ + "/"
filelist = tools.list_fits(DARK_DIR + filter_dir)
tools.debias(filelist, DARK_DIR + filter_dir)
filelist = ["debiased_" + fname for fname in filelist \
if not "debiased_" in fname]
tools.stack(filelist, DARK_DIR + filter_dir,
"master_debiased_dark_{}.fits".format(filter_))
elif len(filelist) > 0:
has_dark = True
tools.debias(filelist, DARK_DIR)
filelist = ["debiased_" + fname for fname in filelist \
if not "debiased_" in fname]
tools.stack(filelist, DARK_DIR, "master_debiased_dark.fits")
# FLAT
filter_list = [d for d in os.listdir(LIGHT_DIR) if not d[0] == "."]
for filter_ in filter_list:
filter_dir = filter_ + "/"
filelist = tools.list_fits(FLAT_DIR + filter_dir)
@@ -112,32 +149,117 @@ def image_reduction():
"master_normalized_debiased_flat_{}.fits".format(filter_))
# LIGHT
ref_dir = filter_list[0] + "/"
ref_file = tools.list_fits(LIGHT_DIR + ref_dir)[0]
ref_file = "reduced_debiased_" + ref_file
good = []
bad = []
for filter_ in filter_list:
filter_dir = filter_ + "/"
filelist = tools.list_fits(LIGHT_DIR + filter_dir)
tools.debias(filelist, LIGHT_DIR + filter_dir)
filelist = ["debiased_" + fname for fname in filelist \
if not "debiased_" in fname]
tools.stack(filelist,
if has_dark:
if has_filter_dark:
tools.reduction_individual(filelist,
light_directory = LIGHT_DIR + filter_dir,
master_dark = "master_debiased_dark_{}.fits".format(filter_),
master_flat = "master_normalized_" \
+ "debiased_flat_{}.fits".format(filter_),
dark_directory = DARK_DIR + filter_dir,
flat_directory = FLAT_DIR + filter_dir)
else:
tools.reduction_individual(filelist,
light_directory = LIGHT_DIR + filter_dir,
master_dark = "master_debiased_dark.fits",
master_flat = "master_normalized_" \
+ "debiased_flat_{}.fits".format(filter_),
flat_directory = FLAT_DIR + filter_dir)
else:
tools.reduction_individual(filelist,
light_directory = LIGHT_DIR + filter_dir,
#master_dark = "master_debiased_dark.fits",
master_flat = "master_normalized_" \
+ "debiased_flat_{}.fits".format(filter_),
flat_directory = FLAT_DIR + filter_dir)
filelist = ["reduced_" + fname for fname in filelist \
if not "reduced_" in fname]
align = tools.align(filelist,
ref_file,
LIGHT_DIR + filter_dir,
LIGHT_DIR + ref_dir)
if len(align) > 1:
good.append(filter_)
filelist = ["aligned_" + fname for fname in align \
if not "aligned_" in fname]
tools.stack(filelist,
LIGHT_DIR + filter_dir,
"master_debiased_light_{}.fits".format(filter_))
tools.reduction_operation(target + "_" + filter_ + ".fits",
master_light = "master_debiased_light_{}.fits".format(filter_),
light_directory = LIGHT_DIR + filter_dir,
master_dark = "master_debiased_dark.fits",
master_flat = "master_normalized_" \
+ "debiased_flat_{}.fits".format(filter_),
flat_directory = FLAT_DIR + filter_dir)
"master_aligned_reduced_debiased_light_{}_{}.fits".format(
target, filter_), out_directory = OUT_DIR)
else:
bad.append(filter_)
tools.stack(filelist,
LIGHT_DIR + filter_dir,
"master_reduced_debiased_light_{}_{}.fits".format(
target, filter_), out_directory = OUT_DIR)
if len(good) > 0 and len(bad) > 0:
filter_ref = good[0]
filter_ref_file = [fname for fname in tools.list_fits(OUT_DIR)
if "aligned_" in fname][0]
print(filter_ref, filter_ref_file)
print(good)
print(bad)
for filter_ in bad:
tools.align(["master_reduced_debiased_light_{}_{}.fits".format(
target, filter_)],
filter_ref_file,
OUT_DIR,
OUT_DIR)
return 0
def main():
if TYPE == "image":
image_reduction()
if TYPE == "spectroscopy":
spectro_reduction()
spectro_calibration()
print("Select the type of reduction")
print("\t1. Images")
print("\t2. Spectra")
i = int(input("Choice [1 or 2]: "))
if i == 1:
print(("IMPORTANT: Please put raw files in the Input/ directory "
"(with any name)"))
print("Input/")
print(" ├ Bias/")
print(" │ └ bias_1.fits, ...")
print(" ├ Dark/")
print(" │ └ dark_1.fits, ...")
print(" ├ Flat/")
print(" │ ├ B/")
print(" │ │ └ flat_B_1.fits, ...")
print(" │ └ V/...")
print(" └ Light/")
print(" ├ B/")
print(" │ └ raw_B_1.fits")
print(" └ V/ ...")
if i == 2:
print(("IMPORTANT: Please put raw files in the Input/ directory "
"(with any name)"))
print("Input/")
print(" ├ Bias/")
print(" │ └ bias1.fits, ...")
print(" ├ Dark/ (optional)")
print(" │ └ dark_1.fits, ...")
print(" ├ Flat/")
print(" │ └ flat_1.fits")
print(" ├ Light/")
print(" │ └ raw_1.fits")
print(" └ ThAr/")
print(" └ thar_raw_1.fits")
name = input("Target name: ")
if i == 1:
image_reduction(name)
if i == 2:
spectro_reduction(name)
spectro_calibration(name)
return 0
if __name__ == "__main__":

View File

@@ -1,8 +1,10 @@
import os
import numpy as np
import matplotlib.pyplot as plt
import astroalign as aa
from astropy.io import fits
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
IN_DIR = "./Input/"
OUT_DIR = "./Output/"
@@ -13,6 +15,7 @@ BIAS_DIR = IN_DIR + "Bias/"
THAR_DIR = IN_DIR + "ThAr/"
PLT_STYLE = "YII_light_1"
if PLT_STYLE in plt.style.available: plt.style.use(PLT_STYLE)
def list_fits(directory: str = LIGHT_DIR):
"""
@@ -35,18 +38,19 @@ def stack(filelist: list,
if out_directory == None:
out_directory = in_directory
N = len(filelist)
for file in filelist:
if not "master_" in file:
in_data.append(fits.getdata(in_directory + file))
in_data = np.array(in_data)
if method == "mean":
out_data = np.mean(in_data, axis=0)
if method == "median":
out_data = np.median(in_data, axis=0)
header = fits.getheader(in_directory+file)
header["history"] = "stacking with {} files".format(N)
fits.writeto(out_directory \
+ out_name, out_data, header, overwrite=True)
if N > 0:
for file in filelist:
if not "master_" in file:
in_data.append(fits.getdata(in_directory + file))
in_data = np.array(in_data)
if method == "mean":
out_data = np.mean(in_data, axis=0)
if method == "median":
out_data = np.median(in_data, axis=0)
header = fits.getheader(in_directory+file)
header["history"] = "stacking with {} files".format(N)
fits.writeto(out_directory \
+ out_name, out_data, header, overwrite=True)
return 0
def debias(filelist: list,
@@ -88,7 +92,7 @@ def normalize_spectrum_flat(filelist: str,
in_directory: str = FLAT_DIR,
out_directory: str = None,
fit_degree: int = 3,
d_lim: int = 10000):
d_lim: int = 100):
if out_directory == None:
out_directory = in_directory
@@ -103,39 +107,40 @@ def normalize_spectrum_flat(filelist: str,
fit_coefs = poly.chebfit(pixel, data, fit_degree, w=mask)
fit_data = poly.chebval(pixel, fit_coefs)
normalized_data = data/fit_data
normalized_data[np.argwhere(normalized_data<0.5)] = np.nan
#normalized_data[np.argwhere(normalized_data<0.5)] = np.nan
fits.writeto(out_directory + "normalized_" + file,
normalized_data, overwrite=True)
return 0
def reduction_operation(target: str,
                        master_light: str,
                        light_directory: str = LIGHT_DIR,
                        master_dark: str = None,
                        dark_directory: str = DARK_DIR,
                        master_flat: str = None,
                        flat_directory: str = FLAT_DIR,
                        out_directory: str = OUT_DIR):
    """Reduce one master light frame and write it as "reduced_" + target.

    Optionally subtracts master_dark (straight subtraction, NO exposure-time
    rescaling — cf. reduction_individual, which scales by EXPTIME) and
    divides by master_flat.  A FITS history card is appended for each
    operation actually applied.

    Parameters
    ----------
    target : output file name suffix; the result is written to
        out_directory + "reduced_" + target.
    master_light : file name of the stacked light frame to reduce,
        read from light_directory.
    master_dark, master_flat : optional calibration frame file names;
        each step is skipped when the corresponding argument is None.
    """
    data = fits.getdata(light_directory + master_light)
    head = fits.getheader(light_directory + master_light)
    if master_dark != None:
        # NOTE(review): no EXPTIME scaling here — assumes dark and light
        # share the same exposure time; confirm against callers.
        dark = fits.getdata(dark_directory + master_dark)
        data = data - dark
        head["history"] = "removed dark with {}".format(master_dark)
    if master_flat != None:
        flat = fits.getdata(flat_directory + master_flat)
        data = data / flat
        head["history"] = "flatten with {}".format(master_flat)
    fits.writeto(out_directory + "reduced_" + target,
                 data, head, overwrite=True)
def reduction_individual(lightlist: list,
                         light_directory: str = LIGHT_DIR,
                         master_dark: str = None,
                         dark_directory: str = DARK_DIR,
                         master_flat: str = None,
                         flat_directory: str = FLAT_DIR,
                         out_directory: str = None):
    """Dark-subtract and flat-field every frame in lightlist.

    For each file (skipping names already containing "reduced_"):
    subtract master_dark scaled by the EXPTIME ratio when given, divide
    by master_flat when given, and write the result prefixed with
    "reduced_" to out_directory (defaults to light_directory).
    Returns 0.
    """
    if out_directory is None:
        out_directory = light_directory
    for frame in lightlist:
        if "reduced_" in frame:
            continue  # already processed, skip
        frame_data = fits.getdata(light_directory + frame)
        frame_head = fits.getheader(light_directory + frame)
        if master_dark is not None:
            dark_data = fits.getdata(dark_directory + master_dark)
            dark_head = fits.getheader(dark_directory + master_dark)
            exp = frame_head["EXPTIME"]
            dark_exp = dark_head["EXPTIME"]
            # rescale the dark to the light frame's exposure time
            frame_data = frame_data - dark_data * exp / dark_exp
            frame_head["history"] = "removed dark with {}".format(master_dark)
        if master_flat is not None:
            flat_data = fits.getdata(flat_directory + master_flat)
            frame_data = frame_data / flat_data
            frame_head["history"] = "flatten with {}".format(master_flat)
        fits.writeto(out_directory + "reduced_" + frame,
                     frame_data, frame_head, overwrite=True)
    return 0
def plot_spectrum(reduced: str,
directory: str = OUT_DIR):
if PLT_STYLE in plt.style.available: plt.style.use(PLT_STYLE)
data = fits.getdata(directory + reduced).flatten()
head = fits.getheader(directory + reduced)
pixel = list(range(len(data)))
@@ -144,16 +149,44 @@ def plot_spectrum(reduced: str,
plt.show(block=True)
return 0
def find_peaks_highest(data: list,
                       N: int = 10):
    """Find the N highest peaks in the data.

    The previous version was an unfinished stub with a syntax error
    (`mask = np.`); this implements the documented intent.

    Parameters
    ----------
    data : 1-D sequence of values (e.g. a spectrum).
    N : maximum number of peaks to return.

    Returns
    -------
    list of int : indices of the local maxima with the largest values,
    ordered from highest to lowest (fewer than N if the data has fewer
    peaks; empty list when no local maximum exists).
    """
    arr = np.asarray(data)
    # indices of every local maximum (endpoints are never peaks)
    peaks, _ = find_peaks(arr)
    # sort the peak indices by their height, descending, and keep N
    order = np.argsort(arr[peaks])[::-1]
    return peaks[order[:N]].tolist()
def align(filelist: list,
          ref_file: str,
          directory: str = LIGHT_DIR,
          ref_dir: str = LIGHT_DIR):
    """Align each FITS frame in filelist onto the reference frame.

    Uses astroalign to register each frame against ref_file; successfully
    aligned frames are written prefixed with "aligned_" (with a history
    card) and collected into the returned list.  Frames astroalign cannot
    register are reported and skipped.

    Fixes: the annotation said `filelist: str` although a list is iterated,
    and the bare `except:` also swallowed KeyboardInterrupt/SystemExit —
    narrowed to `except Exception`.

    Returns
    -------
    list of str : the file names that aligned successfully.
    """
    data_ref = fits.getdata(ref_dir + ref_file)
    good = []
    if len(filelist) > 0:
        for file in filelist:
            data = fits.getdata(directory + file)
            head = fits.getheader(directory + file)
            try:
                # astroalign wants float32 images
                aligned_data, fp = aa.register(np.float32(data),
                                               np.float32(data_ref))
                good.append(file)
                head["history"] = "aligned with {}".format(ref_file)
                fits.writeto(directory + "aligned_" + file,
                             aligned_data, head,
                             overwrite = True)
            except Exception:
                # registration failed (too few matching sources, etc.)
                print("alignement failed with {}".format(file))
    return good
def wavelength_calibration(file: str,
                           out_name: str,
                           directory: str = LIGHT_DIR,
                           reference: str = "values.csv",
                           ref_directory: str = IN_DIR,
                           out_directory: str = OUT_DIR):
    """Apply a linear dispersion law lambda = a0 + a1 * pixel to a spectrum.

    The two coefficients are read from the first line of the reference csv
    (comma-separated, spaces ignored).  Writes both a two-row text file
    [wavelength; flux] named out_name and a FITS copy with a history card.
    Returns 0.
    """
    # read the dispersion coefficients a0 (offset) and a1 (slope)
    with open(ref_directory + reference) as ref_file:
        fields = ref_file.readline().replace(" ", "").split(",")
    a0 = float(fields[0])
    a1 = float(fields[1])
    flux = fits.getdata(directory + file).flatten()
    head = fits.getheader(directory + file)
    pixel = np.array(range(len(flux)))
    lambda_ = a1 * pixel + a0
    calibrated = np.array([lambda_, flux])
    np.savetxt(out_directory + out_name, calibrated)
    head["history"] = ("calibrated wavelength: "
                       "lambda = {} + {} * pixel").format(a0, a1)
    fits.writeto(out_directory + out_name.replace(".csv", ".fits"),
                 calibrated, head, overwrite=True)
    return 0

171
Source/get_calib.py Executable file
View File

@@ -0,0 +1,171 @@
# => makes the plot interactive
# %matplotlib widget
# inline makes the plots static
#%matplotlib inline
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
from numpy.polynomial import chebyshev
import warnings
# filter astropy warning on fits headers
warnings.filterwarnings('ignore', category=UserWarning, append=True)
from spectro_tools import *
IN_DIR = "./Input/"
OUT_DIR = "./Output/"
LIGHT_DIR = IN_DIR + "Light/"
DARK_DIR = IN_DIR + "Dark/"
FLAT_DIR = IN_DIR + "Flat/"
BIAS_DIR = IN_DIR + "Bias/"
THAR_DIR = IN_DIR + "ThAr/"
def main(ref_file: str = "reduced_master_debiased_ThAr.fits",
         ref_directory: str = THAR_DIR):
    """Interactively build a linear wavelength solution from a ThAr spectrum.

    Detects emission lines in the reduced ThAr lamp spectrum, refines their
    centroids with a multi-Gaussian fit, shows the numbered lines, asks the
    user to match line numbers to atlas wavelengths, fits a degree-1
    Chebyshev dispersion law and writes its two coefficients to
    ./Input/values.csv (read back by wavelength_calibration).

    Bugs fixed:
    - the csv write used the undefined name `values_files` (NameError);
    - a malformed entry could append the line number but not the wavelength,
      desynchronizing the two lists (later IndexError) — both values are now
      parsed before either list is touched, and the bare `except` is
      narrowed to ValueError.
    """
    fits_file = ref_directory + ref_file
    xaxis, data = read_raw_spectrum(fits_file)
    spectrum = data
    # Find peaks in the spectrum.
    # TODO THRESHOLD: height=5000 is a fiducial value; adjust as needed.
    peaks = find_peaks(data, height=5000.)[0]
    # Positions in pixels of the detected peaks.
    centroid_x_values = peaks
    # Optional diagnostic plot of the raw peak detection.
    if False:
        plt.plot(data)
        plt.plot(centroid_x_values, data[peaks], 'ro', label='Max')
        plt.hlines(5000., 0, len(data), "r")
        plt.hlines(np.quantile(data, 0.95), 0, len(data), "g")
        plt.xlabel('pixel')
        plt.ylabel('Flux')
        plt.title('Spectrum with peaks and lines')
        plt.grid(True)
        plt.show()
    # Refine the centroids: fit the lamp spectrum as a sum of Gaussians,
    # starting from one credible (amp, centre, width) triple per peak.
    first_guess = generate_first_guess(peaks)
    params, covariance = curve_fit(gaussian, xaxis, data, p0=first_guess)
    # Reshape params into a 2D array (N, 3) for readability.
    num_peaks = len(params) // 3
    params = np.array(params).reshape((num_peaks, 3))
    allamps = params[:, 0]
    allcens = params[:, 1]  # fitted Gaussian centroids of the lines
    allwids = params[:, 2]
    # Optionally drop the saturated line near pixel 1987 (6965 Angstrom);
    # disabled — it gets thrown away later anyway.
    if False:
        print(len(allcens))
        ibad = np.argmin(np.abs(allcens - 1987.))
        print(ibad)
        allcens = np.delete(allcens, ibad)
        print(len(allcens))
        allamps = np.delete(allamps, ibad)
        allwids = np.delete(allwids, ibad)
        print(allcens)
    # Optional diagnostic plot of each individual Gaussian fit.
    if False:
        plt.plot(data)
        plt.plot(centroid_x_values, data[peaks], 'ro', label='Max')
        plt.xlabel('Pixel')
        plt.ylabel('Flux')
        plt.title('Spectrum with peaks and lines')
        for i in range(num_peaks):
            fit_params = params[i]  # parameters of one Gaussian
            gau = gaussian(xaxis, *fit_params)
            plt.plot(xaxis, gau)
            plt.text(allcens[i], np.max(gau) + 3000, str(i), fontsize=12,
                     ha='center', va='center', color='blue')
        plt.legend()
        plt.show()
    # Show the spectrum with numbered lines so the user can identify them
    # in the printed ThAr atlas.
    fig, ax0 = plt.subplots(1)
    ax0.plot(xaxis, data)
    for i in range(num_peaks):
        fit_params = params[i]
        gau = gaussian(xaxis, *fit_params)
        ax0.text(allcens[i], np.max(gau) + 3000, str(i), fontsize=10,
                 ha='center',
                 va='center',
                 color='C3')
    plt.show(block=False)
    ans = "!"
    print("use: https://github.com/pocvirk/astronomical_data_reduction/blob/main/doc/line_atlas_ThAr.pdf")
    print("[number] [wavelength (nm)] ; enter ok when done")
    numbers = []
    lambdas = []
    # Collect "line-number wavelength" pairs until the user types "ok".
    while not ans == "ok":
        ans = input("> ")
        if ans not in ["ok", "", " "]:
            try:
                n, l = ans.split(" ")
                # parse BOTH values before appending either, so the two
                # lists can never get out of step on a bad entry
                number, wavelength = int(n), float(l)
                numbers.append(number)
                lambdas.append(wavelength)
                print("ok !")
            except ValueError:
                print("error")
    pixel_lambda = []
    for i in range(len(numbers)):
        pixel_lambda.append([allcens[numbers[i]], lambdas[i]])
    pixel_lambda = np.array(pixel_lambda)
    plt.close()
    # Dispersion law: fit a Chebyshev polynomial of degree 1 (linear)
    # through the (pixel, wavelength) calibration points.
    degree = 1
    coeffs = chebyshev.chebfit(pixel_lambda[:, 0], pixel_lambda[:, 1], degree)
    # Evaluate the Chebyshev polynomial across xaxis.
    y_fit = chebyshev.chebval(xaxis, coeffs)
    print("{},{}".format(coeffs[0], coeffs[1]))
    with open("./Input/values.csv", "w+") as values_file:
        # BUG FIX: was `values_files.write(...)` — an undefined name.
        values_file.write("{},{}".format(coeffs[0], coeffs[1]))
    # Plot the fit together with the calibration points as a sanity check.
    plt.figure(figsize=(5, 5))
    plt.scatter(pixel_lambda[:, 0], pixel_lambda[:, 1])
    plt.xlabel('pixel')
    plt.ylabel('Angstrom')
    plt.plot(xaxis, y_fit, label=f'Chebyshev Polynomial (Degree {degree})', color='red')
    plt.show()
    return None
# Run the interactive wavelength calibration when executed as a script.
if __name__ == "__main__":
    main()

147
Source/spectro_tools.py Normal file
View File

@@ -0,0 +1,147 @@
# => makes the plot interactive
#%matplotlib notebook
# inline makes the plots static
#%matplotlib inline
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
import warnings
# filter astropy warning on fits headers
warnings.filterwarnings('ignore', category=UserWarning, append=True)
def plot_spectrum_panels(xaxis, data, num_panels=2):
    """Plot the spectrum split into num_panels stacked panels.

    Not that useful in practice — interactive plotting is better.
    NOTE: the xaxis argument is overwritten internally with a simple
    pixel index (kept for interface compatibility).
    """
    spectrum = np.copy(data)
    # height (in samples) of each panel
    panel_len = len(spectrum) // num_panels
    fig, axs = plt.subplots(num_panels, 1, figsize=(15, 6 * num_panels))
    # pixel index; hoisted out of the loop (same value every iteration)
    xaxis = list(range(len(spectrum)))
    for k in range(num_panels):
        lo = k * panel_len
        hi = lo + panel_len
        axs[k].plot(xaxis[lo:hi], spectrum[lo:hi])
        # axs[k].set_xlabel('Wavelength (Angstroms)')
        axs[k].set_ylabel('counts(ADU)')
        axs[k].set_title(f'Panel {k + 1}')
        axs[k].grid(True)
def read_raw_spectrum(fits_file, get_header=0):
    """Read a raw 1-D spectrum from a FITS file.

    Returns (index, flux) — or (index, flux, header) when get_header == 1.
    The flux is taken from the primary HDU as data[0][0] (the spectrum is
    stored as a doubly-nested array).
    """
    hdulist = fits.open(fits_file)
    header = hdulist[0].header
    # spectrum lives in the primary HDU, doubly nested
    flux = hdulist[0].data[0][0]
    index = list(range(len(flux)))
    if get_header == 1:
        return index, flux, header
    if get_header == 0:
        return index, flux
def write_fits_spectrum(target_file, data, header=None):
    """Write a 1-D spectrum to target_file, doubly nested ([[data]]) to
    mirror the layout read_raw_spectrum expects.

    Fix: the default was the mutable literal `header=[]` (shared across
    calls — a classic Python pitfall); it is now the None sentinel, with
    `[]` still accepted for backward compatibility.
    """
    if header is None or header == []:
        hdu = fits.hdu.hdulist.PrimaryHDU([[data]])
    else:
        hdu = fits.hdu.hdulist.PrimaryHDU([[data]], header)
    hdul = fits.hdu.HDUList([hdu])
    hdul.writeto(target_file, overwrite=True)
    print('wrote ', target_file)
def read_raw_spectrum_old(fits_file, get_header=0):
    """Read a raw 1-D spectrum from a FITS file (legacy singly-nested layout).

    Returns (index, flux) — or (index, flux, header) when get_header == 1.
    Unlike read_raw_spectrum, the flux is taken as data[0] (one nesting
    level only).
    """
    hdulist = fits.open(fits_file)
    header = hdulist[0].header
    # legacy layout: spectrum is singly nested in the primary HDU
    flux = hdulist[0].data[0]
    index = list(range(len(flux)))
    if get_header == 1:
        return index, flux, header
    if get_header == 0:
        return index, flux
def write_fits_spectrum_old(target_file, data, header=None):
    """Write a 1-D spectrum to target_file, singly nested ([data]) to
    mirror the legacy layout read_raw_spectrum_old expects.

    Fix: the default was the mutable literal `header=[]` (shared across
    calls — a classic Python pitfall); it is now the None sentinel, with
    `[]` still accepted for backward compatibility.
    """
    if header is None or header == []:
        hdu = fits.hdu.hdulist.PrimaryHDU([data])
    else:
        hdu = fits.hdu.hdulist.PrimaryHDU([data], header)
    hdul = fits.hdu.HDUList([hdu])
    hdul.writeto(target_file, overwrite=True)
    print('wrote ', target_file)
def read_calibrated_spectrum(fits_file):
    """Read a wavelength-calibrated spectrum from a FITS file.

    Rebuilds the wavelength axis from the WCS keywords CRVAL1 (reference
    wavelength), CDELT1 (wavelength per pixel) and NAXIS1 (pixel count).
    Returns (wavelengths, flux).
    """
    with fits.open(fits_file) as hdul:
        flux = hdul[0].data[0]
        hdr = hdul[0].header
        # linear dispersion from the header WCS keywords
        start = hdr['CRVAL1']
        step = hdr['CDELT1']
        npix = hdr['NAXIS1']
        wavelengths = start + np.arange(npix) * step
        return wavelengths, flux
# Define the Gaussian function
def gaussian(x, *params):
    """Evaluate a sum of Gaussians at x.

    params is a flat sequence of (amplitude, centre, width) triples,
    one triple per spectral line.
    """
    total = np.full(len(x), 0.)
    # walk the flat parameter vector three values at a time
    for j in range(0, len(params), 3):
        amp, cen, wid = params[j:j + 3]
        total += amp * np.exp(-(x - cen) ** 2 / (2 * wid ** 2))
    return total
# Define the Gaussian function
def gaussian_plus_bg_2(x, *params):
    """Sum of Gaussians, each carrying its own constant background.

    params is a flat sequence of (amplitude, centre, width, background)
    quadruples.  Intended to help fit emission lines in objects, but one
    background per line is degenerate — the offsets simply add up.
    """
    total = np.full(len(x), 0.)
    # walk the flat parameter vector four values at a time
    for j in range(0, len(params), 4):
        amp, cen, wid, bg = params[j:j + 4]
        total = total + amp * np.exp(-(x - cen) ** 2 / (2 * wid ** 2)) + bg
    return total
def gaussian_plus_bg(x, *params):
    """Constant background plus a sum of Gaussians.

    params[0] is a single shared background level; the remainder is a
    flat sequence of (amplitude, centre, width) triples.  This avoids
    the per-line-background degeneracy of gaussian_plus_bg_2.
    """
    total = np.full(len(x), 0.)
    # single shared constant background first
    total = total + params[0]
    # then one Gaussian per (amp, cen, wid) triple
    for j in range(1, len(params), 3):
        amp, cen, wid = params[j:j + 3]
        total += amp * np.exp(-(x - cen) ** 2 / (2 * wid ** 2))
    return total
def generate_first_guess(peaks):
    """Build a flat starting vector for the multi-Gaussian curve_fit.

    One (amplitude, centre, width) triple per detected peak: the centre
    is the peak's pixel position, amplitude is a credible fixed 20000
    and width a fixed 1 pixel.  Returns a 1-D float array of length
    3 * len(peaks), laid out [amp0, cen0, wid0, amp1, cen1, wid1, ...].
    """
    centres = np.asarray(peaks, dtype=float)
    amplitudes = np.full(centres.shape, 20000.)
    widths = np.full(centres.shape, 1.)
    # interleave the three columns into one flat vector
    return np.column_stack((amplitudes, centres, widths)).ravel()

2
get_calib.sh Executable file
View File

@@ -0,0 +1,2 @@
# Activate the project environment, then run the interactive
# wavelength-calibration helper.
source activate.sh
python Source/get_calib.py

2
reduction.sh Executable file
View File

@@ -0,0 +1,2 @@
# Activate the project environment, then run the data-reduction pipeline.
source activate.sh
python Source/FIDR.py