attoworld.personal.marco
```python
from .trace_handler import TraceHandler, MultiTraceHandler
from .UVSpectrumAnalysis import (
    load_calibration_data,
    smooth,
    tukey_f,
    tukey_window,
    read_csd_file,
    plot_spectra,
    read_spectrometer_excel,
    calibrate,
    plot_spectra_UVsp,
)
from .VISSpectrumAnalysis import (
    eliminate_outliers,
    read_spectrum_maya,
    read_spectrum_ocean_optics,
    asymmetric_tukey_f,
    asymmetric_tukey_window,
    SpectrumHandler,
    MultiSpectrumHandler,
)
from .profileAndIntensity import profile_analysis

__all__ = [
    "profile_analysis",
    "TraceHandler",
    "MultiTraceHandler",
    "load_calibration_data",
    "smooth",
    "tukey_f",
    "tukey_window",
    "read_csd_file",
    "plot_spectra",
    "read_spectrometer_excel",
    "calibrate",
    "plot_spectra_UVsp",
    "eliminate_outliers",
    "read_spectrum_maya",
    "read_spectrum_ocean_optics",
    "asymmetric_tukey_f",
    "asymmetric_tukey_window",
    "SpectrumHandler",
    "MultiSpectrumHandler",
]
```
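For reference, the names listed in `__all__` above are re-exported at the package level and can be imported directly; a minimal sketch:

```python
from attoworld.personal.marco import (
    TraceHandler,
    MultiTraceHandler,
    SpectrumHandler,
    profile_analysis,
)
```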
```python
def profile_analysis(
    profile_file,
    trace_file=None,
    magnification=1.0,
    pixelsize=5.0,
    power=20.0,
    reprate=4.0,
    ROI_diam: int = 100,
    forced_background=None,
    trace_cutoff_radius=50.0,
    cutoff_gaus_fit_profile=25.0,
):
    """if trace_file is None, this function only loads, fits and plots the beam profile; if trace_file is given, the function additionally calculates the peak intensity and field.

    Args:
        profile_file: name of the file containing the beam profile; the file (txt or csv) contains the 2D array of the beam profile
        trace_file: None; name of the file containing the trace; the file (txt or csv) contains the time and field arrays as tab-separated columns
        magnification: 1.; ratio of lens-camera/lens-z0 lengths; This is used to give the correct size of the beam profile at z0 in um
        pixelsize: 5. um; pixel size of the camera
        power: 20. mW; power of the laser beam; this is used to calculate the peak intensity
        reprate: 4 kHz; repetition rate of the laser; this is used to calculate the peak intensity
        ROI_diam: 100; diameter of the region of interest in pixels; the center of the ROI is the maximum of the camera data (which is assumed to be not saturated)
        cutoff_gaus_fit_profile: 25. pixels; the 2d gaussian is fitted to the beam profile up to this distance from the peak
        forced_background: None; if not None, the subtracted background is set to this value; otherwise it's computed from the outer ring of the ROI (camera data)
        trace_cutoff_radius: 50. fs; for intensity calculation, the integral of the trace is computed only in the range t[peak]-trace_cutoff_radius < t < t[peak]+trace_cutoff_radius"""

    pixelsize = pixelsize / magnification
    data = pandas.read_csv(profile_file)

    dataval = np.array(data.values)
    # print('eliminating fixed broken pixels rayCi camera')
    # dataval=eliminate_broken_pixels(dataval)
    maxindex = np.unravel_index(np.argmax(dataval), dataval.shape)
    print("diameter of Region Of Interest (ROI): ", ROI_diam, " pixels")
    dataval = np.array(
        data.values[
            maxindex[0] - int(ROI_diam / 2) : maxindex[0] + int(ROI_diam / 2),
            maxindex[1] - int(ROI_diam / 2) : maxindex[1] + int(ROI_diam / 2),
        ]
    )
    maxindex = np.unravel_index(np.argmax(dataval), dataval.shape)

    background = 0.0
    count = 0
    print(
        "calculating background intensity outside a circle whose diameter is 3/4 of the ROI diameter"
    )
    for i, j in np.ndindex(dataval.shape):
        if (i - maxindex[0]) ** 2 + (j - maxindex[1]) ** 2 > (ROI_diam * 3 / 8) ** 2:
            background += dataval[i, j]
            count += 1
    background /= count

    if forced_background is not None:
        background = forced_background
        print("subtracting background intensity: ", background)
    else:
        print("subtracting background intensity: ", background)

    dataval = np.maximum(dataval - background, dataval * 0.0)
    maxval = np.max(dataval)

    # gaussian fit
    param = moments_peak(dataval)
    param = fitgaussian(cut_tail(dataval, cutoff_gaus_fit_profile))
    X, Y = np.indices(dataval.shape)
    fitted_funct = gaussian(*param)(X, Y)
    print(
        "beam w (um), resp. in x and y axis:",
        2 * param[3] * pixelsize,
        2 * param[4] * pixelsize,
    )
    print(
        "beam 1/e^2 diameter [um]: ", 4 * param[3] * pixelsize, 4 * param[4] * pixelsize
    )

    if trace_file is not None:
        trace = pandas.read_csv(trace_file, sep="\t")

        # time in fs, field in a.u.
        intensity = (
            power
            * 1e-3
            / reprate
            * 1e-3
            / integral2d(fitted_funct, pixelsize=pixelsize)
            * np.max(fitted_funct)
            * 1e8
            / trace_integral(
                trace["delay (fs)"],
                cut_trace(
                    trace["delay (fs)"], trace["field (a.u.)"] ** 2, trace_cutoff_radius
                ),
            )
            * np.max(trace["field (a.u.)"] ** 2)
            * 1e15
        )

        intensityFromRawCameraData = (
            power
            * 1e-3
            / reprate
            * 1e-3
            / integral2d(
                dataval[
                    maxindex[0] - int(ROI_diam / 2) : maxindex[0] + int(ROI_diam / 2),
                    maxindex[1] - int(ROI_diam / 2) : maxindex[1] + int(ROI_diam / 2),
                ],
                pixelsize=pixelsize,
            )
            * np.max(dataval)
            * 1e8
            / trace_integral(
                trace["delay (fs)"],
                cut_trace(
                    trace["delay (fs)"], trace["field (a.u.)"] ** 2, trace_cutoff_radius
                ),
            )
            * np.max(trace["field (a.u.)"] ** 2)
            * 1e15
        )

        temporal_fwhm = get_fwhm(
            np.array(trace["delay (fs)"]), np.array(trace["field (a.u.)"])
        )
        intensityFromTemporalFWHM = (
            power
            * 1e-3
            / reprate
            * 1e-3
            / integral2d(
                dataval[
                    maxindex[0] - int(ROI_diam / 2) : maxindex[0] + int(ROI_diam / 2),
                    maxindex[1] - int(ROI_diam / 2) : maxindex[1] + int(ROI_diam / 2),
                ],
                pixelsize=pixelsize,
            )
            * np.max(dataval)
            * 1e8
            / (np.sqrt(2 * np.pi) * temporal_fwhm / np.sqrt(8 * np.log(2)))
            * 1e15
        )

        print(
            "\nusing the gaussian fit of the beam profile and the integral of the trace^2:\n "
            "peak intensity (sub-cycle, W/cm2): {i:.3e}".format(**{"i": intensity})
        )
        peak_field = np.sqrt(
            intensity * 1e4 / constants.speed_of_light / constants.epsilon_0
        )
        print("peak field (sub-cycle, V/m): {f:.3e}".format(**{"f": peak_field}))

        print(
            "\nusing directly camera data, and the integral of the trace^2:\n "
            "peak intensity (sub-cycle, W/cm2): {i:.3e}".format(
                **{"i": intensityFromRawCameraData}
            )
        )
        peak_field = np.sqrt(
            intensityFromRawCameraData
            * 1e4
            / constants.speed_of_light
            / constants.epsilon_0
        )
        print("peak field (sub-cycle, V/m): {f:.3e}".format(**{"f": peak_field}))

        print(
            "\nusing directly camera data, and the temporal FWHM retrieved from the trace:\n "
            "peak intensity (sub-cycle, W/cm2): {i:.3e}".format(
                **{"i": intensityFromTemporalFWHM}
            )
        )
        peak_field = np.sqrt(
            intensityFromTemporalFWHM
            * 1e4
            / constants.speed_of_light
            / constants.epsilon_0
        )
        print("peak field (sub-cycle, V/m): {f:.3e}".format(**{"f": peak_field}))

        fig, ax = plt.subplots(1, 1)
        time = trace["delay (fs)"]
        field = trace["field (a.u.)"]
        i_peak = np.argmax(field**2)
        ax.plot(
            time[np.abs(time - time[i_peak]) < trace_cutoff_radius],
            field[np.abs(time - time[i_peak]) < trace_cutoff_radius] ** 2,
        )
        plt.show()

    fig, ax = plt.subplots(1, 1)
    ax.imshow(dataval)
    plt.show()

    plot_crosssect(dataval, fitfunct=gaussian(*param))
```
If trace_file is None, this function only loads, fits, and plots the beam profile; if trace_file is given, the function additionally calculates the peak intensity and field (see the usage sketch after the argument list).
Arguments:
- profile_file: name of the file containing the beam profile; the file (txt or csv) contains the 2D array of the beam profile
- trace_file: (default None) name of the file containing the trace; the file (txt or csv) contains the time and field arrays as tab-separated columns
- magnification: (default 1.0) ratio of the lens-to-camera and lens-to-z0 distances; used to give the correct size of the beam profile at z0 in um
- pixelsize: (default 5.0 um) pixel size of the camera
- power: (default 20.0 mW) power of the laser beam; used to calculate the peak intensity
- reprate: (default 4.0 kHz) repetition rate of the laser; used to calculate the peak intensity
- ROI_diam: (default 100) diameter of the region of interest in pixels; the center of the ROI is the maximum of the camera data (which is assumed not to be saturated)
- cutoff_gaus_fit_profile: (default 25.0 pixels) the 2D Gaussian is fitted to the beam profile up to this distance from the peak
- forced_background: (default None) if not None, the subtracted background is set to this value; otherwise it is computed from the outer ring of the ROI (camera data)
- trace_cutoff_radius: (default 50.0 fs) for the intensity calculation, the integral of the trace is computed only in the range t[peak] - trace_cutoff_radius < t < t[peak] + trace_cutoff_radius
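A minimal usage sketch is given below; the file names, magnification and laser parameters are hypothetical placeholders, and the trace file must contain the tab-separated columns "delay (fs)" and "field (a.u.)" that the function reads internally.

```python
# Minimal usage sketch; file names and parameter values are illustrative only.
from attoworld.personal.marco import profile_analysis

# Profile only: load, fit and plot the beam profile.
profile_analysis("beam_profile.csv", magnification=2.0, pixelsize=5.0)

# Profile + trace: additionally estimate peak intensity (W/cm2) and peak field (V/m).
profile_analysis(
    "beam_profile.csv",
    trace_file="trace.txt",        # tab-separated columns: "delay (fs)", "field (a.u.)"
    magnification=2.0,
    pixelsize=5.0,                 # camera pixel size in um
    power=20.0,                    # average power in mW
    reprate=4.0,                   # repetition rate in kHz
    trace_cutoff_radius=50.0,      # fs; integration window around the field peak
)
```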
129class TraceHandler: 130 """Loads and stores the trace (field vs time) saved to file (two columns, tab separated) or given in the form of time and field [or wavelengths, spectrum and phase] arrays. 131 132 ALL THE TIMES ARE IN fs, FREQUENCIES IN PHz, WAVELENGTHS IN nm. 133 134 Most of the class methods modify the object data in place, and don't return anything. 135 136 Attributes: 137 fieldTimeV: (WAVEFORM DATA in the time domain) time array (fs) 138 fieldV: (WAVEFORM DATA in the time domain) field array 139 fieldStdevV: (WAVEFORM DATA in the time domain) field standard deviation (or sigma) array 140 141 frequencyAxis: (WAVEFORM DATA in the frequency domain) frequency axis of the FFT in PHz (should be np.fft compatible, i.e., equally spaced, in the form [0, df, 2*df, ..., fmax, -fmax[-df], ..., -df]) 142 fftFieldV: (WAVEFORM DATA in the frequency domain) FFT field (complex) 143 complexFieldTimeV: (WAVEFORM DATA in the frequency domain) time axis of the complex field (fs) 144 complexFieldV: (WAVEFORM DATA in the frequency domain) complex field ( = IFFT{FFT(ω)θ(ω)}(t), where θ(ω) = 1 if ω >= 0, θ(ω) = 0 otherwise ) 145 wvlAxis: (WAVEFORM DATA in the frequency domain) wavelength axis for the FFT spectrum (nm) 146 fftSpectrum: (WAVEFORM DATA in the frequency domain) wavelength-dependent positive FFT spectrum ( = |FFT{field}|^2 * df/dλ, λ > 0 ) 147 fftphase: (WAVEFORM DATA in the frequency domain) wavelength-dependent spectral phase 148 149 wvlSpectrometer: (SPECTROMETER DATA) wavelength (nm) 150 ISpectrometer: (SPECTROMETER DATA) spectral intensity 151 152 fsZeroPadding: (= 150 by default) time span in fs for zero padding (appending zeroes both before and after the trace, in order to get a smoother fft) 153 filename: filename to load the trace from 154 filename_spectrum: filename to load the spectrum from 155 normalization_trace: normalization factor for the trace 156 zero_delay: time zero, corresponds to the maximum envelope 157 """ 158 159 def __init__( 160 self, 161 filename=None, 162 filename_spectrum=None, 163 time=None, 164 field=None, 165 stdev=None, 166 wvl=None, 167 spectrum=None, 168 wvl_FFT_trace=None, 169 spectrum_FFT_trace=None, 170 phase_FFT_trace=None, 171 ): 172 """Constructor; loads the trace from file (or from time-field or wavelegth-spectrum-phase arrays) and stores it in the class. 173 174 One of the following must be provided (as a default all args are None): 175 * filename: the name of the file containing the trace (two columns, tab separated) 176 * time and field: the time vector (fs) and electric field vector (a.u.) 177 * wvl_FFT_trace, spectrum_FFT_trace and phase_FFT_trace: the wavelength (nm) spectrum (a.u.) and phase (rad) vectors 178 179 Args: 180 filename (str): the name of the file containing the trace (two columns - time and field, tab separated) 181 filename_spectrum (str): the name of the file containing the spectrum (two columns - wavelength and spectral intensity, tab separated) 182 this does not correspond to the fourier transform of the trace, but to some spectrometer data that you would like to compare to the loaded trace 183 time (array): the time vector (fs) 184 field (array): the electric field vector (a.u.) 185 stdev (array): the standard deviation (or sigma) of the electric field vector (a.u.) 186 wvl (array): the wavelength vector (nm) (spectrometer) 187 spectrum (array): the spectral intensity vector (a.u.) 
(spectrometer) 188 wvl_FFT_trace (array): the wavelength vector (nm) of the FFT trace (to reconstruct a trace from spectral data) 189 spectrum_FFT_trace (array): the spectral intensity vector (a.u.) of the FFT trace (to reconstruct a trace from spectral data) 190 phase_FFT_trace (array): the spectral phase vector (rad) of the FFT trace (to reconstruct a trace from spectral data) 191 """ 192 193 self.fsZeroPadding = 150 194 self.filename = filename 195 self.filename_spectrum = filename_spectrum 196 197 self.fieldTimeV = None 198 self.fieldV = None 199 self.fieldStdevV = None 200 201 self.frequencyAxis = None 202 self.fftFieldV = None 203 self.complexFieldTimeV = None 204 self.complexFieldV = None 205 self.wvlAxis = None 206 self.fftSpectrum = None 207 self.fftphase = None 208 209 self.wvlSpectrometer = None 210 self.ISpectrometer = None 211 212 self.normalization_trace = None 213 self.zero_delay = None 214 215 if filename is not None: 216 self.load_trace() 217 elif time is not None and field is not None: 218 self.load_trace_from_arrays(time, field, stdev) 219 elif ( 220 wvl_FFT_trace is not None 221 and spectrum_FFT_trace is not None 222 and phase_FFT_trace is not None 223 ): 224 self.load_trace_from_spectral_data( 225 wvl_FFT_trace, spectrum_FFT_trace, phase_FFT_trace 226 ) 227 else: 228 print( 229 "\n\nWARNING: in function TraceHandler.__init__() no data was loaded\n\n" 230 ) 231 232 if self.filename_spectrum is not None: 233 self.load_spectrum() 234 elif wvl is not None and spectrum is not None: 235 self.load_spectrum_from_arrays(wvl, spectrum) 236 237 def load_spectrum_from_arrays(self, wvl, spectrum): 238 """loads the spectrum from wavelength and spectrum arrays and stores it in the class. 239 240 The spectrum does not correspond to the fourier transform of the trace, but to some spectrometer data that you would like to compare with the loaded trace 241 242 Args: 243 wvl: wavelength array (nm) 244 spectrum: spectral intensity array 245 """ 246 """wavelength is in nm""" 247 check_equal_length(wvl, spectrum) 248 self.wvlSpectrometer = np.array(wvl) 249 self.ISpectrometer = np.array(spectrum) 250 251 def load_trace_from_arrays(self, fieldTimeV, fieldV, fieldStdevV=None): 252 """loads the trace from time and field arrays and stores it in the class. 
253 254 Args: 255 fieldTimeV: time array (fs) 256 fieldV: field array 257 fieldStdevV: standard deviation - or sigma - to be associated to the trace (optional, default = None) 258 """ 259 if fieldTimeV is None or fieldV is None: 260 raise ValueError( 261 "\nin function TraceHandler.load_from_arrays() too many arguments were None\n" 262 "it might be that the constructor was erroneously used with no or too few arguments\n" 263 ) 264 if np.max(np.abs(fieldTimeV)) < 1.0: 265 print( 266 "\n\nWARNING: do you remember that the time axis of TraceHandler is in fs?\n\n" 267 ) 268 self.fieldTimeV = np.array(fieldTimeV) 269 self.fieldV = np.array(fieldV) 270 if fieldStdevV is not None: 271 self.fieldStdevV = np.array(fieldStdevV) 272 self.normalization_trace = np.max(np.abs(self.fieldV)) 273 self.update_fft() 274 275 def load_trace(self, fname=None): 276 """loads from file""" 277 if fname is not None: 278 self.filename = fname 279 data = pandas.read_csv(self.filename, sep="\t") 280 self.fieldTimeV = data["delay (fs)"].to_numpy() 281 self.fieldV = data["field (a.u.)"].to_numpy() 282 self.fieldStdevV = data["stdev field"].to_numpy() 283 self.normalization_trace = np.max(np.abs(self.fieldV)) 284 self.update_fft() 285 286 def load_trace_from_spectral_data( 287 self, wvl_FFT_trace, spectrum_FFT_trace, phase_FFT_trace 288 ): 289 """loads the trace from wavelength, spectrum, and spectral phase arrays and stores it in the class. 290 291 This function has a different job than load_spectrum_from_arrays(): in fact, it retrieves the trace from the given spectral data via fourier transform. 292 293 Args: 294 wvl_FFT_trace: wavelength array in nm 295 spectrum_FFT_trace: (wavelength-)spectral intensity array. This is assumed to be |FFT{field}|^2 * df/dλ 296 phase_FFT_trace: spectral phase in rad 297 """ 298 check_equal_length(wvl_FFT_trace, spectrum_FFT_trace, phase_FFT_trace) 299 300 # check that the wvl_FFT_trace array is monotonically increasing 301 if np.any(np.diff(wvl_FFT_trace) < 0): 302 if np.all(np.diff(wvl_FFT_trace) < 0): 303 wvl_FFT_trace = wvl_FFT_trace[::-1] 304 spectrum_FFT_trace = spectrum_FFT_trace[::-1] 305 phase_FFT_trace = phase_FFT_trace[::-1] 306 else: 307 raise ValueError( 308 "in function TraceHandler.load_from_spectral_data() wavelength array is not monotonous" 309 ) 310 311 # frequency spectrum (monotonically increasing) 312 freq = constants.speed_of_light / wvl_FFT_trace[::-1] * 1e-6 313 spectrum_freq = spectrum_FFT_trace[::-1] / ( 314 constants.speed_of_light / wvl_FFT_trace[::-1] ** 2 * 1e-6 315 ) 316 phase_FFT_trace = phase_FFT_trace[::-1] 317 318 # create proper freq axis for the fourier transform 319 df = (freq[1] - freq[0]) / 4 320 nf = int(np.ceil(3 * freq[-1] / df)) 321 self.frequencyAxis = np.concatenate((np.arange(0, nf), np.arange(-nf, 0))) * df 322 # corresponding time window 323 twin = 1.0 / (df) 324 325 # complex fourier transform 326 spectrum_freq = np.sqrt(spectrum_freq) * np.exp(1.0j * phase_FFT_trace) 327 328 # fill with zeros 329 initial_zero_freq = np.linspace( 330 freq[1] - freq[0], freq[0], int(np.ceil(4 * freq[0] / (freq[1] - freq[0]))) 331 ) 332 initial_zeros = np.zeros(len(initial_zero_freq)) 333 final_zero_freq = np.linspace( 334 2 * freq[-1] - freq[-2], 335 4 * freq[-1], 336 int(np.ceil(15 * freq[-1] / (freq[-1] - freq[-2]))), 337 ) 338 final_zeros = np.zeros(len(final_zero_freq)) 339 freq = np.concatenate((initial_zero_freq, freq, final_zero_freq)) 340 spectrum_freq = np.concatenate((initial_zeros, spectrum_freq, final_zeros)) 341 # add negative frequencies 342 
freq = np.concatenate((-freq[::-1], freq)) 343 spectrum_freq = np.concatenate( 344 (np.conjugate(spectrum_freq[::-1]), spectrum_freq) 345 ) 346 347 # interpolate the spectrum to the frequency axis of the fft 348 self.fftFieldV = np.interp(self.frequencyAxis, freq, spectrum_freq) 349 350 # add linear phase to center the pulse in the time window; this has to be done after interpolation because the spectral phase will be fast oscillating 351 self.fftFieldV = self.fftFieldV * np.exp( 352 1.0j * 2 * np.pi * self.frequencyAxis * twin / 2 353 ) 354 355 self.update_fft_spectrum() 356 self.update_trace_from_fft() 357 358 def load_spectrum(self, fname=None): 359 """loads spectrum from file. 360 361 The spectrum does not correspond to the fourier transform of the trace, but to the spectrometer data that you would like to compare with the loaded trace 362 363 Args: 364 fname: filename 365 """ 366 if fname is not None: 367 self.filename_spectrum = fname 368 data = pandas.read_csv(self.filename_spectrum, sep="\t") 369 self.wvlSpectrometer = data["wavelength (nm)"].to_numpy() 370 self.ISpectrometer = data["intensity (a.u.)"].to_numpy() 371 self.normalize_spectrum() 372 373 def update_fft(self, zero_pad_field=True): 374 """updates the fft of the trace from the time domain data. 375 376 Args: 377 zero_pad_field: bool; if true (default) add zeros before and after the trace before computing fft (for a smoother spectrum) 378 The length of appended zeros is defined by self.fsZeroPadding. 379 """ 380 zp_fieldTimeV, zp_fieldV = deepcopy(self.fieldTimeV), deepcopy(self.fieldV) 381 if zero_pad_field: 382 zp_fieldTimeV, zp_fieldV = zero_padding( 383 self.fieldTimeV, self.fieldV, fsExtension=self.fsZeroPadding 384 ) 385 self.frequencyAxis, self.fftFieldV = fourier_transform(zp_fieldTimeV, zp_fieldV) 386 self.update_fft_spectrum() 387 388 def update_fft_spectrum(self): 389 """Updates the wavelength-dependent intensity spectrum. Results are stored in self.wvlAxis and self.fftSpectrum. 390 391 This is (derived from but) different from the fourier transform because: 392 * it considers the squared modulus of the field 393 * it multiplies the frequency-dependent spectral density by df/dλ = c/λ² 394 * it considers only positive wavelengths 395 396 The function internally calls update_spectral_phase() as well. 397 """ 398 self.wvlAxis = ( 399 constants.speed_of_light 400 / self.frequencyAxis[self.frequencyAxis > 0] 401 * 1.0e-6 402 ) 403 self.fftSpectrum = ( 404 np.abs(self.fftFieldV[self.frequencyAxis > 0]) ** 2 405 * constants.speed_of_light 406 / self.wvlAxis**2 407 ) 408 self.compute_complex_field() 409 self.update_spectral_phase() 410 self.normalize_fft_spectrum() 411 412 def update_spectral_phase(self): 413 """computes the spectral phase of the trace. The result is stored in self.fftphase. 414 415 An attempt of automatic linear phase subtraction is made. Phase is unwrapped. 
416 """ 417 # determine the time distance (delay) of the waveform peak from the beginning of the trace (more precisely, from the beginning of the ifft) 418 tmax = self.get_zero_delay() 419 deltaT1 = tmax - self.complexFieldTimeV[0] 420 deltaT2 = self.complexFieldTimeV[-1] - tmax 421 ifft_twindow = 1 / (self.frequencyAxis[1] - self.frequencyAxis[0]) 422 total_delay = ( 423 ifft_twindow / 2 + (deltaT2 - deltaT1) / 2 424 ) # total estimated delay of the peak from the beginning of the ifft trace 425 426 # compute the phase, use the computed 'delay' to correct for the non-interesting linear component, and unwrap it 427 self.fftphase = np.angle(self.fftFieldV[self.frequencyAxis > 0]) 428 self.fftphase = ( 429 self.fftphase 430 - total_delay * self.frequencyAxis[self.frequencyAxis > 0] * 2 * np.pi 431 ) 432 self.fftphase = np.unwrap(self.fftphase) 433 434 # subtract constant phase offset (m * 2*π) 435 n_pi = np.sum(self.fftSpectrum * self.fftphase) / np.sum(self.fftSpectrum) 436 n_pi = int(n_pi / (2 * np.pi)) 437 self.fftphase -= n_pi * 2 * np.pi 438 439 def update_trace_from_fft(self): # remember to use together with strip_from_trace 440 # careful: the complex part of the IFFT is discarded 441 """Updates the field trace from fft 442 443 In most cases, you might want to call strip_from_trace() afterward, to eliminate the zero-padding from the trace. 444 """ 445 self.fieldTimeV, self.fieldV = inverse_fourier_transform( 446 self.frequencyAxis, self.fftFieldV 447 ) 448 self.fieldV = np.real(self.fieldV) 449 self.normalization_trace = np.max(np.abs(self.fieldV)) 450 self.get_zero_delay() 451 452 def strip_from_trace(self, timeRange=None): 453 """eliminates the zeros appended to the trace (zero-padding) when computing the FFT. 454 455 Useful when retrieving the trace via ifft (method update_trace_from_fft()). 456 457 Args: 458 timeRange (float): the time range (fs) to strip from the beginning and end of the trace. If None (default), fsZeroPadding is used. 
459 """ 460 if timeRange is None: 461 timeRange = self.fsZeroPadding 462 # strip the first and last fsZeroPadding time points 463 self.fieldV = self.fieldV[ 464 (self.fieldTimeV >= np.min(self.fieldTimeV) + timeRange) 465 & (self.fieldTimeV <= np.max(self.fieldTimeV) - timeRange) 466 ] 467 self.fieldTimeV = self.fieldTimeV[ 468 (self.fieldTimeV >= np.min(self.fieldTimeV) + timeRange) 469 & (self.fieldTimeV <= np.max(self.fieldTimeV) - timeRange) 470 ] 471 if self.fieldStdevV is None: 472 return True 473 if len(self.fieldStdevV) > len(self.fieldV): 474 n_remove = len(self.fieldStdevV) - len(self.fieldV) 475 print( 476 "\n\nWarning: fieldStdevV is longer than fieldV, removing", 477 n_remove, 478 " elements\n\n", 479 ) 480 while n_remove > 0: 481 if n_remove % 2 == 0: 482 self.fieldStdevV = np.delete(self.fieldStdevV, 0) 483 else: 484 self.fieldStdevV = np.delete(self.fieldStdevV, -1) 485 n_remove -= 1 486 elif len(self.fieldStdevV) < len(self.fieldV): 487 n_add = len(self.fieldV) - len(self.fieldStdevV) 488 print( 489 "\n\nWarning: fieldStdevV is shorter than fieldV, adding", 490 n_add, 491 " elements\n\n", 492 ) 493 while n_add > 0: 494 if n_add % 2 == 0: 495 self.fieldStdevV = np.insert(self.fieldStdevV, 0, 0) 496 else: 497 self.fieldStdevV = np.append(self.fieldStdevV, 0) 498 n_add -= 1 499 return True 500 501 def strip_from_complex_trace(self, timeRange=None): 502 """from the complex trace eliminates the zeros appended (zero-padding) when computing the FFT (Analogous to strip_from_trace()).""" 503 if timeRange is None: 504 timeRange = self.fsZeroPadding 505 # strip the first and last fsZeroPadding time points 506 self.complexFieldV = self.complexFieldV[ 507 (self.complexFieldTimeV >= np.min(self.complexFieldTimeV) + timeRange) 508 & (self.complexFieldTimeV <= np.max(self.complexFieldTimeV) - timeRange) 509 ] 510 self.complexFieldTimeV = self.complexFieldTimeV[ 511 (self.complexFieldTimeV >= np.min(self.complexFieldTimeV) + timeRange) 512 & (self.complexFieldTimeV <= np.max(self.complexFieldTimeV) - timeRange) 513 ] 514 515 def tukey_time_window(self, lowEdge, upEdge, lowEdgeWidth, upEdgeWidth): 516 """applies a tukey window to the trace in the time domain. 517 518 Args: 519 lowEdge, upEdge: lower and upper edge of the window upEdge-lowEdge = FWHM of the window 520 lowEdgeWidth, upEdgeWidth: width of the cosine-shaped edges (from 0 to 1 or viceversa) 521 """ 522 window = asymmetric_tukey_window( 523 np.abs(self.fieldTimeV), lowEdge, upEdge, lowEdgeWidth, upEdgeWidth 524 ) 525 self.fieldV = self.fieldV * window 526 self.update_fft() 527 528 def fourier_interpolation(self, ntimes_finer: int): 529 """Shrinks the time step of the trace in time domain by a factor ntimes_finer. 530 531 This is done by 'zero-padding' the fourier transform of the trace, i.e., by extending the frequency axis by a factor ntimes_finer, and adding corresponding zeros to the FFT field. 532 533 Args: 534 ntimes_finer (int): the factor by which to shrink the time step of the trace in time domain. Must be >= 1. 
535 """ 536 if ntimes_finer < 1: 537 raise ValueError( 538 "in function TraceHandler.fourier_interpolation() ntimes_finer must be >= 1" 539 ) 540 if self.frequencyAxis is None or self.fftFieldV is None: 541 raise ValueError( 542 "in function TraceHandler.fourier_interpolation() frequency axis or fft field is not defined" 543 ) 544 # extend the frequency axis by a factor ntimes_finer 545 df = self.frequencyAxis[1] - self.frequencyAxis[0] 546 i_last_pos = int(np.ceil(len(self.frequencyAxis) / 2) - 1) 547 if self.frequencyAxis[i_last_pos] < 0 or self.frequencyAxis[i_last_pos + 1] > 0: 548 print(self.frequencyAxis[0 : i_last_pos + 1]) 549 print(self.frequencyAxis[i_last_pos + 1 :]) 550 raise Exception( 551 "in function fourier_interpolation() frequency axes was not extended correctly" 552 ) 553 554 appended_freqFFT = np.linspace( 555 self.frequencyAxis[i_last_pos] + df, 556 ntimes_finer * self.frequencyAxis[i_last_pos] + df, 557 round((ntimes_finer - 1) * self.frequencyAxis[i_last_pos] / df), 558 ) 559 prepended_freqFFT = np.linspace( 560 -(ntimes_finer - 1) * self.frequencyAxis[i_last_pos] 561 + self.frequencyAxis[i_last_pos + 1], 562 +self.frequencyAxis[i_last_pos + 1], 563 round((ntimes_finer - 1) * self.frequencyAxis[i_last_pos] / df), 564 ) 565 self.frequencyAxis = np.concatenate( 566 ( 567 self.frequencyAxis[: i_last_pos + 1], 568 appended_freqFFT, 569 prepended_freqFFT, 570 self.frequencyAxis[i_last_pos + 1 :], 571 ) 572 ) 573 self.fftFieldV = np.concatenate( 574 ( 575 self.fftFieldV[: i_last_pos + 1], 576 np.zeros(len(appended_freqFFT)), 577 np.zeros(len(prepended_freqFFT)), 578 self.fftFieldV[i_last_pos + 1 :], 579 ) 580 ) 581 582 self.update_trace_from_fft() 583 self.strip_from_trace() 584 self.update_fft_spectrum() 585 586 def differentiate_trace(self, spectrally=True): 587 """computes the time derivative of the trace. 588 589 Args: 590 spectrally: bool; if True (default) the derivative will be computed in the spectral domain (maybe more stable numerically) 591 """ 592 if spectrally: 593 self.fftFieldV = 1j * self.frequencyAxis * self.fftFieldV 594 self.update_trace_from_fft() 595 self.update_fft_spectrum() 596 else: 597 self.fieldV = np.gradient(self.fieldV, self.fieldTimeV) 598 self.update_fft() 599 600 def integrate_trace(self, spectrally=True): 601 """integrates the trace in time. 
602 603 Args: 604 spectrally (bool): if True (default) the integral will be computed in the spectral domain (maybe more stable numerically) 605 """ 606 if spectrally: 607 if self.frequencyAxis[0] != 0: 608 print( 609 "WARNING: in function TraceHandler.integrate_trace() the FFT axis' first element is not zero" 610 ) 611 self.fftFieldV[1:] = -1j / self.frequencyAxis[1:] * self.fftFieldV[1:] 612 self.update_trace_from_fft() 613 self.update_fft_spectrum() 614 else: 615 self.fieldV = np.cumsum(self.fieldV) * np.mean(np.diff(self.fieldTimeV)) 616 self.update_fft() 617 618 def save_trace_to_file(self, filename, low_lim=None, up_lim=None): 619 """Saves the trace to file 620 621 Args: 622 filename 623 low_lim, up_lim: only save the trace between low_lim and up_lim (default: None, None) 624 """ 625 if low_lim is not None and up_lim is not None: 626 fieldTimeV_to_write = self.fieldTimeV[ 627 (self.fieldTimeV >= low_lim) & (self.fieldTimeV <= up_lim) 628 ] 629 fieldV_to_write = self.fieldV[ 630 (self.fieldTimeV >= low_lim) & (self.fieldTimeV <= up_lim) 631 ] 632 else: 633 fieldTimeV_to_write = self.fieldTimeV 634 fieldV_to_write = self.fieldV 635 data = pandas.DataFrame( 636 {"delay (fs)": fieldTimeV_to_write, "field (a.u.)": fieldV_to_write} 637 ) 638 data.to_csv(filename, sep="\t", index=False) 639 640 def normalize_spectrum(self): 641 """Normalizes the comparison spectrum to its integral.""" 642 spectrum_range = [60, 930] 643 n_factor = ( 644 np.abs( 645 np.diff(self.wvlSpectrometer)[ 646 (self.wvlSpectrometer[:-1] > spectrum_range[0]) 647 & (self.wvlSpectrometer[:-1] < spectrum_range[1]) 648 ] 649 ) 650 * self.ISpectrometer[:-1][ 651 (self.wvlSpectrometer[:-1] > spectrum_range[0]) 652 & (self.wvlSpectrometer[:-1] < spectrum_range[1]) 653 ] 654 ).sum() 655 self.ISpectrometer /= n_factor 656 657 def normalize_fft_spectrum(self, spectrum_range=[60, 930]): 658 """Normalizes the spectrum of the trace (|FFT{trace}|^2 * df/dλ) to its integral (area) 659 660 Args: 661 spectrum_range (list): the range of wavelengths (nm) to consider for the normalization. Default is [60, 930] nm. 662 """ 663 n_factor = ( 664 np.abs( 665 np.diff(self.wvlAxis)[ 666 (self.wvlAxis[:-1] > spectrum_range[0]) 667 & (self.wvlAxis[:-1] < spectrum_range[1]) 668 ] 669 ) 670 * self.fftSpectrum[:-1][ 671 (self.wvlAxis[:-1] > spectrum_range[0]) 672 & (self.wvlAxis[:-1] < spectrum_range[1]) 673 ] 674 ).sum() 675 self.fftSpectrum /= n_factor 676 677 def get_trace(self): 678 """Returns the time (fs) and field arrays. Field is in a.u. unless set_fluence() was called or a calibrated field was given as an input.""" 679 return self.fieldTimeV, self.fieldV 680 681 def get_spectrum_trace(self): 682 """Returns the wavelength and spectral intensity corresponding to the fourier transform of the trace.""" 683 return self.wvlAxis, self.fftSpectrum 684 685 def get_spectral_phase(self): 686 """Returns the wavelength array and the spectral phase array corresponding to the FFT of the trace.""" 687 return self.wvlAxis, self.fftphase 688 689 def get_stdev(self): 690 """Returns only the standard deviation (sigma) array""" 691 return self.fieldStdevV 692 693 def get_positive_fft_field(self): 694 """Returns the positive frequency axis f and the corresponding FFT field (complex). 
695 696 Main task of this function is to 'smooth' the phase of the fft (= remove linear phase, corresponding to a time shift in the temporal domain), since the waveform is usually centered around t = 0""" 697 if self.frequencyAxis is None or self.fftFieldV is None: 698 raise ValueError( 699 "in function TraceHandler.get_positive_fft_field() frequency axis or fft field is not defined" 700 ) 701 positive_freq = self.frequencyAxis[self.frequencyAxis >= 0] 702 positive_fft_field = self.fftFieldV[self.frequencyAxis >= 0] 703 # remove linear phase 704 tmax = self.get_zero_delay() 705 deltaT1 = tmax - self.fieldTimeV[0] 706 deltaT2 = self.fieldTimeV[-1] - tmax 707 ifft_twindow = 1 / (self.frequencyAxis[1] - self.frequencyAxis[0]) 708 total_delay = ifft_twindow / 2 + (deltaT2 - deltaT1) / 2 709 positive_fft_field = positive_fft_field * np.exp( 710 -1.0j * 2 * np.pi * positive_freq * total_delay 711 ) 712 return positive_freq, positive_fft_field 713 714 def get_fluence(self): 715 """Calculates the fluence of the trace. 716 717 convention: field [V/Å], time [fs], fluence [J/cm²] 718 F = c eps_0 integral(E^2)dt 719 720 721 Returns: 722 fluence: float 723 """ 724 F = ( 725 constants.speed_of_light 726 * constants.epsilon_0 727 * np.trapz(self.fieldV**2, self.fieldTimeV) 728 * 1e1 729 ) 730 return F 731 732 def set_fluence(self, fluence): 733 """Set the fluence of the trace. 734 735 Convention: field [V/Å], time [fs], fluence [J/cm²] 736 737 F = c eps_0 integral(E^2)dt""" 738 F = ( 739 constants.speed_of_light 740 * constants.epsilon_0 741 * np.trapz(self.fieldV**2, self.fieldTimeV) 742 * 1e1 743 ) 744 self.fieldV *= np.sqrt(fluence / F) 745 self.update_fft() 746 self.update_fft_spectrum() 747 748 def compute_complex_field(self): 749 """Computes the complex trace by inverting the positive-freq FFT and stores it in self.complexFieldV (for envelope or phase computation, for example) 750 751 Notice that the fourier transform of the trace is not computed, it is assumed to be already stored in the class. This should be true in most cases.""" 752 complex_spectrum = deepcopy(self.fftFieldV) 753 complex_spectrum[self.frequencyAxis < 0] = 0 754 self.complexFieldTimeV, self.complexFieldV = inverse_fourier_transform( 755 self.frequencyAxis, complex_spectrum 756 ) 757 self.complexFieldV = 2 * self.complexFieldV 758 self.strip_from_complex_trace() 759 760 def get_envelope(self): 761 """Get the envelope of the trace. 762 763 Returns: 764 time array (fs) 765 envelope array 766 """ 767 if self.complexFieldV is None: 768 self.compute_complex_field() 769 return self.complexFieldTimeV, np.abs(self.complexFieldV) 770 771 def get_phase(self): 772 """Get the instantaneous phase of the trace. 773 774 If complexFieldV is already computed and stored, no re-calculation occurs 775 776 Returns: 777 time array (fs) 778 instantaneous phase array (fs) 779 """ 780 if self.complexFieldV is None: 781 self.compute_complex_field() 782 return self.complexFieldTimeV, np.angle(self.complexFieldV) 783 784 def get_zero_delay(self): 785 """Get the time value corresponding to the envelope peak. 786 787 Returns: 788 789 zero_delay: float 790 """ 791 # careful: the time grid should be fine enough to resolve the maximum of the envelope 792 t, en = self.get_envelope() 793 dt = t[1] - t[0] 794 max_index, max_value = find_maximum_location(en) 795 self.zero_delay = t[0] + dt * max_index 796 return self.zero_delay 797 798 def get_FWHM(self): 799 """get the FWHM of the trace. 
800 801 802 Returns: 803 FWHM: float 804 """ 805 t, en = self.get_envelope() 806 dt = t[1] - t[0] 807 return fwhm(en**2, dt) 808 809 def fft_tukey_bandpass( 810 self, lowWavelengthEdge, upWavelengthEdge, lowEdgeWidth, highEdgeWidth 811 ): 812 """Applies a bandpass filter to the trace in the frequency domain using a tukey window. 813 814 The tukey window is 1 between lowWavelengthEdge+lowEdgeWidth/2 and upWavelengthEdge-highEdgeWidth/2 815 and it is reaches zero at lowWavelengthEdge-lowEdgeWidth/2 and upWavelengthEdge+highEdgeWidth/2. 816 Notice that the edges are only cosine-shaped in the frequency domain. In the wavelength domain upWavelengthEdge - lowWavelengthEdge does not coincide with the FWHM of the tukey function 817 818 Args: 819 lowWavelengthEdge: float (nm) 820 upWavelengthEdge: float (nm) 821 lowEdgeWidth: float (nm) 822 highEdgeWidth: float (nm) 823 """ 824 upFreqEdge = ( 825 constants.speed_of_light / (lowWavelengthEdge - lowEdgeWidth / 2) / 2 * 1e-6 826 + constants.speed_of_light 827 / (lowWavelengthEdge + lowEdgeWidth / 2) 828 / 2 829 * 1e-6 830 ) 831 lowFreqEdge = ( 832 constants.speed_of_light / (upWavelengthEdge - highEdgeWidth / 2) / 2 * 1e-6 833 + constants.speed_of_light 834 / (upWavelengthEdge + highEdgeWidth / 2) 835 / 2 836 * 1e-6 837 ) 838 839 upFreqEdgeWidth = ( 840 constants.speed_of_light / (lowWavelengthEdge - lowEdgeWidth / 2) * 1e-6 841 - constants.speed_of_light / (lowWavelengthEdge + lowEdgeWidth / 2) * 1e-6 842 ) 843 lowFreqEdgeWidth = ( 844 constants.speed_of_light / (upWavelengthEdge - highEdgeWidth / 2) * 1e-6 845 - constants.speed_of_light / (upWavelengthEdge + highEdgeWidth / 2) * 1e-6 846 ) 847 848 window = asymmetric_tukey_window( 849 np.abs(self.frequencyAxis), 850 lowFreqEdge, 851 upFreqEdge, 852 lowFreqEdgeWidth, 853 upFreqEdgeWidth, 854 ) 855 self.fftFieldV = self.fftFieldV * window 856 self.update_trace_from_fft() 857 self.update_fft_spectrum() 858 self.strip_from_trace() 859 860 def apply_transmission(self, wavelengths, f): 861 """Applies a spectral transmission function to the trace (e.g. spectral filter). 
862 863 Args: 864 wavelengths: ndarray = wavelength array (nm) 865 f: ndarray = transmission function f(λ) 866 """ 867 if np.any(np.diff(wavelengths)) < 0: 868 if np.all(np.diff(wavelengths)) < 0: 869 wavelengths = wavelengths[::-1] 870 f = f[::-1] 871 else: 872 raise ValueError( 873 "in function TraceHandler.apply_transmission() wavelength array is not monotonous" 874 ) 875 freq = constants.speed_of_light / wavelengths[::-1] * 1e-6 876 spectrum_freq = f[::-1] 877 878 # fill with zeros 879 initial_zero_freq = np.linspace( 880 freq[1] - freq[0], freq[0], int(np.ceil(4 * freq[0] / (freq[1] - freq[0]))) 881 ) 882 initial_zeros = np.zeros(len(initial_zero_freq)) 883 final_zero_freq = np.linspace( 884 2 * freq[-1] - freq[-2], 885 4 * freq[-1], 886 int(np.ceil(15 * freq[-1] / (freq[-1] - freq[-2]))), 887 ) 888 final_zeros = np.zeros(len(final_zero_freq)) 889 freq = np.concatenate((initial_zero_freq, freq, final_zero_freq)) 890 spectrum_freq = np.concatenate((initial_zeros, spectrum_freq, final_zeros)) 891 # add negative frequencies 892 freq = np.concatenate((-freq[::-1], freq)) 893 spectrum_freq = np.concatenate((spectrum_freq[::-1], spectrum_freq)) 894 895 # interpolate the spectrum to the frequency axis of the fft 896 spectrum_interp = np.interp(self.frequencyAxis, freq, spectrum_freq) 897 898 self.fftFieldV = self.fftFieldV * spectrum_interp 899 self.update_trace_from_fft() 900 self.update_fft_spectrum() 901 self.strip_from_trace() 902 903 def apply_spectrum( 904 self, 905 wvl=None, 906 spectrum=None, 907 CEP_shift: float = 0.0, 908 stripZeroPadding: bool = True, 909 ): 910 """Applies a spectrum to the phase of the trace. This means that a new trace is computed and stored in the TraceHandler object (replacing the existing one); 911 the new trace has spectral intensity equal to the applied spectrum and spectral phase equal to the phase of the existing trace. 912 913 Args: 914 wvl: wavelength array (nm). If None (default) the comparison spectrum stored in the class (self.wvlSpectrometer and self.ISpectrometer) is applied. 915 spectrum: spectral intensity array. If None (default) the comparison spectrum stored in the class (self.wvlSpectrometer and self.ISpectrometer) is applied. 916 CEP_shift: a possible artificial phase shift IN UNITS OF PI! (default = 0) 917 stripZeroPadding (bool): whether to eliminate the zero padding (zeros appended to the trace). 
Default is True""" 918 919 if wvl is None or spectrum is None: 920 wvl = self.wvlSpectrometer 921 spectrum = self.ISpectrometer 922 # check that the wvl array is monotonically increasing 923 if np.any(np.diff(wvl) < 0): 924 if np.all(np.diff(wvl) < 0): 925 wvl = wvl[::-1] 926 spectrum = spectrum[::-1] 927 else: 928 raise ValueError( 929 "in function TraceHandler.apply_spectrum() wavelength array is not monotonous" 930 ) 931 932 # frequency spectrum (monotonically increasing) 933 freq = constants.speed_of_light / wvl[::-1] * 1e-6 934 spectrum_freq = spectrum[::-1] / ( 935 constants.speed_of_light / wvl[::-1] ** 2 * 1e-6 936 ) 937 spectrum_freq = np.maximum(spectrum_freq, np.zeros(len(spectrum_freq))) 938 939 # fill with zeros 940 initial_zero_freq = np.linspace( 941 freq[1] - freq[0], freq[0], int(np.ceil(4 * freq[0] / (freq[1] - freq[0]))) 942 ) 943 initial_zeros = np.zeros(len(initial_zero_freq)) 944 final_zero_freq = np.linspace( 945 2 * freq[-1] - freq[-2], 946 4 * freq[-1], 947 int(np.ceil(15 * freq[-1] / (freq[-1] - freq[-2]))), 948 ) 949 final_zeros = np.zeros(len(final_zero_freq)) 950 freq = np.concatenate((initial_zero_freq, freq, final_zero_freq)) 951 spectrum_freq = np.concatenate((initial_zeros, spectrum_freq, final_zeros)) 952 # add negative frequencies 953 freq = np.concatenate((-freq[::-1], freq)) 954 spectrum_freq = np.concatenate((spectrum_freq[::-1], spectrum_freq)) 955 956 # interpolate the spectrum to the frequency axis of the fft 957 spectrum_interp = np.interp(self.frequencyAxis, freq, spectrum_freq) 958 self.fftFieldV = np.sqrt(spectrum_interp) * np.exp( 959 1j * np.angle(self.fftFieldV) 960 + 1j * np.pi * CEP_shift * np.sign(self.frequencyAxis) 961 ) 962 963 self.update_trace_from_fft() 964 if stripZeroPadding: 965 self.strip_from_trace() 966 else: 967 self.fieldStdevV = None 968 self.update_fft_spectrum() 969 970 def fresnel_reflection( 971 self, 972 material2, 973 angle_in, 974 material1=None, 975 forward: bool = True, 976 s_polarized: bool = True, 977 path: str = "./RefractiveIndices/", 978 ): 979 """calculates the fresnel reflection of the pulse at the interface between two materials. The waveform is travelling from material 1 to material 2. 980 The first medium (material1) should be non-absorptive. As usual the resulting waveform is stored in the TraceHandler object, replacing the previous one. 981 982 Refractive index files should contain 3 space-separated columns, respectively with headers: wvl n k, where wvl is the wavelength in um, n and k resp. the real and imaginary part of the refractive index. 983 Args: 984 material2: the filename (without '.txt') of the refractive index data for the material after the interface (e.g. Si Al MgF2); wavelength is in um in the refractive index file 985 angle_in: the incidence angle in degrees 986 material1: the filename (without '.txt') of the refractive index data for the material before the interface. If None (default), vacuum is assumed 987 forward (bool): if True (default) forward reflection is computed (the result waveform is the reflection of the previously stored waveform. 988 989 if False backward reflection is computed (the previous waveform is the reflection of the result waveform) 990 s_polarized (bool): True. Reflection calculation only implemented for s-polarized light 991 path (str): path for the refractive index files. 
Defaults to "./RefractiveIndices/" 992 """ 993 if not s_polarized: 994 raise ValueError( 995 "in function TraceHandler.fresnel_reflection() p_polarized is not implemented yet\n" 996 ) 997 998 # read refractive index 999 refIndexData = pandas.read_table( 1000 path + material2 + ".txt", sep=" ", keep_default_na=False 1001 ) 1002 wvl2 = np.array(refIndexData["wvl"]) * 1e3 1003 n2 = np.array(refIndexData["n"]) + 1j * np.array(refIndexData["k"]) 1004 if material1 is None: 1005 wvl1 = np.array(wvl2) 1006 n1 = n2 * 0 + 1 1007 else: 1008 refIndexData = pandas.read_table( 1009 path + material1 + ".txt", sep=" ", keep_default_na=False 1010 ) 1011 wvl1 = np.array(refIndexData["wvl"]) * 1e3 1012 n1 = np.array(refIndexData["n"]) + 1j * np.array(refIndexData["k"]) 1013 1014 # check that the wvl arrays are monotonically increasing 1015 if np.any(np.diff(wvl1) < 0): 1016 if np.all(np.diff(wvl1) < 0): 1017 wvl1 = wvl1[::-1] 1018 n1 = n1[::-1] 1019 else: 1020 raise ValueError( 1021 "in function TraceHandler.fresnel_reflection() wavelength array is not monotonous" 1022 ) 1023 if np.any(np.diff(wvl2) < 0): 1024 if np.all(np.diff(wvl2) < 0): 1025 wvl2 = wvl2[::-1] 1026 n2 = n2[::-1] 1027 else: 1028 raise ValueError( 1029 "in function TraceHandler.fresnel_reflection() wavelength array is not monotonous" 1030 ) 1031 1032 # frequency spectrum (monotonically increasing) 1033 freq1 = constants.speed_of_light / wvl1[::-1] * 1e-6 1034 freq2 = constants.speed_of_light / wvl2[::-1] * 1e-6 1035 n1 = n1[::-1] 1036 n2 = n2[::-1] 1037 1038 # fill with ones 1039 initial_ones_freq = np.linspace( 1040 freq1[1] - freq1[0], 1041 freq1[0], 1042 int(np.ceil(4 * freq1[0] / (freq1[1] - freq1[0]))), 1043 ) 1044 initial_ones = np.ones(len(initial_ones_freq)) * n1[0] 1045 final_ones_freq = np.linspace( 1046 2 * freq1[-1] - freq1[-2], 1047 4 * freq1[-1], 1048 int(np.ceil(15 * freq1[-1] / (freq1[-1] - freq1[-2]))), 1049 ) 1050 final_ones = np.ones(len(final_ones_freq)) * n1[-1] 1051 freq1 = np.concatenate((initial_ones_freq, freq1, final_ones_freq)) 1052 n1 = np.concatenate((initial_ones, n1, final_ones)) 1053 # same for n2 1054 initial_ones_freq = np.linspace( 1055 freq2[1] - freq2[0], 1056 freq2[0], 1057 int(np.ceil(4 * freq2[0] / (freq2[1] - freq2[0]))), 1058 ) 1059 initial_ones = np.ones(len(initial_ones_freq)) * n2[0] 1060 final_ones_freq = np.linspace( 1061 2 * freq2[-1] - freq2[-2], 1062 4 * freq2[-1], 1063 int(np.ceil(15 * freq2[-1] / (freq2[-1] - freq2[-2]))), 1064 ) 1065 final_ones = np.ones(len(final_ones_freq)) * n2[-1] 1066 freq2 = np.concatenate((initial_ones_freq, freq2, final_ones_freq)) 1067 n2 = np.concatenate((initial_ones, n2, final_ones)) 1068 1069 # add negative frequencies 1070 freq1 = np.concatenate((-freq1[::-1], freq1)) 1071 n1 = np.concatenate((np.conjugate(n1[::-1]), n1)) 1072 freq2 = np.concatenate((-freq2[::-1], freq2)) 1073 n2 = np.concatenate((np.conjugate(n2[::-1]), n2)) 1074 1075 # interpolate the n1 and n2 to the frequency axis of the fft 1076 n1Interp = np.interp(self.frequencyAxis, freq1, n1) 1077 n2Interp = np.interp(self.frequencyAxis, freq2, n2) 1078 1079 # calculate the fresnel reflection coefficients using only the angle of incidence (NOT SURE THIS WORKS WHEN THE FIRST MEDIUM IS LOSSY) 1080 r = ( 1081 n1Interp * np.cos(angle_in * np.pi / 180) 1082 - n2Interp 1083 * np.sqrt(1 - (n1Interp / n2Interp * np.sin(angle_in * np.pi / 180)) ** 2) 1084 ) / ( 1085 n1Interp * np.cos(angle_in * np.pi / 180) 1086 + n2Interp 1087 * np.sqrt(1 - (n1Interp / n2Interp * np.sin(angle_in * np.pi / 180)) ** 2) 1088 ) 
1089 if forward: 1090 self.fftFieldV = self.fftFieldV * r 1091 else: 1092 self.fftFieldV = self.fftFieldV / r 1093 1094 self.fieldStdevV = None 1095 1096 self.update_trace_from_fft() 1097 self.strip_from_trace() 1098 self.update_fft_spectrum() 1099 1100 def apply_zero_phase(self): 1101 """Applies zero-phase to the trace; this allows, for example, to retrieve the fourier limited pulse corresponding to the same FFT spectrum of the loaded trace""" 1102 tau = ( 1103 self.fsZeroPadding + (np.max(self.fieldTimeV) - np.min(self.fieldTimeV)) / 2 1104 ) 1105 self.fftFieldV = np.abs(self.fftFieldV) * np.exp( 1106 1.0j * (2 * np.pi * tau * self.frequencyAxis) 1107 ) 1108 self.update_trace_from_fft() 1109 self.strip_from_trace() 1110 self.update_fft_spectrum() 1111 1112 def time_frequency_analysis( 1113 self, sigma_time, low_lim=None, up_lim=None, low_lim_freq=None, up_lim_freq=None 1114 ): 1115 """Performs time-frequency analysis by using scipy's short time fourier transform (fourier transform of the trace convoluted by a 'sigma_time' broad gaussian). 1116 1117 Args: 1118 sigma_time: sigma of the gaussian window 1119 low_lim, up_lim (float): xaxis limits for plotting. Default None 1120 low_lim_freq, up_lim_freq (float): xaxis limits for plotting. Default None 1121 1122 """ 1123 dt = np.mean(np.diff(self.fieldTimeV)) 1124 w = scipy.signal.windows.gaussian( 1125 int(sigma_time / dt * 6) + 1, sigma_time / dt, sym=True 1126 ) 1127 TFA = scipy.signal.ShortTimeFFT( 1128 w, hop=1, fs=1.0 / dt, mfft=int(sigma_time / dt * 24), scale_to="magnitude" 1129 ) 1130 TFData = TFA.stft(self.fieldV) 1131 1132 fig, ax = plt.subplots() 1133 t_lo, t_hi, f_lo, f_hi = TFA.extent( 1134 self.fieldV.size 1135 ) # time and freq range of plot 1136 if low_lim is None: 1137 low_lim = t_lo + self.fieldTimeV[0] 1138 if up_lim is None: 1139 up_lim = t_hi + self.fieldTimeV[0] 1140 if low_lim_freq is None: 1141 low_lim_freq = 0 1142 if up_lim_freq is None: 1143 up_lim_freq = 3.0 1144 1145 ax.set(xlim=(low_lim, up_lim), ylim=(low_lim_freq, up_lim_freq)) 1146 ax.set_xlabel("Time (fs)") 1147 ax.set_ylabel("Frequency (PHz)") 1148 im1 = ax.imshow( 1149 abs(TFData) / np.max(abs(TFData)), 1150 origin="lower", 1151 aspect="auto", 1152 extent=(t_lo + self.fieldTimeV[0], t_hi + self.fieldTimeV[0], f_lo, f_hi), 1153 cmap="viridis", 1154 ) 1155 cbar = fig.colorbar(im1) 1156 1157 cbar.ax.set_ylabel("Magnitude of the field (Arb. unit)") 1158 fig.tight_layout() 1159 return TFData 1160 1161 def plot_trace(self, low_lim=None, up_lim=None, normalize: bool = True): 1162 """Plots the field trace. 1163 1164 Args: 1165 low_lim, up_lim: float = xaxis limits for plotting. Default None 1166 normalize: bool = if True (default) normalize the peak of the trace to 1 1167 """ 1168 fig, ax = plt.subplots() 1169 if normalize: 1170 norm_plot = self.normalization_trace 1171 else: 1172 norm_plot = 1 1173 main_line = ax.plot( 1174 self.fieldTimeV, self.fieldV / norm_plot, label="Field trace" 1175 ) 1176 if self.fieldStdevV is not None: 1177 ax.fill_between( 1178 self.fieldTimeV, 1179 (self.fieldV - self.fieldStdevV) / norm_plot, 1180 (self.fieldV + self.fieldStdevV) / norm_plot, 1181 color=main_line[0].get_color(), 1182 alpha=0.3, 1183 ) 1184 ax.set_xlabel("Time (fs)") 1185 ax.set_ylabel("Field (Arb. 
unit)") 1186 1187 if low_lim is not None and up_lim is not None: 1188 ax.set_xlim(low_lim, up_lim) 1189 return fig 1190 1191 def plot_spectrum( 1192 self, low_lim=40, up_lim=1000, no_phase: bool = False, phase_blanking_level=0.05 1193 ): 1194 """Plots the trace spectrum and phase together with the spectrometer measurement [if provided]. 1195 1196 Args: 1197 low_lim, up_lim (float): xaxis limits for plotting. Default: 40, 1000 1198 no_phase: if True, don't plot the phase. Default: False 1199 """ 1200 fig, ax = plt.subplots() 1201 1202 if not no_phase: 1203 ax2 = ax.twinx() 1204 lines = [] 1205 min_intensity = phase_blanking_level * np.max(self.fftSpectrum) 1206 lines += ax.plot( 1207 self.wvlAxis[(self.wvlAxis > low_lim) & (self.wvlAxis < up_lim)], 1208 self.fftSpectrum[(self.wvlAxis > low_lim) & (self.wvlAxis < up_lim)], 1209 label="Fourier transform", 1210 ) 1211 if not no_phase: 1212 ax2.plot([], []) 1213 if self.wvlSpectrometer is not None: 1214 ax2.plot([], []) 1215 lines += ax2.plot( 1216 self.wvlAxis[ 1217 (self.wvlAxis > low_lim) 1218 & (self.wvlAxis < up_lim) 1219 & (self.fftSpectrum > min_intensity) 1220 ], 1221 self.fftphase[ 1222 (self.wvlAxis > low_lim) 1223 & (self.wvlAxis < up_lim) 1224 & (self.fftSpectrum > min_intensity) 1225 ], 1226 "--", 1227 label="Phase", 1228 ) 1229 if self.wvlSpectrometer is not None: 1230 lines += ax.plot( 1231 self.wvlSpectrometer[ 1232 (self.wvlSpectrometer > low_lim) & (self.wvlSpectrometer < up_lim) 1233 ], 1234 self.ISpectrometer[ 1235 (self.wvlSpectrometer > low_lim) & (self.wvlSpectrometer < up_lim) 1236 ], 1237 label="Spectrometer", 1238 ) 1239 ax.set_xlabel("Wavelength (nm)") 1240 ax.set_ylabel("Intensity (Arb. unit)") 1241 ax.tick_params(axis="both") 1242 if not no_phase: 1243 ax2.set_ylabel("Phase (rad)") 1244 ax2.tick_params(axis="y") 1245 ax.legend(lines, [l.get_label() for l in lines]) 1246 return fig
Loads and stores the trace (field vs time) saved to file (two columns, tab separated) or given in the form of time and field [or wavelengths, spectrum and phase] arrays.
ALL THE TIMES ARE IN fs, FREQUENCIES IN PHz, WAVELENGTHS IN nm.
Most of the class methods modify the object data in place, and don't return anything.
Attributes:
- fieldTimeV: (WAVEFORM DATA in the time domain) time array (fs)
- fieldV: (WAVEFORM DATA in the time domain) field array
- fieldStdevV: (WAVEFORM DATA in the time domain) field standard deviation (or sigma) array
- frequencyAxis: (WAVEFORM DATA in the frequency domain) frequency axis of the FFT in PHz (should be np.fft compatible, i.e., equally spaced, in the form [0, df, 2*df, ..., fmax, -fmax[-df], ..., -df])
- fftFieldV: (WAVEFORM DATA in the frequency domain) FFT field (complex)
- complexFieldTimeV: (WAVEFORM DATA in the frequency domain) time axis of the complex field (fs)
- complexFieldV: (WAVEFORM DATA in the frequency domain) complex field ( = IFFT{FFT(ω)θ(ω)}(t), where θ(ω) = 1 if ω >= 0, θ(ω) = 0 otherwise )
- wvlAxis: (WAVEFORM DATA in the frequency domain) wavelength axis for the FFT spectrum (nm)
- fftSpectrum: (WAVEFORM DATA in the frequency domain) wavelength-dependent positive FFT spectrum ( = |FFT{field}|^2 * df/dλ, λ > 0 )
- fftphase: (WAVEFORM DATA in the frequency domain) wavelength-dependent spectral phase
- wvlSpectrometer: (SPECTROMETER DATA) wavelength (nm)
- ISpectrometer: (SPECTROMETER DATA) spectral intensity
- fsZeroPadding: (= 150 by default) time span in fs for zero padding (appending zeroes both before and after the trace, in order to get a smoother fft)
- filename: filename to load the trace from
- filename_spectrum: filename to load the spectrum from
- normalization_trace: normalization factor for the trace
- zero_delay: time zero; corresponds to the maximum of the envelope
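The frequencyAxis layout described above is the standard numpy FFT ordering. As a minimal sketch (the time step and number of samples are arbitrary example values), such an axis in PHz follows directly from a time grid in fs:

```python
import numpy as np

dt = 0.2                                   # time step in fs (example value)
n = 512                                    # number of samples (example value)
time_fs = (np.arange(n) - n // 2) * dt     # time axis in fs

# np.fft-compatible frequency axis in PHz: [0, df, 2*df, ..., fmax, negative half, ..., -df]
frequency_axis_PHz = np.fft.fftfreq(n, d=dt)
field_fft = np.fft.fft(np.exp(-time_fs**2 / 50.0))   # FFT of an example envelope on that grid
```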
```python
def __init__(
    self,
    filename=None,
    filename_spectrum=None,
    time=None,
    field=None,
    stdev=None,
    wvl=None,
    spectrum=None,
    wvl_FFT_trace=None,
    spectrum_FFT_trace=None,
    phase_FFT_trace=None,
):
    """Constructor; loads the trace from file (or from time-field or wavelegth-spectrum-phase arrays) and stores it in the class.

    One of the following must be provided (as a default all args are None):
    * filename: the name of the file containing the trace (two columns, tab separated)
    * time and field: the time vector (fs) and electric field vector (a.u.)
    * wvl_FFT_trace, spectrum_FFT_trace and phase_FFT_trace: the wavelength (nm) spectrum (a.u.) and phase (rad) vectors

    Args:
        filename (str): the name of the file containing the trace (two columns - time and field, tab separated)
        filename_spectrum (str): the name of the file containing the spectrum (two columns - wavelength and spectral intensity, tab separated)
            this does not correspond to the fourier transform of the trace, but to some spectrometer data that you would like to compare to the loaded trace
        time (array): the time vector (fs)
        field (array): the electric field vector (a.u.)
        stdev (array): the standard deviation (or sigma) of the electric field vector (a.u.)
        wvl (array): the wavelength vector (nm) (spectrometer)
        spectrum (array): the spectral intensity vector (a.u.) (spectrometer)
        wvl_FFT_trace (array): the wavelength vector (nm) of the FFT trace (to reconstruct a trace from spectral data)
        spectrum_FFT_trace (array): the spectral intensity vector (a.u.) of the FFT trace (to reconstruct a trace from spectral data)
        phase_FFT_trace (array): the spectral phase vector (rad) of the FFT trace (to reconstruct a trace from spectral data)
    """

    self.fsZeroPadding = 150
    self.filename = filename
    self.filename_spectrum = filename_spectrum

    self.fieldTimeV = None
    self.fieldV = None
    self.fieldStdevV = None

    self.frequencyAxis = None
    self.fftFieldV = None
    self.complexFieldTimeV = None
    self.complexFieldV = None
    self.wvlAxis = None
    self.fftSpectrum = None
    self.fftphase = None

    self.wvlSpectrometer = None
    self.ISpectrometer = None

    self.normalization_trace = None
    self.zero_delay = None

    if filename is not None:
        self.load_trace()
    elif time is not None and field is not None:
        self.load_trace_from_arrays(time, field, stdev)
    elif (
        wvl_FFT_trace is not None
        and spectrum_FFT_trace is not None
        and phase_FFT_trace is not None
    ):
        self.load_trace_from_spectral_data(
            wvl_FFT_trace, spectrum_FFT_trace, phase_FFT_trace
        )
    else:
        print(
            "\n\nWARNING: in function TraceHandler.__init__() no data was loaded\n\n"
        )

    if self.filename_spectrum is not None:
        self.load_spectrum()
    elif wvl is not None and spectrum is not None:
        self.load_spectrum_from_arrays(wvl, spectrum)
```
Constructor; loads the trace from file (or from time-field or wavelegth-spectrum-phase arrays) and stores it in the class.
One of the following must be provided (as a default all args are None):
- filename: the name of the file containing the trace (two columns, tab separated)
- time and field: the time vector (fs) and electric field vector (a.u.)
- wvl_FFT_trace, spectrum_FFT_trace and phase_FFT_trace: the wavelength (nm), spectrum (a.u.) and phase (rad) vectors
Arguments:
- filename (str): the name of the file containing the trace (two columns - time and field, tab separated)
- filename_spectrum (str): the name of the file containing the spectrum (two columns - wavelength and spectral intensity, tab separated); this is not the Fourier transform of the trace, but spectrometer data that you would like to compare with the loaded trace
- time (array): the time vector (fs)
- field (array): the electric field vector (a.u.)
- stdev (array): the standard deviation (or sigma) of the electric field vector (a.u.)
- wvl (array): the wavelength vector (nm) (spectrometer)
- spectrum (array): the spectral intensity vector (a.u.) (spectrometer)
- wvl_FFT_trace (array): the wavelength vector (nm) of the FFT trace (to reconstruct a trace from spectral data)
- spectrum_FFT_trace (array): the spectral intensity vector (a.u.) of the FFT trace (to reconstruct a trace from spectral data)
- phase_FFT_trace (array): the spectral phase vector (rad) of the FFT trace (to reconstruct a trace from spectral data)
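A minimal usage sketch of the array-based constructor path (the import path, pulse parameters and plotting limits below are illustrative assumptions, not part of the library):

```python
import numpy as np
from attoworld.personal.marco import TraceHandler  # assumed import path

# synthetic few-cycle pulse: Gaussian envelope with a ~0.375 PHz (~800 nm) carrier
t = np.linspace(-40.0, 40.0, 2001)                           # time axis in fs
field = np.exp(-(t / 10.0) ** 2) * np.cos(2 * np.pi * 0.375 * t)

th = TraceHandler(time=t, field=field)                       # the FFT is computed on construction
fig_time = th.plot_trace()                                   # time-domain plot
fig_spec = th.plot_spectrum(low_lim=400, up_lim=1200)        # spectrum and spectral phase
```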
237 def load_spectrum_from_arrays(self, wvl, spectrum): 238 """loads the spectrum from wavelength and spectrum arrays and stores it in the class. 239 240 The spectrum does not correspond to the fourier transform of the trace, but to some spectrometer data that you would like to compare with the loaded trace 241 242 Args: 243 wvl: wavelength array (nm) 244 spectrum: spectral intensity array 245 """ 246 """wavelength is in nm""" 247 check_equal_length(wvl, spectrum) 248 self.wvlSpectrometer = np.array(wvl) 249 self.ISpectrometer = np.array(spectrum)
loads the spectrum from wavelength and spectrum arrays and stores it in the class.
The spectrum does not correspond to the Fourier transform of the trace, but to spectrometer data that you would like to compare with the loaded trace.
Arguments:
- wvl: wavelength array (nm)
- spectrum: spectral intensity array
251 def load_trace_from_arrays(self, fieldTimeV, fieldV, fieldStdevV=None): 252 """loads the trace from time and field arrays and stores it in the class. 253 254 Args: 255 fieldTimeV: time array (fs) 256 fieldV: field array 257 fieldStdevV: standard deviation - or sigma - to be associated to the trace (optional, default = None) 258 """ 259 if fieldTimeV is None or fieldV is None: 260 raise ValueError( 261 "\nin function TraceHandler.load_from_arrays() too many arguments were None\n" 262 "it might be that the constructor was erroneously used with no or too few arguments\n" 263 ) 264 if np.max(np.abs(fieldTimeV)) < 1.0: 265 print( 266 "\n\nWARNING: do you remember that the time axis of TraceHandler is in fs?\n\n" 267 ) 268 self.fieldTimeV = np.array(fieldTimeV) 269 self.fieldV = np.array(fieldV) 270 if fieldStdevV is not None: 271 self.fieldStdevV = np.array(fieldStdevV) 272 self.normalization_trace = np.max(np.abs(self.fieldV)) 273 self.update_fft()
loads the trace from time and field arrays and stores it in the class.
Arguments:
- fieldTimeV: time array (fs)
- fieldV: field array
- fieldStdevV: standard deviation (sigma) to be associated with the trace (optional, default = None)
275 def load_trace(self, fname=None): 276 """loads from file""" 277 if fname is not None: 278 self.filename = fname 279 data = pandas.read_csv(self.filename, sep="\t") 280 self.fieldTimeV = data["delay (fs)"].to_numpy() 281 self.fieldV = data["field (a.u.)"].to_numpy() 282 self.fieldStdevV = data["stdev field"].to_numpy() 283 self.normalization_trace = np.max(np.abs(self.fieldV)) 284 self.update_fft()
loads the trace from file; the file is expected to contain the tab-separated columns 'delay (fs)', 'field (a.u.)' and 'stdev field'
286 def load_trace_from_spectral_data( 287 self, wvl_FFT_trace, spectrum_FFT_trace, phase_FFT_trace 288 ): 289 """loads the trace from wavelength, spectrum, and spectral phase arrays and stores it in the class. 290 291 This function has a different job than load_spectrum_from_arrays(): in fact, it retrieves the trace from the given spectral data via fourier transform. 292 293 Args: 294 wvl_FFT_trace: wavelength array in nm 295 spectrum_FFT_trace: (wavelength-)spectral intensity array. This is assumed to be |FFT{field}|^2 * df/dλ 296 phase_FFT_trace: spectral phase in rad 297 """ 298 check_equal_length(wvl_FFT_trace, spectrum_FFT_trace, phase_FFT_trace) 299 300 # check that the wvl_FFT_trace array is monotonically increasing 301 if np.any(np.diff(wvl_FFT_trace) < 0): 302 if np.all(np.diff(wvl_FFT_trace) < 0): 303 wvl_FFT_trace = wvl_FFT_trace[::-1] 304 spectrum_FFT_trace = spectrum_FFT_trace[::-1] 305 phase_FFT_trace = phase_FFT_trace[::-1] 306 else: 307 raise ValueError( 308 "in function TraceHandler.load_from_spectral_data() wavelength array is not monotonous" 309 ) 310 311 # frequency spectrum (monotonically increasing) 312 freq = constants.speed_of_light / wvl_FFT_trace[::-1] * 1e-6 313 spectrum_freq = spectrum_FFT_trace[::-1] / ( 314 constants.speed_of_light / wvl_FFT_trace[::-1] ** 2 * 1e-6 315 ) 316 phase_FFT_trace = phase_FFT_trace[::-1] 317 318 # create proper freq axis for the fourier transform 319 df = (freq[1] - freq[0]) / 4 320 nf = int(np.ceil(3 * freq[-1] / df)) 321 self.frequencyAxis = np.concatenate((np.arange(0, nf), np.arange(-nf, 0))) * df 322 # corresponding time window 323 twin = 1.0 / (df) 324 325 # complex fourier transform 326 spectrum_freq = np.sqrt(spectrum_freq) * np.exp(1.0j * phase_FFT_trace) 327 328 # fill with zeros 329 initial_zero_freq = np.linspace( 330 freq[1] - freq[0], freq[0], int(np.ceil(4 * freq[0] / (freq[1] - freq[0]))) 331 ) 332 initial_zeros = np.zeros(len(initial_zero_freq)) 333 final_zero_freq = np.linspace( 334 2 * freq[-1] - freq[-2], 335 4 * freq[-1], 336 int(np.ceil(15 * freq[-1] / (freq[-1] - freq[-2]))), 337 ) 338 final_zeros = np.zeros(len(final_zero_freq)) 339 freq = np.concatenate((initial_zero_freq, freq, final_zero_freq)) 340 spectrum_freq = np.concatenate((initial_zeros, spectrum_freq, final_zeros)) 341 # add negative frequencies 342 freq = np.concatenate((-freq[::-1], freq)) 343 spectrum_freq = np.concatenate( 344 (np.conjugate(spectrum_freq[::-1]), spectrum_freq) 345 ) 346 347 # interpolate the spectrum to the frequency axis of the fft 348 self.fftFieldV = np.interp(self.frequencyAxis, freq, spectrum_freq) 349 350 # add linear phase to center the pulse in the time window; this has to be done after interpolation because the spectral phase will be fast oscillating 351 self.fftFieldV = self.fftFieldV * np.exp( 352 1.0j * 2 * np.pi * self.frequencyAxis * twin / 2 353 ) 354 355 self.update_fft_spectrum() 356 self.update_trace_from_fft()
loads the trace from wavelength, spectrum, and spectral phase arrays and stores it in the class.
This function has a different job than load_spectrum_from_arrays(): it reconstructs the trace from the given spectral data via inverse Fourier transform. A short sketch follows the argument list below.
Arguments:
- wvl_FFT_trace: wavelength array in nm
- spectrum_FFT_trace: (wavelength-)spectral intensity array. This is assumed to be |FFT{field}|^2 * df/dλ
- phase_FFT_trace: spectral phase in rad
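A short sketch of this reconstruction path, assuming a Gaussian spectrum and a flat spectral phase (all numerical values are illustrative):

```python
import numpy as np
from attoworld.personal.marco import TraceHandler  # assumed import path

wvl = np.linspace(400.0, 1200.0, 801)                 # wavelength axis (nm)
spectrum = np.exp(-((wvl - 800.0) / 80.0) ** 2)       # assumed to be |FFT{field}|^2 * df/dλ
phase = np.zeros_like(wvl)                            # flat phase -> Fourier-limited pulse

th = TraceHandler(
    wvl_FFT_trace=wvl, spectrum_FFT_trace=spectrum, phase_FFT_trace=phase
)
t, field = th.get_trace()                             # reconstructed waveform (fs, a.u.)
```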
358 def load_spectrum(self, fname=None): 359 """loads spectrum from file. 360 361 The spectrum does not correspond to the fourier transform of the trace, but to the spectrometer data that you would like to compare with the loaded trace 362 363 Args: 364 fname: filename 365 """ 366 if fname is not None: 367 self.filename_spectrum = fname 368 data = pandas.read_csv(self.filename_spectrum, sep="\t") 369 self.wvlSpectrometer = data["wavelength (nm)"].to_numpy() 370 self.ISpectrometer = data["intensity (a.u.)"].to_numpy() 371 self.normalize_spectrum()
loads spectrum from file.
The spectrum does not correspond to the Fourier transform of the trace, but to the spectrometer data that you would like to compare with the loaded trace.
Arguments:
- fname: filename
373 def update_fft(self, zero_pad_field=True): 374 """updates the fft of the trace from the time domain data. 375 376 Args: 377 zero_pad_field: bool; if true (default) add zeros before and after the trace before computing fft (for a smoother spectrum) 378 The length of appended zeros is defined by self.fsZeroPadding. 379 """ 380 zp_fieldTimeV, zp_fieldV = deepcopy(self.fieldTimeV), deepcopy(self.fieldV) 381 if zero_pad_field: 382 zp_fieldTimeV, zp_fieldV = zero_padding( 383 self.fieldTimeV, self.fieldV, fsExtension=self.fsZeroPadding 384 ) 385 self.frequencyAxis, self.fftFieldV = fourier_transform(zp_fieldTimeV, zp_fieldV) 386 self.update_fft_spectrum()
updates the fft of the trace from the time domain data.
Arguments:
- zero_pad_field (bool): if True (default), add zeros before and after the trace before computing the FFT (for a smoother spectrum); the length of the appended zeros is defined by self.fsZeroPadding
388 def update_fft_spectrum(self): 389 """Updates the wavelength-dependent intensity spectrum. Results are stored in self.wvlAxis and self.fftSpectrum. 390 391 This is (derived from but) different from the fourier transform because: 392 * it considers the squared modulus of the field 393 * it multiplies the frequency-dependent spectral density by df/dλ = c/λ² 394 * it considers only positive wavelengths 395 396 The function internally calls update_spectral_phase() as well. 397 """ 398 self.wvlAxis = ( 399 constants.speed_of_light 400 / self.frequencyAxis[self.frequencyAxis > 0] 401 * 1.0e-6 402 ) 403 self.fftSpectrum = ( 404 np.abs(self.fftFieldV[self.frequencyAxis > 0]) ** 2 405 * constants.speed_of_light 406 / self.wvlAxis**2 407 ) 408 self.compute_complex_field() 409 self.update_spectral_phase() 410 self.normalize_fft_spectrum()
Updates the wavelength-dependent intensity spectrum. Results are stored in self.wvlAxis and self.fftSpectrum.
This is (derived from but) different from the Fourier transform because:
- it considers the squared modulus of the field
- it multiplies the frequency-dependent spectral density by df/dλ = c/λ²
- it considers only positive wavelengths
The function internally calls update_spectral_phase() as well.
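The frequency-to-wavelength conversion performed here can be summarized in a few standalone lines (a sketch only; overall constant factors are immaterial because the spectrum is subsequently renormalized by normalize_fft_spectrum()):

```python
import numpy as np
from scipy import constants

def wavelength_spectrum(freq_PHz, fft_field):
    # S(λ) ∝ |FFT{E}(f)|² · df/dλ with df/dλ = c/λ²; only positive frequencies are kept
    pos = freq_PHz > 0
    wvl_nm = constants.speed_of_light / freq_PHz[pos] * 1.0e-6   # λ[nm] = c[m/s] / (f[PHz]·1e15) · 1e9
    spectrum = np.abs(fft_field[pos]) ** 2 * constants.speed_of_light / wvl_nm**2
    return wvl_nm, spectrum
```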
412 def update_spectral_phase(self): 413 """computes the spectral phase of the trace. The result is stored in self.fftphase. 414 415 An attempt of automatic linear phase subtraction is made. Phase is unwrapped. 416 """ 417 # determine the time distance (delay) of the waveform peak from the beginning of the trace (more precisely, from the beginning of the ifft) 418 tmax = self.get_zero_delay() 419 deltaT1 = tmax - self.complexFieldTimeV[0] 420 deltaT2 = self.complexFieldTimeV[-1] - tmax 421 ifft_twindow = 1 / (self.frequencyAxis[1] - self.frequencyAxis[0]) 422 total_delay = ( 423 ifft_twindow / 2 + (deltaT2 - deltaT1) / 2 424 ) # total estimated delay of the peak from the beginning of the ifft trace 425 426 # compute the phase, use the computed 'delay' to correct for the non-interesting linear component, and unwrap it 427 self.fftphase = np.angle(self.fftFieldV[self.frequencyAxis > 0]) 428 self.fftphase = ( 429 self.fftphase 430 - total_delay * self.frequencyAxis[self.frequencyAxis > 0] * 2 * np.pi 431 ) 432 self.fftphase = np.unwrap(self.fftphase) 433 434 # subtract constant phase offset (m * 2*Ï€) 435 n_pi = np.sum(self.fftSpectrum * self.fftphase) / np.sum(self.fftSpectrum) 436 n_pi = int(n_pi / (2 * np.pi)) 437 self.fftphase -= n_pi * 2 * np.pi
computes the spectral phase of the trace. The result is stored in self.fftphase.
An attempt at automatic linear-phase subtraction is made; the phase is unwrapped.
439 def update_trace_from_fft(self): # remember to use together with strip_from_trace 440 # careful: the complex part of the IFFT is discarded 441 """Updates the field trace from fft 442 443 In most cases, you might want to call strip_from_trace() afterward, to eliminate the zero-padding from the trace. 444 """ 445 self.fieldTimeV, self.fieldV = inverse_fourier_transform( 446 self.frequencyAxis, self.fftFieldV 447 ) 448 self.fieldV = np.real(self.fieldV) 449 self.normalization_trace = np.max(np.abs(self.fieldV)) 450 self.get_zero_delay()
Updates the field trace from the FFT.
In most cases, you might want to call strip_from_trace() afterward, to eliminate the zero-padding from the trace.
452 def strip_from_trace(self, timeRange=None): 453 """eliminates the zeros appended to the trace (zero-padding) when computing the FFT. 454 455 Useful when retrieving the trace via ifft (method update_trace_from_fft()). 456 457 Args: 458 timeRange (float): the time range (fs) to strip from the beginning and end of the trace. If None (default), fsZeroPadding is used. 459 """ 460 if timeRange is None: 461 timeRange = self.fsZeroPadding 462 # strip the first and last fsZeroPadding time points 463 self.fieldV = self.fieldV[ 464 (self.fieldTimeV >= np.min(self.fieldTimeV) + timeRange) 465 & (self.fieldTimeV <= np.max(self.fieldTimeV) - timeRange) 466 ] 467 self.fieldTimeV = self.fieldTimeV[ 468 (self.fieldTimeV >= np.min(self.fieldTimeV) + timeRange) 469 & (self.fieldTimeV <= np.max(self.fieldTimeV) - timeRange) 470 ] 471 if self.fieldStdevV is None: 472 return True 473 if len(self.fieldStdevV) > len(self.fieldV): 474 n_remove = len(self.fieldStdevV) - len(self.fieldV) 475 print( 476 "\n\nWarning: fieldStdevV is longer than fieldV, removing", 477 n_remove, 478 " elements\n\n", 479 ) 480 while n_remove > 0: 481 if n_remove % 2 == 0: 482 self.fieldStdevV = np.delete(self.fieldStdevV, 0) 483 else: 484 self.fieldStdevV = np.delete(self.fieldStdevV, -1) 485 n_remove -= 1 486 elif len(self.fieldStdevV) < len(self.fieldV): 487 n_add = len(self.fieldV) - len(self.fieldStdevV) 488 print( 489 "\n\nWarning: fieldStdevV is shorter than fieldV, adding", 490 n_add, 491 " elements\n\n", 492 ) 493 while n_add > 0: 494 if n_add % 2 == 0: 495 self.fieldStdevV = np.insert(self.fieldStdevV, 0, 0) 496 else: 497 self.fieldStdevV = np.append(self.fieldStdevV, 0) 498 n_add -= 1 499 return True
eliminates the zeros appended to the trace (zero-padding) when computing the FFT.
Useful when retrieving the trace via ifft (method update_trace_from_fft()).
Arguments:
- timeRange (float): the time range (fs) to strip from the beginning and end of the trace. If None (default), fsZeroPadding is used.
501 def strip_from_complex_trace(self, timeRange=None): 502 """from the complex trace eliminates the zeros appended (zero-padding) when computing the FFT (Analogous to strip_from_trace()).""" 503 if timeRange is None: 504 timeRange = self.fsZeroPadding 505 # strip the first and last fsZeroPadding time points 506 self.complexFieldV = self.complexFieldV[ 507 (self.complexFieldTimeV >= np.min(self.complexFieldTimeV) + timeRange) 508 & (self.complexFieldTimeV <= np.max(self.complexFieldTimeV) - timeRange) 509 ] 510 self.complexFieldTimeV = self.complexFieldTimeV[ 511 (self.complexFieldTimeV >= np.min(self.complexFieldTimeV) + timeRange) 512 & (self.complexFieldTimeV <= np.max(self.complexFieldTimeV) - timeRange) 513 ]
eliminates from the complex trace the zeros appended (zero padding) when computing the FFT (analogous to strip_from_trace()).
515 def tukey_time_window(self, lowEdge, upEdge, lowEdgeWidth, upEdgeWidth): 516 """applies a tukey window to the trace in the time domain. 517 518 Args: 519 lowEdge, upEdge: lower and upper edge of the window upEdge-lowEdge = FWHM of the window 520 lowEdgeWidth, upEdgeWidth: width of the cosine-shaped edges (from 0 to 1 or viceversa) 521 """ 522 window = asymmetric_tukey_window( 523 np.abs(self.fieldTimeV), lowEdge, upEdge, lowEdgeWidth, upEdgeWidth 524 ) 525 self.fieldV = self.fieldV * window 526 self.update_fft()
applies a tukey window to the trace in the time domain.
Arguments:
- lowEdge, upEdge: lower and upper edges of the window; upEdge - lowEdge = FWHM of the window
- lowEdgeWidth, upEdgeWidth: width of the cosine-shaped edges (from 0 to 1 or vice versa)
528 def fourier_interpolation(self, ntimes_finer: int): 529 """Shrinks the time step of the trace in time domain by a factor ntimes_finer. 530 531 This is done by 'zero-padding' the fourier transform of the trace, i.e., by extending the frequency axis by a factor ntimes_finer, and adding corresponding zeros to the FFT field. 532 533 Args: 534 ntimes_finer (int): the factor by which to shrink the time step of the trace in time domain. Must be >= 1. 535 """ 536 if ntimes_finer < 1: 537 raise ValueError( 538 "in function TraceHandler.fourier_interpolation() ntimes_finer must be >= 1" 539 ) 540 if self.frequencyAxis is None or self.fftFieldV is None: 541 raise ValueError( 542 "in function TraceHandler.fourier_interpolation() frequency axis or fft field is not defined" 543 ) 544 # extend the frequency axis by a factor ntimes_finer 545 df = self.frequencyAxis[1] - self.frequencyAxis[0] 546 i_last_pos = int(np.ceil(len(self.frequencyAxis) / 2) - 1) 547 if self.frequencyAxis[i_last_pos] < 0 or self.frequencyAxis[i_last_pos + 1] > 0: 548 print(self.frequencyAxis[0 : i_last_pos + 1]) 549 print(self.frequencyAxis[i_last_pos + 1 :]) 550 raise Exception( 551 "in function fourier_interpolation() frequency axes was not extended correctly" 552 ) 553 554 appended_freqFFT = np.linspace( 555 self.frequencyAxis[i_last_pos] + df, 556 ntimes_finer * self.frequencyAxis[i_last_pos] + df, 557 round((ntimes_finer - 1) * self.frequencyAxis[i_last_pos] / df), 558 ) 559 prepended_freqFFT = np.linspace( 560 -(ntimes_finer - 1) * self.frequencyAxis[i_last_pos] 561 + self.frequencyAxis[i_last_pos + 1], 562 +self.frequencyAxis[i_last_pos + 1], 563 round((ntimes_finer - 1) * self.frequencyAxis[i_last_pos] / df), 564 ) 565 self.frequencyAxis = np.concatenate( 566 ( 567 self.frequencyAxis[: i_last_pos + 1], 568 appended_freqFFT, 569 prepended_freqFFT, 570 self.frequencyAxis[i_last_pos + 1 :], 571 ) 572 ) 573 self.fftFieldV = np.concatenate( 574 ( 575 self.fftFieldV[: i_last_pos + 1], 576 np.zeros(len(appended_freqFFT)), 577 np.zeros(len(prepended_freqFFT)), 578 self.fftFieldV[i_last_pos + 1 :], 579 ) 580 ) 581 582 self.update_trace_from_fft() 583 self.strip_from_trace() 584 self.update_fft_spectrum()
Shrinks the time step of the trace in time domain by a factor ntimes_finer.
This is done by 'zero-padding' the Fourier transform of the trace, i.e., by extending the frequency axis by a factor ntimes_finer and filling the new bins of the FFT field with zeros. A generic sketch of the idea follows the argument list below.
Arguments:
- ntimes_finer (int): the factor by which to shrink the time step of the trace in time domain. Must be >= 1.
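The underlying idea can be illustrated with plain NumPy (a generic sketch of spectral zero-padding using np.fft, not the internals of this class):

```python
import numpy as np

def fourier_interpolate(signal, ntimes_finer):
    # refine the time sampling of a real signal by zero-padding its FFT at high frequency
    n = len(signal)
    spectrum = np.fft.rfft(signal)
    # the longer inverse FFT rescales by 1/(n*ntimes_finer), hence the amplitude correction
    return np.fft.irfft(spectrum, n=n * ntimes_finer) * ntimes_finer

coarse = np.cos(np.linspace(0, 6 * np.pi, 32, endpoint=False))
fine = fourier_interpolate(coarse, 4)   # 4x finer sampling of the same waveform
```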
586 def differentiate_trace(self, spectrally=True): 587 """computes the time derivative of the trace. 588 589 Args: 590 spectrally: bool; if True (default) the derivative will be computed in the spectral domain (maybe more stable numerically) 591 """ 592 if spectrally: 593 self.fftFieldV = 1j * self.frequencyAxis * self.fftFieldV 594 self.update_trace_from_fft() 595 self.update_fft_spectrum() 596 else: 597 self.fieldV = np.gradient(self.fieldV, self.fieldTimeV) 598 self.update_fft()
computes the time derivative of the trace.
Arguments:
- spectrally: bool; if True (default) the derivative will be computed in the spectral domain (maybe more stable numerically)
600 def integrate_trace(self, spectrally=True): 601 """integrates the trace in time. 602 603 Args: 604 spectrally (bool): if True (default) the integral will be computed in the spectral domain (maybe more stable numerically) 605 """ 606 if spectrally: 607 if self.frequencyAxis[0] != 0: 608 print( 609 "WARNING: in function TraceHandler.integrate_trace() the FFT axis' first element is not zero" 610 ) 611 self.fftFieldV[1:] = -1j / self.frequencyAxis[1:] * self.fftFieldV[1:] 612 self.update_trace_from_fft() 613 self.update_fft_spectrum() 614 else: 615 self.fieldV = np.cumsum(self.fieldV) * np.mean(np.diff(self.fieldTimeV)) 616 self.update_fft()
integrates the trace in time; a generic frequency-domain sketch covering this method and differentiate_trace() follows the argument list below.
Arguments:
- spectrally (bool): if True (default) the integral will be computed in the spectral domain (maybe more stable numerically)
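Both methods rely on the usual frequency-domain identities; a generic NumPy illustration (standard np.fft conventions, not the class internals):

```python
import numpy as np

t = np.linspace(-50.0, 50.0, 4096)                          # fs
field = np.exp(-(t / 8.0) ** 2) * np.cos(2 * np.pi * 0.375 * t)

freq = np.fft.fftfreq(len(t), d=t[1] - t[0])                # PHz
F = np.fft.fft(field)

derivative = np.fft.ifft(2j * np.pi * freq * F).real        # d/dt  <->  multiplication by i·2πf

F_int = np.zeros_like(F)
nonzero = freq != 0
F_int[nonzero] = F[nonzero] / (2j * np.pi * freq[nonzero])  # ∫dt  <->  division by i·2πf (DC term dropped)
integral = np.fft.ifft(F_int).real
```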
618 def save_trace_to_file(self, filename, low_lim=None, up_lim=None): 619 """Saves the trace to file 620 621 Args: 622 filename 623 low_lim, up_lim: only save the trace between low_lim and up_lim (default: None, None) 624 """ 625 if low_lim is not None and up_lim is not None: 626 fieldTimeV_to_write = self.fieldTimeV[ 627 (self.fieldTimeV >= low_lim) & (self.fieldTimeV <= up_lim) 628 ] 629 fieldV_to_write = self.fieldV[ 630 (self.fieldTimeV >= low_lim) & (self.fieldTimeV <= up_lim) 631 ] 632 else: 633 fieldTimeV_to_write = self.fieldTimeV 634 fieldV_to_write = self.fieldV 635 data = pandas.DataFrame( 636 {"delay (fs)": fieldTimeV_to_write, "field (a.u.)": fieldV_to_write} 637 ) 638 data.to_csv(filename, sep="\t", index=False)
Saves the trace to file
Arguments:
- filename: name of the output file (tab separated)
- low_lim, up_lim: only save the trace between low_lim and up_lim (default: None, None)
640 def normalize_spectrum(self): 641 """Normalizes the comparison spectrum to its integral.""" 642 spectrum_range = [60, 930] 643 n_factor = ( 644 np.abs( 645 np.diff(self.wvlSpectrometer)[ 646 (self.wvlSpectrometer[:-1] > spectrum_range[0]) 647 & (self.wvlSpectrometer[:-1] < spectrum_range[1]) 648 ] 649 ) 650 * self.ISpectrometer[:-1][ 651 (self.wvlSpectrometer[:-1] > spectrum_range[0]) 652 & (self.wvlSpectrometer[:-1] < spectrum_range[1]) 653 ] 654 ).sum() 655 self.ISpectrometer /= n_factor
Normalizes the comparison (spectrometer) spectrum to its integral over the 60-930 nm range.
657 def normalize_fft_spectrum(self, spectrum_range=[60, 930]): 658 """Normalizes the spectrum of the trace (|FFT{trace}|^2 * df/dλ) to its integral (area) 659 660 Args: 661 spectrum_range (list): the range of wavelengths (nm) to consider for the normalization. Default is [60, 930] nm. 662 """ 663 n_factor = ( 664 np.abs( 665 np.diff(self.wvlAxis)[ 666 (self.wvlAxis[:-1] > spectrum_range[0]) 667 & (self.wvlAxis[:-1] < spectrum_range[1]) 668 ] 669 ) 670 * self.fftSpectrum[:-1][ 671 (self.wvlAxis[:-1] > spectrum_range[0]) 672 & (self.wvlAxis[:-1] < spectrum_range[1]) 673 ] 674 ).sum() 675 self.fftSpectrum /= n_factor
Normalizes the spectrum of the trace (|FFT{trace}|^2 * df/dλ) to its integral (area)
Arguments:
- spectrum_range (list): the range of wavelengths (nm) to consider for the normalization. Default is [60, 930] nm.
677 def get_trace(self): 678 """Returns the time (fs) and field arrays. Field is in a.u. unless set_fluence() was called or a calibrated field was given as an input.""" 679 return self.fieldTimeV, self.fieldV
Returns the time (fs) and field arrays. Field is in a.u. unless set_fluence() was called or a calibrated field was given as an input.
681 def get_spectrum_trace(self): 682 """Returns the wavelength and spectral intensity corresponding to the fourier transform of the trace.""" 683 return self.wvlAxis, self.fftSpectrum
Returns the wavelength and spectral intensity corresponding to the fourier transform of the trace.
685 def get_spectral_phase(self): 686 """Returns the wavelength array and the spectral phase array corresponding to the FFT of the trace.""" 687 return self.wvlAxis, self.fftphase
Returns the wavelength array and the spectral phase array corresponding to the FFT of the trace.
689 def get_stdev(self): 690 """Returns only the standard deviation (sigma) array""" 691 return self.fieldStdevV
Returns only the standard deviation (sigma) array
693 def get_positive_fft_field(self): 694 """Returns the positive frequency axis f and the corresponding FFT field (complex). 695 696 Main task of this function is to 'smooth' the phase of the fft (= remove linear phase, corresponding to a time shift in the temporal domain), since the waveform is usually centered around t = 0""" 697 if self.frequencyAxis is None or self.fftFieldV is None: 698 raise ValueError( 699 "in function TraceHandler.get_positive_fft_field() frequency axis or fft field is not defined" 700 ) 701 positive_freq = self.frequencyAxis[self.frequencyAxis >= 0] 702 positive_fft_field = self.fftFieldV[self.frequencyAxis >= 0] 703 # remove linear phase 704 tmax = self.get_zero_delay() 705 deltaT1 = tmax - self.fieldTimeV[0] 706 deltaT2 = self.fieldTimeV[-1] - tmax 707 ifft_twindow = 1 / (self.frequencyAxis[1] - self.frequencyAxis[0]) 708 total_delay = ifft_twindow / 2 + (deltaT2 - deltaT1) / 2 709 positive_fft_field = positive_fft_field * np.exp( 710 -1.0j * 2 * np.pi * positive_freq * total_delay 711 ) 712 return positive_freq, positive_fft_field
Returns the positive frequency axis f and the corresponding FFT field (complex).
The main task of this function is to 'smooth' the phase of the FFT (i.e. remove the linear phase, which corresponds to a time shift in the temporal domain), since the waveform is usually centered around t = 0.
714 def get_fluence(self): 715 """Calculates the fluence of the trace. 716 717 convention: field [V/Å], time [fs], fluence [J/cm²] 718 F = c eps_0 integral(E^2)dt 719 720 721 Returns: 722 fluence: float 723 """ 724 F = ( 725 constants.speed_of_light 726 * constants.epsilon_0 727 * np.trapz(self.fieldV**2, self.fieldTimeV) 728 * 1e1 729 ) 730 return F
Calculates the fluence of the trace.
Convention: field [V/Å], time [fs], fluence [J/cm²]; F = c · ε₀ · ∫ E² dt. A worked unit check follows below.
Returns:
fluence: float
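The factor 1e1 that appears in the implementation is exactly the unit conversion implied by this convention; a worked check (pulse parameters are illustrative):

```python
import numpy as np
from scipy import constants

# (V/Å)² = 1e20 (V/m)², fs = 1e-15 s, J/m² = 1e-4 J/cm²  ->  1e20 · 1e-15 · 1e-4 = 1e1
t = np.linspace(-40.0, 40.0, 2001)                                      # fs
E = 0.05 * np.exp(-(t / 10.0) ** 2) * np.cos(2 * np.pi * 0.375 * t)     # field in V/Å (illustrative)
F = constants.speed_of_light * constants.epsilon_0 * np.trapz(E**2, t) * 1e1
print(f"fluence = {F:.3e} J/cm²")
```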
732 def set_fluence(self, fluence): 733 """Set the fluence of the trace. 734 735 Convention: field [V/Å], time [fs], fluence [J/cm²] 736 737 F = c eps_0 integral(E^2)dt""" 738 F = ( 739 constants.speed_of_light 740 * constants.epsilon_0 741 * np.trapz(self.fieldV**2, self.fieldTimeV) 742 * 1e1 743 ) 744 self.fieldV *= np.sqrt(fluence / F) 745 self.update_fft() 746 self.update_fft_spectrum()
Set the fluence of the trace.
Convention: field [V/Å], time [fs], fluence [J/cm²]
F = c · ε₀ · ∫ E² dt
748 def compute_complex_field(self): 749 """Computes the complex trace by inverting the positive-freq FFT and stores it in self.complexFieldV (for envelope or phase computation, for example) 750 751 Notice that the fourier transform of the trace is not computed, it is assumed to be already stored in the class. This should be true in most cases.""" 752 complex_spectrum = deepcopy(self.fftFieldV) 753 complex_spectrum[self.frequencyAxis < 0] = 0 754 self.complexFieldTimeV, self.complexFieldV = inverse_fourier_transform( 755 self.frequencyAxis, complex_spectrum 756 ) 757 self.complexFieldV = 2 * self.complexFieldV 758 self.strip_from_complex_trace()
Computes the complex trace by inverting the positive-freq FFT and stores it in self.complexFieldV (for envelope or phase computation, for example)
Notice that the Fourier transform of the trace is not recomputed here; it is assumed to be already stored in the class, which should be true in most cases.
760 def get_envelope(self): 761 """Get the envelope of the trace. 762 763 Returns: 764 time array (fs) 765 envelope array 766 """ 767 if self.complexFieldV is None: 768 self.compute_complex_field() 769 return self.complexFieldTimeV, np.abs(self.complexFieldV)
Get the envelope of the trace.
Returns:
- time array (fs)
- envelope array
771 def get_phase(self): 772 """Get the instantaneous phase of the trace. 773 774 If complexFieldV is already computed and stored, no re-calculation occurs 775 776 Returns: 777 time array (fs) 778 instantaneous phase array (fs) 779 """ 780 if self.complexFieldV is None: 781 self.compute_complex_field() 782 return self.complexFieldTimeV, np.angle(self.complexFieldV)
Get the instantaneous phase of the trace.
If complexFieldV is already computed and stored, no re-calculation occurs
Returns:
- time array (fs)
- instantaneous phase array (rad)
784 def get_zero_delay(self): 785 """Get the time value corresponding to the envelope peak. 786 787 Returns: 788 789 zero_delay: float 790 """ 791 # careful: the time grid should be fine enough to resolve the maximum of the envelope 792 t, en = self.get_envelope() 793 dt = t[1] - t[0] 794 max_index, max_value = find_maximum_location(en) 795 self.zero_delay = t[0] + dt * max_index 796 return self.zero_delay
Get the time value corresponding to the envelope peak.
Returns:
zero_delay: float
798 def get_FWHM(self): 799 """get the FWHM of the trace. 800 801 802 Returns: 803 FWHM: float 804 """ 805 t, en = self.get_envelope() 806 dt = t[1] - t[0] 807 return fwhm(en**2, dt)
Get the FWHM (fs) of the intensity envelope of the trace. A short usage sketch of the envelope-related getters follows below.
Returns:
FWHM: float
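A brief usage sketch of the envelope-related getters (import path and pulse parameters are illustrative assumptions):

```python
import numpy as np
from attoworld.personal.marco import TraceHandler  # assumed import path

t = np.linspace(-40.0, 40.0, 2001)
th = TraceHandler(time=t, field=np.exp(-(t / 10.0) ** 2) * np.cos(2 * np.pi * 0.375 * t))

t_env, envelope = th.get_envelope()   # |complex (analytic) field|
t0 = th.get_zero_delay()              # time of the envelope peak (fs)
tau = th.get_FWHM()                   # FWHM of the intensity envelope (fs)
```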
809 def fft_tukey_bandpass( 810 self, lowWavelengthEdge, upWavelengthEdge, lowEdgeWidth, highEdgeWidth 811 ): 812 """Applies a bandpass filter to the trace in the frequency domain using a tukey window. 813 814 The tukey window is 1 between lowWavelengthEdge+lowEdgeWidth/2 and upWavelengthEdge-highEdgeWidth/2 815 and it is reaches zero at lowWavelengthEdge-lowEdgeWidth/2 and upWavelengthEdge+highEdgeWidth/2. 816 Notice that the edges are only cosine-shaped in the frequency domain. In the wavelength domain upWavelengthEdge - lowWavelengthEdge does not coincide with the FWHM of the tukey function 817 818 Args: 819 lowWavelengthEdge: float (nm) 820 upWavelengthEdge: float (nm) 821 lowEdgeWidth: float (nm) 822 highEdgeWidth: float (nm) 823 """ 824 upFreqEdge = ( 825 constants.speed_of_light / (lowWavelengthEdge - lowEdgeWidth / 2) / 2 * 1e-6 826 + constants.speed_of_light 827 / (lowWavelengthEdge + lowEdgeWidth / 2) 828 / 2 829 * 1e-6 830 ) 831 lowFreqEdge = ( 832 constants.speed_of_light / (upWavelengthEdge - highEdgeWidth / 2) / 2 * 1e-6 833 + constants.speed_of_light 834 / (upWavelengthEdge + highEdgeWidth / 2) 835 / 2 836 * 1e-6 837 ) 838 839 upFreqEdgeWidth = ( 840 constants.speed_of_light / (lowWavelengthEdge - lowEdgeWidth / 2) * 1e-6 841 - constants.speed_of_light / (lowWavelengthEdge + lowEdgeWidth / 2) * 1e-6 842 ) 843 lowFreqEdgeWidth = ( 844 constants.speed_of_light / (upWavelengthEdge - highEdgeWidth / 2) * 1e-6 845 - constants.speed_of_light / (upWavelengthEdge + highEdgeWidth / 2) * 1e-6 846 ) 847 848 window = asymmetric_tukey_window( 849 np.abs(self.frequencyAxis), 850 lowFreqEdge, 851 upFreqEdge, 852 lowFreqEdgeWidth, 853 upFreqEdgeWidth, 854 ) 855 self.fftFieldV = self.fftFieldV * window 856 self.update_trace_from_fft() 857 self.update_fft_spectrum() 858 self.strip_from_trace()
Applies a bandpass filter to the trace in the frequency domain using a tukey window.
The Tukey window is 1 between lowWavelengthEdge+lowEdgeWidth/2 and upWavelengthEdge-highEdgeWidth/2, and it reaches zero at lowWavelengthEdge-lowEdgeWidth/2 and upWavelengthEdge+highEdgeWidth/2. Notice that the edges are only cosine-shaped in the frequency domain; in the wavelength domain upWavelengthEdge - lowWavelengthEdge does not coincide with the FWHM of the Tukey function. A usage sketch follows the argument list below.
Arguments:
- lowWavelengthEdge: float (nm)
- upWavelengthEdge: float (nm)
- lowEdgeWidth: float (nm)
- highEdgeWidth: float (nm)
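A usage sketch (all edge values are illustrative; the import path is assumed):

```python
import numpy as np
from attoworld.personal.marco import TraceHandler  # assumed import path

t = np.linspace(-40.0, 40.0, 2001)
th = TraceHandler(time=t, field=np.exp(-(t / 10.0) ** 2) * np.cos(2 * np.pi * 0.375 * t))

# keep roughly 500-1000 nm, with 100 nm wide cosine edges on both sides
th.fft_tukey_bandpass(
    lowWavelengthEdge=500.0, upWavelengthEdge=1000.0, lowEdgeWidth=100.0, highEdgeWidth=100.0
)
fig = th.plot_spectrum(low_lim=300, up_lim=1200)
```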
860 def apply_transmission(self, wavelengths, f): 861 """Applies a spectral transmission function to the trace (e.g. spectral filter). 862 863 Args: 864 wavelengths: ndarray = wavelength array (nm) 865 f: ndarray = transmission function f(λ) 866 """ 867 if np.any(np.diff(wavelengths)) < 0: 868 if np.all(np.diff(wavelengths)) < 0: 869 wavelengths = wavelengths[::-1] 870 f = f[::-1] 871 else: 872 raise ValueError( 873 "in function TraceHandler.apply_transmission() wavelength array is not monotonous" 874 ) 875 freq = constants.speed_of_light / wavelengths[::-1] * 1e-6 876 spectrum_freq = f[::-1] 877 878 # fill with zeros 879 initial_zero_freq = np.linspace( 880 freq[1] - freq[0], freq[0], int(np.ceil(4 * freq[0] / (freq[1] - freq[0]))) 881 ) 882 initial_zeros = np.zeros(len(initial_zero_freq)) 883 final_zero_freq = np.linspace( 884 2 * freq[-1] - freq[-2], 885 4 * freq[-1], 886 int(np.ceil(15 * freq[-1] / (freq[-1] - freq[-2]))), 887 ) 888 final_zeros = np.zeros(len(final_zero_freq)) 889 freq = np.concatenate((initial_zero_freq, freq, final_zero_freq)) 890 spectrum_freq = np.concatenate((initial_zeros, spectrum_freq, final_zeros)) 891 # add negative frequencies 892 freq = np.concatenate((-freq[::-1], freq)) 893 spectrum_freq = np.concatenate((spectrum_freq[::-1], spectrum_freq)) 894 895 # interpolate the spectrum to the frequency axis of the fft 896 spectrum_interp = np.interp(self.frequencyAxis, freq, spectrum_freq) 897 898 self.fftFieldV = self.fftFieldV * spectrum_interp 899 self.update_trace_from_fft() 900 self.update_fft_spectrum() 901 self.strip_from_trace()
Applies a spectral transmission function to the trace (e.g. spectral filter).
Arguments:
- wavelengths: ndarray = wavelength array (nm)
- f: ndarray = transmission function f(λ)
903 def apply_spectrum( 904 self, 905 wvl=None, 906 spectrum=None, 907 CEP_shift: float = 0.0, 908 stripZeroPadding: bool = True, 909 ): 910 """Applies a spectrum to the phase of the trace. This means that a new trace is computed and stored in the TraceHandler object (replacing the existing one); 911 the new trace has spectral intensity equal to the applied spectrum and spectral phase equal to the phase of the existing trace. 912 913 Args: 914 wvl: wavelength array (nm). If None (default) the comparison spectrum stored in the class (self.wvlSpectrometer and self.ISpectrometer) is applied. 915 spectrum: spectral intensity array. If None (default) the comparison spectrum stored in the class (self.wvlSpectrometer and self.ISpectrometer) is applied. 916 CEP_shift: a possible artificial phase shift IN UNITS OF PI! (default = 0) 917 stripZeroPadding (bool): whether to eliminate the zero padding (zeros appended to the trace). Default is True""" 918 919 if wvl is None or spectrum is None: 920 wvl = self.wvlSpectrometer 921 spectrum = self.ISpectrometer 922 # check that the wvl array is monotonically increasing 923 if np.any(np.diff(wvl) < 0): 924 if np.all(np.diff(wvl) < 0): 925 wvl = wvl[::-1] 926 spectrum = spectrum[::-1] 927 else: 928 raise ValueError( 929 "in function TraceHandler.apply_spectrum() wavelength array is not monotonous" 930 ) 931 932 # frequency spectrum (monotonically increasing) 933 freq = constants.speed_of_light / wvl[::-1] * 1e-6 934 spectrum_freq = spectrum[::-1] / ( 935 constants.speed_of_light / wvl[::-1] ** 2 * 1e-6 936 ) 937 spectrum_freq = np.maximum(spectrum_freq, np.zeros(len(spectrum_freq))) 938 939 # fill with zeros 940 initial_zero_freq = np.linspace( 941 freq[1] - freq[0], freq[0], int(np.ceil(4 * freq[0] / (freq[1] - freq[0]))) 942 ) 943 initial_zeros = np.zeros(len(initial_zero_freq)) 944 final_zero_freq = np.linspace( 945 2 * freq[-1] - freq[-2], 946 4 * freq[-1], 947 int(np.ceil(15 * freq[-1] / (freq[-1] - freq[-2]))), 948 ) 949 final_zeros = np.zeros(len(final_zero_freq)) 950 freq = np.concatenate((initial_zero_freq, freq, final_zero_freq)) 951 spectrum_freq = np.concatenate((initial_zeros, spectrum_freq, final_zeros)) 952 # add negative frequencies 953 freq = np.concatenate((-freq[::-1], freq)) 954 spectrum_freq = np.concatenate((spectrum_freq[::-1], spectrum_freq)) 955 956 # interpolate the spectrum to the frequency axis of the fft 957 spectrum_interp = np.interp(self.frequencyAxis, freq, spectrum_freq) 958 self.fftFieldV = np.sqrt(spectrum_interp) * np.exp( 959 1j * np.angle(self.fftFieldV) 960 + 1j * np.pi * CEP_shift * np.sign(self.frequencyAxis) 961 ) 962 963 self.update_trace_from_fft() 964 if stripZeroPadding: 965 self.strip_from_trace() 966 else: 967 self.fieldStdevV = None 968 self.update_fft_spectrum()
Applies a spectrum to the phase of the trace: a new trace is computed and stored in the TraceHandler object (replacing the existing one); the new trace has spectral intensity equal to the applied spectrum and spectral phase equal to the phase of the existing trace.
Arguments:
- wvl: wavelength array (nm). If None (default) the comparison spectrum stored in the class (self.wvlSpectrometer and self.ISpectrometer) is applied.
- spectrum: spectral intensity array. If None (default) the comparison spectrum stored in the class (self.wvlSpectrometer and self.ISpectrometer) is applied.
- CEP_shift: an artificial carrier-envelope phase shift, in units of π (not rad; default = 0)
- stripZeroPadding (bool): whether to eliminate the zero padding (zeros appended to the trace). Default is True
970 def fresnel_reflection( 971 self, 972 material2, 973 angle_in, 974 material1=None, 975 forward: bool = True, 976 s_polarized: bool = True, 977 path: str = "./RefractiveIndices/", 978 ): 979 """calculates the fresnel reflection of the pulse at the interface between two materials. The waveform is travelling from material 1 to material 2. 980 The first medium (material1) should be non-absorptive. As usual the resulting waveform is stored in the TraceHandler object, replacing the previous one. 981 982 Refractive index files should contain 3 space-separated columns, respectively with headers: wvl n k, where wvl is the wavelength in um, n and k resp. the real and imaginary part of the refractive index. 983 Args: 984 material2: the filename (without '.txt') of the refractive index data for the material after the interface (e.g. Si Al MgF2); wavelength is in um in the refractive index file 985 angle_in: the incidence angle in degrees 986 material1: the filename (without '.txt') of the refractive index data for the material before the interface. If None (default), vacuum is assumed 987 forward (bool): if True (default) forward reflection is computed (the result waveform is the reflection of the previously stored waveform. 988 989 if False backward reflection is computed (the previous waveform is the reflection of the result waveform) 990 s_polarized (bool): True. Reflection calculation only implemented for s-polarized light 991 path (str): path for the refractive index files. Defaults to "./RefractiveIndices/" 992 """ 993 if not s_polarized: 994 raise ValueError( 995 "in function TraceHandler.fresnel_reflection() p_polarized is not implemented yet\n" 996 ) 997 998 # read refractive index 999 refIndexData = pandas.read_table( 1000 path + material2 + ".txt", sep=" ", keep_default_na=False 1001 ) 1002 wvl2 = np.array(refIndexData["wvl"]) * 1e3 1003 n2 = np.array(refIndexData["n"]) + 1j * np.array(refIndexData["k"]) 1004 if material1 is None: 1005 wvl1 = np.array(wvl2) 1006 n1 = n2 * 0 + 1 1007 else: 1008 refIndexData = pandas.read_table( 1009 path + material1 + ".txt", sep=" ", keep_default_na=False 1010 ) 1011 wvl1 = np.array(refIndexData["wvl"]) * 1e3 1012 n1 = np.array(refIndexData["n"]) + 1j * np.array(refIndexData["k"]) 1013 1014 # check that the wvl arrays are monotonically increasing 1015 if np.any(np.diff(wvl1) < 0): 1016 if np.all(np.diff(wvl1) < 0): 1017 wvl1 = wvl1[::-1] 1018 n1 = n1[::-1] 1019 else: 1020 raise ValueError( 1021 "in function TraceHandler.fresnel_reflection() wavelength array is not monotonous" 1022 ) 1023 if np.any(np.diff(wvl2) < 0): 1024 if np.all(np.diff(wvl2) < 0): 1025 wvl2 = wvl2[::-1] 1026 n2 = n2[::-1] 1027 else: 1028 raise ValueError( 1029 "in function TraceHandler.fresnel_reflection() wavelength array is not monotonous" 1030 ) 1031 1032 # frequency spectrum (monotonically increasing) 1033 freq1 = constants.speed_of_light / wvl1[::-1] * 1e-6 1034 freq2 = constants.speed_of_light / wvl2[::-1] * 1e-6 1035 n1 = n1[::-1] 1036 n2 = n2[::-1] 1037 1038 # fill with ones 1039 initial_ones_freq = np.linspace( 1040 freq1[1] - freq1[0], 1041 freq1[0], 1042 int(np.ceil(4 * freq1[0] / (freq1[1] - freq1[0]))), 1043 ) 1044 initial_ones = np.ones(len(initial_ones_freq)) * n1[0] 1045 final_ones_freq = np.linspace( 1046 2 * freq1[-1] - freq1[-2], 1047 4 * freq1[-1], 1048 int(np.ceil(15 * freq1[-1] / (freq1[-1] - freq1[-2]))), 1049 ) 1050 final_ones = np.ones(len(final_ones_freq)) * n1[-1] 1051 freq1 = np.concatenate((initial_ones_freq, freq1, final_ones_freq)) 1052 n1 = 
np.concatenate((initial_ones, n1, final_ones)) 1053 # same for n2 1054 initial_ones_freq = np.linspace( 1055 freq2[1] - freq2[0], 1056 freq2[0], 1057 int(np.ceil(4 * freq2[0] / (freq2[1] - freq2[0]))), 1058 ) 1059 initial_ones = np.ones(len(initial_ones_freq)) * n2[0] 1060 final_ones_freq = np.linspace( 1061 2 * freq2[-1] - freq2[-2], 1062 4 * freq2[-1], 1063 int(np.ceil(15 * freq2[-1] / (freq2[-1] - freq2[-2]))), 1064 ) 1065 final_ones = np.ones(len(final_ones_freq)) * n2[-1] 1066 freq2 = np.concatenate((initial_ones_freq, freq2, final_ones_freq)) 1067 n2 = np.concatenate((initial_ones, n2, final_ones)) 1068 1069 # add negative frequencies 1070 freq1 = np.concatenate((-freq1[::-1], freq1)) 1071 n1 = np.concatenate((np.conjugate(n1[::-1]), n1)) 1072 freq2 = np.concatenate((-freq2[::-1], freq2)) 1073 n2 = np.concatenate((np.conjugate(n2[::-1]), n2)) 1074 1075 # interpolate the n1 and n2 to the frequency axis of the fft 1076 n1Interp = np.interp(self.frequencyAxis, freq1, n1) 1077 n2Interp = np.interp(self.frequencyAxis, freq2, n2) 1078 1079 # calculate the fresnel reflection coefficients using only the angle of incidence (NOT SURE THIS WORKS WHEN THE FIRST MEDIUM IS LOSSY) 1080 r = ( 1081 n1Interp * np.cos(angle_in * np.pi / 180) 1082 - n2Interp 1083 * np.sqrt(1 - (n1Interp / n2Interp * np.sin(angle_in * np.pi / 180)) ** 2) 1084 ) / ( 1085 n1Interp * np.cos(angle_in * np.pi / 180) 1086 + n2Interp 1087 * np.sqrt(1 - (n1Interp / n2Interp * np.sin(angle_in * np.pi / 180)) ** 2) 1088 ) 1089 if forward: 1090 self.fftFieldV = self.fftFieldV * r 1091 else: 1092 self.fftFieldV = self.fftFieldV / r 1093 1094 self.fieldStdevV = None 1095 1096 self.update_trace_from_fft() 1097 self.strip_from_trace() 1098 self.update_fft_spectrum()
calculates the Fresnel reflection of the pulse at the interface between two materials. The waveform travels from material 1 to material 2. The first medium (material1) should be non-absorptive. As usual, the resulting waveform is stored in the TraceHandler object, replacing the previous one. A standalone sketch of the reflection coefficient follows the argument list below.
Refractive index files should contain 3 space-separated columns with headers wvl, n and k, where wvl is the wavelength in um and n, k are respectively the real and imaginary parts of the refractive index.
Arguments:
- material2: the filename (without '.txt') of the refractive index data for the material after the interface (e.g. Si, Al, MgF2); the wavelength in the refractive index file is in um
- angle_in: the incidence angle in degrees
- material1: the filename (without '.txt') of the refractive index data for the material before the interface. If None (default), vacuum is assumed
- forward (bool): if True (default) forward reflection is computed (the resulting waveform is the reflection of the previously stored waveform); if False, backward reflection is computed (the previously stored waveform is the reflection of the resulting waveform)
- s_polarized (bool): default True; the reflection calculation is only implemented for s-polarized light
- path (str): path for the refractive index files. Defaults to "./RefractiveIndices/"
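The per-frequency coefficient used here is the standard s-polarized Fresnel expression; a standalone sketch with complex refractive indices (the numerical values are illustrative):

```python
import numpy as np

def fresnel_rs(n1, n2, angle_in_deg):
    # complex s-polarized reflection coefficient for incidence from medium 1 onto medium 2
    theta = np.deg2rad(angle_in_deg)
    cos_t = np.sqrt(1 - (n1 / n2 * np.sin(theta)) ** 2)   # cosine of the refraction angle
    return (n1 * np.cos(theta) - n2 * cos_t) / (n1 * np.cos(theta) + n2 * cos_t)

r = fresnel_rs(n1=1.0, n2=1.45 + 0.0j, angle_in_deg=45.0)  # e.g. vacuum onto a glass-like index
print(abs(r) ** 2)                                          # intensity reflectivity
```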
1100 def apply_zero_phase(self): 1101 """Applies zero-phase to the trace; this allows, for example, to retrieve the fourier limited pulse corresponding to the same FFT spectrum of the loaded trace""" 1102 tau = ( 1103 self.fsZeroPadding + (np.max(self.fieldTimeV) - np.min(self.fieldTimeV)) / 2 1104 ) 1105 self.fftFieldV = np.abs(self.fftFieldV) * np.exp( 1106 1.0j * (2 * np.pi * tau * self.frequencyAxis) 1107 ) 1108 self.update_trace_from_fft() 1109 self.strip_from_trace() 1110 self.update_fft_spectrum()
Applies zero phase to the trace; this allows one, for example, to retrieve the Fourier-limited pulse corresponding to the FFT spectrum of the loaded trace.
1112 def time_frequency_analysis( 1113 self, sigma_time, low_lim=None, up_lim=None, low_lim_freq=None, up_lim_freq=None 1114 ): 1115 """Performs time-frequency analysis by using scipy's short time fourier transform (fourier transform of the trace convoluted by a 'sigma_time' broad gaussian). 1116 1117 Args: 1118 sigma_time: sigma of the gaussian window 1119 low_lim, up_lim (float): xaxis limits for plotting. Default None 1120 low_lim_freq, up_lim_freq (float): xaxis limits for plotting. Default None 1121 1122 """ 1123 dt = np.mean(np.diff(self.fieldTimeV)) 1124 w = scipy.signal.windows.gaussian( 1125 int(sigma_time / dt * 6) + 1, sigma_time / dt, sym=True 1126 ) 1127 TFA = scipy.signal.ShortTimeFFT( 1128 w, hop=1, fs=1.0 / dt, mfft=int(sigma_time / dt * 24), scale_to="magnitude" 1129 ) 1130 TFData = TFA.stft(self.fieldV) 1131 1132 fig, ax = plt.subplots() 1133 t_lo, t_hi, f_lo, f_hi = TFA.extent( 1134 self.fieldV.size 1135 ) # time and freq range of plot 1136 if low_lim is None: 1137 low_lim = t_lo + self.fieldTimeV[0] 1138 if up_lim is None: 1139 up_lim = t_hi + self.fieldTimeV[0] 1140 if low_lim_freq is None: 1141 low_lim_freq = 0 1142 if up_lim_freq is None: 1143 up_lim_freq = 3.0 1144 1145 ax.set(xlim=(low_lim, up_lim), ylim=(low_lim_freq, up_lim_freq)) 1146 ax.set_xlabel("Time (fs)") 1147 ax.set_ylabel("Frequency (PHz)") 1148 im1 = ax.imshow( 1149 abs(TFData) / np.max(abs(TFData)), 1150 origin="lower", 1151 aspect="auto", 1152 extent=(t_lo + self.fieldTimeV[0], t_hi + self.fieldTimeV[0], f_lo, f_hi), 1153 cmap="viridis", 1154 ) 1155 cbar = fig.colorbar(im1) 1156 1157 cbar.ax.set_ylabel("Magnitude of the field (Arb. unit)") 1158 fig.tight_layout() 1159 return TFData
Performs time-frequency analysis using scipy's short-time Fourier transform (Fourier transform of the trace multiplied by a sliding Gaussian window of width sigma_time). A usage sketch follows the argument list below.
Arguments:
- sigma_time: sigma of the gaussian window
- low_lim, up_lim (float): x-axis (time) limits for plotting. Default None
- low_lim_freq, up_lim_freq (float): y-axis (frequency) limits for plotting. Default None
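A usage sketch with a chirped test pulse (import path and parameters are illustrative assumptions):

```python
import numpy as np
from attoworld.personal.marco import TraceHandler  # assumed import path

t = np.linspace(-100.0, 100.0, 4001)
field = np.exp(-(t / 20.0) ** 2) * np.cos(2 * np.pi * (0.375 + 0.002 * t) * t)  # chirped pulse
th = TraceHandler(time=t, field=field)

# Gaussian window with sigma = 10 fs; returns the STFT data and draws the spectrogram
TFData = th.time_frequency_analysis(sigma_time=10.0, low_lim=-80, up_lim=80, up_lim_freq=1.0)
```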
1161 def plot_trace(self, low_lim=None, up_lim=None, normalize: bool = True): 1162 """Plots the field trace. 1163 1164 Args: 1165 low_lim, up_lim: float = xaxis limits for plotting. Default None 1166 normalize: bool = if True (default) normalize the peak of the trace to 1 1167 """ 1168 fig, ax = plt.subplots() 1169 if normalize: 1170 norm_plot = self.normalization_trace 1171 else: 1172 norm_plot = 1 1173 main_line = ax.plot( 1174 self.fieldTimeV, self.fieldV / norm_plot, label="Field trace" 1175 ) 1176 if self.fieldStdevV is not None: 1177 ax.fill_between( 1178 self.fieldTimeV, 1179 (self.fieldV - self.fieldStdevV) / norm_plot, 1180 (self.fieldV + self.fieldStdevV) / norm_plot, 1181 color=main_line[0].get_color(), 1182 alpha=0.3, 1183 ) 1184 ax.set_xlabel("Time (fs)") 1185 ax.set_ylabel("Field (Arb. unit)") 1186 1187 if low_lim is not None and up_lim is not None: 1188 ax.set_xlim(low_lim, up_lim) 1189 return fig
Plots the field trace.
Arguments:
- low_lim, up_lim (float): x-axis limits for plotting. Default None
- normalize: bool = if True (default) normalize the peak of the trace to 1
1191 def plot_spectrum( 1192 self, low_lim=40, up_lim=1000, no_phase: bool = False, phase_blanking_level=0.05 1193 ): 1194 """Plots the trace spectrum and phase together with the spectrometer measurement [if provided]. 1195 1196 Args: 1197 low_lim, up_lim (float): xaxis limits for plotting. Default: 40, 1000 1198 no_phase: if True, don't plot the phase. Default: False 1199 """ 1200 fig, ax = plt.subplots() 1201 1202 if not no_phase: 1203 ax2 = ax.twinx() 1204 lines = [] 1205 min_intensity = phase_blanking_level * np.max(self.fftSpectrum) 1206 lines += ax.plot( 1207 self.wvlAxis[(self.wvlAxis > low_lim) & (self.wvlAxis < up_lim)], 1208 self.fftSpectrum[(self.wvlAxis > low_lim) & (self.wvlAxis < up_lim)], 1209 label="Fourier transform", 1210 ) 1211 if not no_phase: 1212 ax2.plot([], []) 1213 if self.wvlSpectrometer is not None: 1214 ax2.plot([], []) 1215 lines += ax2.plot( 1216 self.wvlAxis[ 1217 (self.wvlAxis > low_lim) 1218 & (self.wvlAxis < up_lim) 1219 & (self.fftSpectrum > min_intensity) 1220 ], 1221 self.fftphase[ 1222 (self.wvlAxis > low_lim) 1223 & (self.wvlAxis < up_lim) 1224 & (self.fftSpectrum > min_intensity) 1225 ], 1226 "--", 1227 label="Phase", 1228 ) 1229 if self.wvlSpectrometer is not None: 1230 lines += ax.plot( 1231 self.wvlSpectrometer[ 1232 (self.wvlSpectrometer > low_lim) & (self.wvlSpectrometer < up_lim) 1233 ], 1234 self.ISpectrometer[ 1235 (self.wvlSpectrometer > low_lim) & (self.wvlSpectrometer < up_lim) 1236 ], 1237 label="Spectrometer", 1238 ) 1239 ax.set_xlabel("Wavelength (nm)") 1240 ax.set_ylabel("Intensity (Arb. unit)") 1241 ax.tick_params(axis="both") 1242 if not no_phase: 1243 ax2.set_ylabel("Phase (rad)") 1244 ax2.tick_params(axis="y") 1245 ax.legend(lines, [l.get_label() for l in lines]) 1246 return fig
Plots the trace spectrum and phase together with the spectrometer measurement [if provided].
Arguments:
- low_lim, up_lim (float): x-axis limits for plotting. Default: 40, 1000
- no_phase: if True, don't plot the phase. Default: False
1249class MultiTraceHandler: 1250 """Initializes and stores multiple TraceHandler objects. It is used to plot multiple traces in a single graph, compare them, and perform operations between them 1251 1252 ALL TIMES IN FS, ALL WAVELENGTHS IN NM, ALL FREQUENCIES IN PHz 1253 1254 All stored traces are accessible as TraceHandler object in the corresponding list. 1255 1256 Attributes: 1257 traceHandlers (list): list of traceHandler objects 1258 fsZeroPadding (float): zero padding in fs for the traces (default 150 fs) 1259 """ 1260 1261 def __init__( 1262 self, 1263 filenameList=None, 1264 filenameSpectrumList=None, 1265 timeList=None, 1266 fieldList=None, 1267 stdevList=None, 1268 wvlList=None, 1269 spectrumList=None, 1270 traceHandlerList=None, 1271 ): 1272 """Constructor of MultiTraceHandler. 1273 1274 One of the following arguments must be provided (all arguments default to None: 1275 * filenameList: list of filenames containing the field traces (see TraceHandler constructor) 1276 * timeList AND fieldList: list of time arrays and field arrays (see TraceHandler constructor) 1277 * traceHandlerList: list of TraceHandler objects 1278 1279 Args: 1280 filenameList : list of filenames containing the field traces (see TraceHandler constructor) 1281 filenameSpectrumList : list of filenames containing the spectrum traces (spectrometer data; see TraceHandler constructor) 1282 timeList : list of time arrays (see TraceHandler constructor) 1283 fieldList : list of field arrays (see TraceHandler constructor) 1284 stdevList : list of standard deviation arrays (see TraceHandler constructor) 1285 wvlList : list of wavelength arrays (spectrometer data; see TraceHandler constructor) 1286 spectrumList : list of spectrum arrays (spectrometer data; see TraceHandler constructor) 1287 traceHandlerList : list of TraceHandler objects 1288 """ 1289 1290 self.fsZeroPadding = 150 1291 self.traceHandlers = [] 1292 if filenameList is not None: 1293 for i in range(len(filenameList)): 1294 if filenameSpectrumList is not None: 1295 self.traceHandlers.append( 1296 TraceHandler( 1297 filename=filenameList[i], 1298 filename_spectrum=filenameSpectrumList[i], 1299 ) 1300 ) 1301 else: 1302 self.traceHandlers.append(TraceHandler(filename=filenameList[i])) 1303 elif timeList is not None and fieldList is not None: 1304 if stdevList is None: 1305 stdevList = [None] * len(timeList) 1306 if wvlList is None: 1307 wvlList = [None] * len(timeList) 1308 if spectrumList is None: 1309 spectrumList = [None] * len(timeList) 1310 check_equal_length(timeList, fieldList, stdevList, wvlList, spectrumList) 1311 for i in range(len(timeList)): 1312 self.traceHandlers.append( 1313 TraceHandler( 1314 time=timeList[i], 1315 field=fieldList[i], 1316 stdev=stdevList[i], 1317 wvl=wvlList[i], 1318 spectrum=spectrumList[i], 1319 ) 1320 ) 1321 elif traceHandlerList is not None: 1322 for i in range(len(traceHandlerList)): 1323 if isinstance(traceHandlerList[i], TraceHandler): 1324 self.traceHandlers.append(traceHandlerList[i]) 1325 else: 1326 raise ValueError( 1327 "in function MultiTraceHandler.__init__() traceHandlerList is not a list of TraceHandler objects\n" 1328 ) 1329 else: 1330 raise ValueError( 1331 "in function MultiTraceHandler.__init__() too many arguments were None\n" 1332 "probably the constructor was erroneously used with no or too few arguments\n" 1333 "use either filenameList and filenameSpectrumList or timeList, fieldList or traceHandlerList\n" 1334 ) 1335 1336 def append_trace( 1337 self, 1338 filename=None, 1339 filename_spectrum=None, 1340 
timeV=None, 1341 fieldV=None, 1342 stdevV=None, 1343 wvl=None, 1344 spectrum=None, 1345 traceHandler=None, 1346 ): 1347 """Append a new trace to the list. Usual rules apply. 1348 1349 Args: 1350 filename 1351 filename_spectrum 1352 timeV 1353 fieldV 1354 stdevV 1355 wvl 1356 spectrum 1357 traceHandler 1358 """ 1359 if filename_spectrum is not None and filename is not None: 1360 self.traceHandlers.append(TraceHandler(filename, filename_spectrum)) 1361 elif filename is not None: 1362 self.traceHandlers.append(TraceHandler(filename)) 1363 elif timeV is not None and fieldV is not None: 1364 self.traceHandlers.append( 1365 TraceHandler( 1366 time=timeV, field=fieldV, stdev=stdevV, wvl=wvl, spectrum=spectrum 1367 ) 1368 ) 1369 elif traceHandler is not None: 1370 if isinstance(traceHandler, TraceHandler): 1371 self.traceHandlers.append(traceHandler) 1372 else: 1373 raise ValueError( 1374 "in function MultiTraceHandler.append_trace() traceHandler is not a TraceHandler object\n" 1375 ) 1376 else: 1377 raise ValueError( 1378 "in function MultiTraceHandler.append_trace() filename and filename_spectrum cannot be None\n" 1379 "it might be that the function was erroneously used with no arguments\n" 1380 ) 1381 1382 def flip_trace(self, index: int): 1383 """flips the trace number 'index' 1384 1385 Args: 1386 index: int 1387 """ 1388 if index >= len(self.traceHandlers): 1389 raise ValueError( 1390 "in function MultiTraceHandler.flip() index is out of range\n" 1391 ) 1392 self.traceHandlers[index].fieldV = -self.traceHandlers[index].fieldV 1393 self.traceHandlers[index].fftFieldV = -self.traceHandlers[index].fftFieldV 1394 self.traceHandlers[index].complexFieldV = -self.traceHandlers[ 1395 index 1396 ].complexFieldV 1397 1398 def tukey_bandpass( 1399 self, lowWavelengthEdge, upWavelengthEdge, lowEdgeWidth, highEdgeWidth 1400 ): 1401 """applies a bandpass filter the traces in the frequency domain using a tukey window. See traceHandler's docs""" 1402 for i in range(len(self.traceHandlers)): 1403 self.traceHandlers[i].fft_tukey_bandpass( 1404 lowWavelengthEdge, upWavelengthEdge, lowEdgeWidth, highEdgeWidth 1405 ) 1406 1407 def apply_spectrum( 1408 self, wvl=None, spectrum=None, CEP_shift=0.0, stripZeroPadding=True 1409 ): 1410 """applies spectrum to the phase of the pulse. See traceHandler's docs""" 1411 for i in range(len(self.traceHandlers)): 1412 self.traceHandlers[i].apply_spectrum( 1413 wvl, spectrum, CEP_shift, stripZeroPadding 1414 ) 1415 1416 def plot_traces( 1417 self, 1418 low_lim=None, 1419 up_lim=None, 1420 labels=None, 1421 delay_shift=None, 1422 offset: float = 2.0, 1423 errorbar: bool = False, 1424 normalize: bool = True, 1425 ): 1426 """plots all traces. 1427 1428 Args: 1429 low_lim (float): for xaxis 1430 up_lim (float): for xaxis 1431 labels (list): list of labels for the legend 1432 delay_shift (list): list of delay offsets (one per trace) 1433 offset (float): y-axis offset between traces 1434 errorbar (bool): whether to plot errors. 
Default False 1435 normalize (bool): Default True 1436 """ 1437 fig, ax = plt.subplots() 1438 if delay_shift is None: 1439 delay_shift = [0.0] * len(self.traceHandlers) 1440 check_equal_length(delay_shift, self.traceHandlers) 1441 1442 for i in range(len(self.traceHandlers)): 1443 if normalize: 1444 norm_plot = self.traceHandlers[i].normalization_trace 1445 else: 1446 norm_plot = 1 1447 t, field = self.traceHandlers[i].get_trace() 1448 stdev_field = self.traceHandlers[i].get_stdev() 1449 if errorbar and stdev_field is not None: 1450 last_fill = ax.fill_between( 1451 t - delay_shift[i], 1452 offset * i + (field - stdev_field) / norm_plot, 1453 offset * i + (field + stdev_field) / norm_plot, 1454 label="_nolegend_", 1455 alpha=0.3, 1456 ) 1457 ax.plot( 1458 t - delay_shift[i], 1459 offset * i + field / norm_plot, 1460 label="Trace " + str(i), 1461 color=last_fill.get_facecolor(), 1462 alpha=1.0, 1463 ) 1464 else: 1465 ax.plot( 1466 t - delay_shift[i], 1467 offset * i + field / norm_plot, 1468 label="Trace " + str(i), 1469 ) 1470 ax.set_xlabel("Time (fs)") 1471 ax.set_ylabel("Field (Arb. unit)") 1472 1473 if labels is not None: 1474 handles, labels_dump = ax.get_legend_handles_labels() 1475 plt.legend(handles[::-1], labels[::-1], loc="upper left") 1476 else: 1477 pass 1478 if low_lim is not None and up_lim is not None: 1479 ax.set_xlim(low_lim, up_lim) 1480 return fig 1481 1482 def plot_spectra( 1483 self, low_lim=50, up_lim=1000, labels=None, offset=0.015, logscale: bool = False 1484 ): 1485 """Plot all spectra. 1486 low_lim: lower limit wavelength (nm). Default: 50 nm 1487 up_lim: upper limit wavelength (nm). Default: 1000 nm 1488 labels: label list for the plot legend. Labels should be in the same order as the stored traceHandler objects 1489 offset: artificial offset between two spectra for display purposes. Default: 0.015 1490 logscale: (bool); whether to plot in a logscale 1491 1492 """ 1493 fig, ax = plt.subplots() 1494 for i in range(len(self.traceHandlers)): 1495 self.traceHandlers[i].normalize_fft_spectrum() 1496 wvl, spctr = self.traceHandlers[i].get_spectrum_trace() 1497 if logscale: 1498 ax.plot(wvl, (offset**i) * spctr) 1499 ylow = np.mean(spctr[(wvl < up_lim) & (wvl > low_lim)]) / (offset**3) 1500 yup = np.mean(spctr[(wvl < up_lim) & (wvl > low_lim)]) * ( 1501 offset ** (len(self.traceHandlers)) 1502 ) 1503 ax.set_ylim(ylow, yup) 1504 else: 1505 ax.plot(wvl, i * offset + spctr) 1506 ax.set_xlabel("Wavelength (nm)") 1507 ax.set_ylabel("Intensity (Arb. unit)") 1508 if labels is not None: 1509 handles, labels_dump = ax.get_legend_handles_labels() 1510 plt.legend(handles[::-1], labels[::-1], loc="upper right") 1511 else: 1512 pass 1513 if low_lim is not None and up_lim is not None: 1514 ax.set_xlim(low_lim, up_lim) 1515 return fig
Initializes and stores multiple TraceHandler objects. It is used to plot multiple traces in a single graph, compare them, and perform operations between them.
ALL TIMES IN FS, ALL WAVELENGTHS IN NM, ALL FREQUENCIES IN PHz
All stored traces are accessible as TraceHandler objects in the corresponding list.
Attributes:
- traceHandlers (list): list of TraceHandler objects
- fsZeroPadding (float): zero padding in fs for the traces (default 150 fs)
1261 def __init__( 1262 self, 1263 filenameList=None, 1264 filenameSpectrumList=None, 1265 timeList=None, 1266 fieldList=None, 1267 stdevList=None, 1268 wvlList=None, 1269 spectrumList=None, 1270 traceHandlerList=None, 1271 ): 1272 """Constructor of MultiTraceHandler. 1273 1274 One of the following arguments must be provided (all arguments default to None: 1275 * filenameList: list of filenames containing the field traces (see TraceHandler constructor) 1276 * timeList AND fieldList: list of time arrays and field arrays (see TraceHandler constructor) 1277 * traceHandlerList: list of TraceHandler objects 1278 1279 Args: 1280 filenameList : list of filenames containing the field traces (see TraceHandler constructor) 1281 filenameSpectrumList : list of filenames containing the spectrum traces (spectrometer data; see TraceHandler constructor) 1282 timeList : list of time arrays (see TraceHandler constructor) 1283 fieldList : list of field arrays (see TraceHandler constructor) 1284 stdevList : list of standard deviation arrays (see TraceHandler constructor) 1285 wvlList : list of wavelength arrays (spectrometer data; see TraceHandler constructor) 1286 spectrumList : list of spectrum arrays (spectrometer data; see TraceHandler constructor) 1287 traceHandlerList : list of TraceHandler objects 1288 """ 1289 1290 self.fsZeroPadding = 150 1291 self.traceHandlers = [] 1292 if filenameList is not None: 1293 for i in range(len(filenameList)): 1294 if filenameSpectrumList is not None: 1295 self.traceHandlers.append( 1296 TraceHandler( 1297 filename=filenameList[i], 1298 filename_spectrum=filenameSpectrumList[i], 1299 ) 1300 ) 1301 else: 1302 self.traceHandlers.append(TraceHandler(filename=filenameList[i])) 1303 elif timeList is not None and fieldList is not None: 1304 if stdevList is None: 1305 stdevList = [None] * len(timeList) 1306 if wvlList is None: 1307 wvlList = [None] * len(timeList) 1308 if spectrumList is None: 1309 spectrumList = [None] * len(timeList) 1310 check_equal_length(timeList, fieldList, stdevList, wvlList, spectrumList) 1311 for i in range(len(timeList)): 1312 self.traceHandlers.append( 1313 TraceHandler( 1314 time=timeList[i], 1315 field=fieldList[i], 1316 stdev=stdevList[i], 1317 wvl=wvlList[i], 1318 spectrum=spectrumList[i], 1319 ) 1320 ) 1321 elif traceHandlerList is not None: 1322 for i in range(len(traceHandlerList)): 1323 if isinstance(traceHandlerList[i], TraceHandler): 1324 self.traceHandlers.append(traceHandlerList[i]) 1325 else: 1326 raise ValueError( 1327 "in function MultiTraceHandler.__init__() traceHandlerList is not a list of TraceHandler objects\n" 1328 ) 1329 else: 1330 raise ValueError( 1331 "in function MultiTraceHandler.__init__() too many arguments were None\n" 1332 "probably the constructor was erroneously used with no or too few arguments\n" 1333 "use either filenameList and filenameSpectrumList or timeList, fieldList or traceHandlerList\n" 1334 )
Constructor of MultiTraceHandler.
One of the following arguments must be provided (all arguments default to None):
- filenameList: list of filenames containing the field traces (see TraceHandler constructor)
- timeList AND fieldList: list of time arrays and field arrays (see TraceHandler constructor)
- traceHandlerList: list of TraceHandler objects
Arguments:
- filenameList : list of filenames containing the field traces (see TraceHandler constructor)
- filenameSpectrumList : list of filenames containing the spectrum traces (spectrometer data; see TraceHandler constructor)
- timeList : list of time arrays (see TraceHandler constructor)
- fieldList : list of field arrays (see TraceHandler constructor)
- stdevList : list of standard deviation arrays (see TraceHandler constructor)
- wvlList : list of wavelength arrays (spectrometer data; see TraceHandler constructor)
- spectrumList : list of spectrum arrays (spectrometer data; see TraceHandler constructor)
- traceHandlerList : list of TraceHandler objects
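A minimal construction sketch (assuming the class is importable from this package; the arrays are synthetic placeholder data, with time in fs as per the class conventions):

    import numpy as np
    from attoworld.personal.marco import MultiTraceHandler

    t = np.linspace(-100.0, 100.0, 2001)                   # time axis in fs
    field_a = np.exp(-t**2 / 400.0) * np.cos(0.5 * t)      # two synthetic field traces
    field_b = np.exp(-t**2 / 400.0) * np.cos(0.5 * t + 1.0)
    mth = MultiTraceHandler(timeList=[t, t], fieldList=[field_a, field_b])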
1336 def append_trace( 1337 self, 1338 filename=None, 1339 filename_spectrum=None, 1340 timeV=None, 1341 fieldV=None, 1342 stdevV=None, 1343 wvl=None, 1344 spectrum=None, 1345 traceHandler=None, 1346 ): 1347 """Append a new trace to the list. Usual rules apply. 1348 1349 Args: 1350 filename 1351 filename_spectrum 1352 timeV 1353 fieldV 1354 stdevV 1355 wvl 1356 spectrum 1357 traceHandler 1358 """ 1359 if filename_spectrum is not None and filename is not None: 1360 self.traceHandlers.append(TraceHandler(filename, filename_spectrum)) 1361 elif filename is not None: 1362 self.traceHandlers.append(TraceHandler(filename)) 1363 elif timeV is not None and fieldV is not None: 1364 self.traceHandlers.append( 1365 TraceHandler( 1366 time=timeV, field=fieldV, stdev=stdevV, wvl=wvl, spectrum=spectrum 1367 ) 1368 ) 1369 elif traceHandler is not None: 1370 if isinstance(traceHandler, TraceHandler): 1371 self.traceHandlers.append(traceHandler) 1372 else: 1373 raise ValueError( 1374 "in function MultiTraceHandler.append_trace() traceHandler is not a TraceHandler object\n" 1375 ) 1376 else: 1377 raise ValueError( 1378 "in function MultiTraceHandler.append_trace() filename and filename_spectrum cannot be None\n" 1379 "it might be that the function was erroneously used with no arguments\n" 1380 )
Append a new trace to the list. The same argument rules as for the constructor apply, but for a single trace: provide either filename (optionally together with filename_spectrum), or timeV and fieldV (optionally with stdevV, wvl and spectrum), or a ready TraceHandler object.
Arguments:
- filename: file containing the field trace (see TraceHandler constructor)
- filename_spectrum: file containing the spectrometer data (see TraceHandler constructor)
- timeV: time array of the trace
- fieldV: field array of the trace
- stdevV: standard deviation array (optional)
- wvl: wavelength array of the spectrometer data (optional)
- spectrum: spectrum array of the spectrometer data (optional)
- traceHandler: TraceHandler object to be appended directly
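Continuing the construction sketch above, one more synthetic trace can be appended (timeV and fieldV are placeholder arrays):

    field_c = np.exp(-t**2 / 400.0) * np.cos(0.5 * t + 2.0)
    mth.append_trace(timeV=t, fieldV=field_c)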
1382 def flip_trace(self, index: int): 1383 """flips the trace number 'index' 1384 1385 Args: 1386 index: int 1387 """ 1388 if index >= len(self.traceHandlers): 1389 raise ValueError( 1390 "in function MultiTraceHandler.flip() index is out of range\n" 1391 ) 1392 self.traceHandlers[index].fieldV = -self.traceHandlers[index].fieldV 1393 self.traceHandlers[index].fftFieldV = -self.traceHandlers[index].fftFieldV 1394 self.traceHandlers[index].complexFieldV = -self.traceHandlers[ 1395 index 1396 ].complexFieldV
Flips the sign of the field of trace number 'index'.
Arguments:
- index (int): index of the trace to be flipped
1398 def tukey_bandpass( 1399 self, lowWavelengthEdge, upWavelengthEdge, lowEdgeWidth, highEdgeWidth 1400 ): 1401 """applies a bandpass filter the traces in the frequency domain using a tukey window. See traceHandler's docs""" 1402 for i in range(len(self.traceHandlers)): 1403 self.traceHandlers[i].fft_tukey_bandpass( 1404 lowWavelengthEdge, upWavelengthEdge, lowEdgeWidth, highEdgeWidth 1405 )
Applies a bandpass filter to the traces in the frequency domain using a Tukey window. See TraceHandler's docs.
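For instance, to keep only an illustrative 300-1200 nm band with 50 nm wide cosine edges on every stored trace (values are placeholders; wavelengths in nm as per the class conventions):

    mth.tukey_bandpass(300, 1200, 50, 50)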
1407 def apply_spectrum( 1408 self, wvl=None, spectrum=None, CEP_shift=0.0, stripZeroPadding=True 1409 ): 1410 """applies spectrum to the phase of the pulse. See traceHandler's docs""" 1411 for i in range(len(self.traceHandlers)): 1412 self.traceHandlers[i].apply_spectrum( 1413 wvl, spectrum, CEP_shift, stripZeroPadding 1414 )
Applies the given spectrum to the spectral phase of the pulses. See TraceHandler's docs.
1416 def plot_traces( 1417 self, 1418 low_lim=None, 1419 up_lim=None, 1420 labels=None, 1421 delay_shift=None, 1422 offset: float = 2.0, 1423 errorbar: bool = False, 1424 normalize: bool = True, 1425 ): 1426 """plots all traces. 1427 1428 Args: 1429 low_lim (float): for xaxis 1430 up_lim (float): for xaxis 1431 labels (list): list of labels for the legend 1432 delay_shift (list): list of delay offsets (one per trace) 1433 offset (float): y-axis offset between traces 1434 errorbar (bool): whether to plot errors. Default False 1435 normalize (bool): Default True 1436 """ 1437 fig, ax = plt.subplots() 1438 if delay_shift is None: 1439 delay_shift = [0.0] * len(self.traceHandlers) 1440 check_equal_length(delay_shift, self.traceHandlers) 1441 1442 for i in range(len(self.traceHandlers)): 1443 if normalize: 1444 norm_plot = self.traceHandlers[i].normalization_trace 1445 else: 1446 norm_plot = 1 1447 t, field = self.traceHandlers[i].get_trace() 1448 stdev_field = self.traceHandlers[i].get_stdev() 1449 if errorbar and stdev_field is not None: 1450 last_fill = ax.fill_between( 1451 t - delay_shift[i], 1452 offset * i + (field - stdev_field) / norm_plot, 1453 offset * i + (field + stdev_field) / norm_plot, 1454 label="_nolegend_", 1455 alpha=0.3, 1456 ) 1457 ax.plot( 1458 t - delay_shift[i], 1459 offset * i + field / norm_plot, 1460 label="Trace " + str(i), 1461 color=last_fill.get_facecolor(), 1462 alpha=1.0, 1463 ) 1464 else: 1465 ax.plot( 1466 t - delay_shift[i], 1467 offset * i + field / norm_plot, 1468 label="Trace " + str(i), 1469 ) 1470 ax.set_xlabel("Time (fs)") 1471 ax.set_ylabel("Field (Arb. unit)") 1472 1473 if labels is not None: 1474 handles, labels_dump = ax.get_legend_handles_labels() 1475 plt.legend(handles[::-1], labels[::-1], loc="upper left") 1476 else: 1477 pass 1478 if low_lim is not None and up_lim is not None: 1479 ax.set_xlim(low_lim, up_lim) 1480 return fig
Plots all traces.
Arguments:
- low_lim (float): lower limit of the x axis (time, fs)
- up_lim (float): upper limit of the x axis (time, fs)
- labels (list): list of labels for the legend
- delay_shift (list): list of delay offsets, one per trace (fs)
- offset (float): y-axis offset between consecutive traces. Default 2.0
- errorbar (bool): whether to plot the standard deviation as a shaded band. Default False
- normalize (bool): whether to normalize each trace before plotting. Default True
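A plotting sketch, continuing the examples above (limits in fs, values and output file name illustrative):

    fig = mth.plot_traces(low_lim=-50, up_lim=50, offset=2.0, errorbar=False)
    fig.savefig("traces_comparison.pdf")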
1482 def plot_spectra( 1483 self, low_lim=50, up_lim=1000, labels=None, offset=0.015, logscale: bool = False 1484 ): 1485 """Plot all spectra. 1486 low_lim: lower limit wavelength (nm). Default: 50 nm 1487 up_lim: upper limit wavelength (nm). Default: 1000 nm 1488 labels: label list for the plot legend. Labels should be in the same order as the stored traceHandler objects 1489 offset: artificial offset between two spectra for display purposes. Default: 0.015 1490 logscale: (bool); whether to plot in a logscale 1491 1492 """ 1493 fig, ax = plt.subplots() 1494 for i in range(len(self.traceHandlers)): 1495 self.traceHandlers[i].normalize_fft_spectrum() 1496 wvl, spctr = self.traceHandlers[i].get_spectrum_trace() 1497 if logscale: 1498 ax.plot(wvl, (offset**i) * spctr) 1499 ylow = np.mean(spctr[(wvl < up_lim) & (wvl > low_lim)]) / (offset**3) 1500 yup = np.mean(spctr[(wvl < up_lim) & (wvl > low_lim)]) * ( 1501 offset ** (len(self.traceHandlers)) 1502 ) 1503 ax.set_ylim(ylow, yup) 1504 else: 1505 ax.plot(wvl, i * offset + spctr) 1506 ax.set_xlabel("Wavelength (nm)") 1507 ax.set_ylabel("Intensity (Arb. unit)") 1508 if labels is not None: 1509 handles, labels_dump = ax.get_legend_handles_labels() 1510 plt.legend(handles[::-1], labels[::-1], loc="upper right") 1511 else: 1512 pass 1513 if low_lim is not None and up_lim is not None: 1514 ax.set_xlim(low_lim, up_lim) 1515 return fig
Plot all spectra.
Arguments:
- low_lim: lower limit wavelength (nm). Default: 50 nm
- up_lim: upper limit wavelength (nm). Default: 1000 nm
- labels: label list for the plot legend. Labels should be in the same order as the stored TraceHandler objects
- offset: artificial offset between two spectra for display purposes. Default: 0.015
- logscale (bool): whether to plot on a logarithmic scale
10def load_calibration_data(calibration_data_filepath): 11 """ 12 Load calibration data in a .npz file for the Reso spectrometer 13 14 Args: 15 calibration_data_filepath (StrOrBytesPath): path of the file to load 16 """ 17 try: 18 calibration = np.load(calibration_data_filepath) 19 wavelength_calibration = calibration["wavelength"] 20 lamp_spec = calibration["lamp_ref"] 21 lamp_measbyReso = calibration["lamp_measured"] 22 calibration_smoothed = np.abs(calibration["corr_factor_smoothed"]) 23 except FileNotFoundError: 24 print( 25 "Error: calibration data for the UV spectrometer not found.\n" 26 "Please copy the folder Attoworld/src/attoworld/spectrum/calibration_data into your current working directory\n" 27 "or alternatively create a calibration file with relative path ./calibration_data/Reso_Spectrometer_CalibrationCorrection.npz\n" 28 ) 29 raise FileNotFoundError("calibration data not found") 30 return wavelength_calibration, lamp_spec, lamp_measbyReso, calibration_smoothed
Load calibration data from a .npz file for the Reso spectrometer.
Arguments:
- calibration_data_filepath (StrOrBytesPath): path of the file to load
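A usage sketch; the path below is the default location mentioned in calibrate() and is only a placeholder:

    from attoworld.personal.marco import load_calibration_data

    wvl_cal, lamp_ref, lamp_measured, corr_factor = load_calibration_data(
        "./calibration_data/Reso_Spectrometer_CalibrationCorrection.npz"
    )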
33def smooth(y, box_pts: int): 34 """basic box smoothing for the spectra 35 36 ARGUMENTS: 37 y = spectrum to be smoothed [numpy array] 38 box_pts = width of smoothing box [int, number of points] 39 40 RETURNS: 41 smoothed_y [numpy array] 42 """ 43 box = np.ones(box_pts) / box_pts 44 y_smooth = np.convolve(y, box, mode="same") 45 return y_smooth
Basic box smoothing for spectra.
Arguments:
- y: spectrum to be smoothed (numpy array)
- box_pts: width of the smoothing box (int, number of points)
Returns:
smoothed_y (numpy array)
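A quick sketch on synthetic data (assuming the function is importable from this package):

    import numpy as np
    from attoworld.personal.marco import smooth

    noisy = np.sin(np.linspace(0, 10, 500)) + 0.1 * np.random.randn(500)
    smoothed = smooth(noisy, box_pts=10)    # 10-point box average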
48def tukey_f(x: float, center: float, FWHM: float, w: float): 49 """cosine-smoothed, flattop window function centered in 'center' 50 51 each edge is w-wide 52 amplitude = 1 53 54 ARGUMENTS: 55 x: float = input value 56 center = center of the t. function 57 FWHM = full width at half maximum of the t. function 58 w = width of the edges of the t. function (0 < w < FWHM) 59 """ 60 if FWHM < w: 61 print("tukey window can not have edges wider than FWHM") 62 pass 63 xmin = center - FWHM / 2 - w / 2 64 xmax = center + FWHM / 2 + w / 2 65 if x < xmin or x > xmax: 66 y = 0 67 elif xmin <= x and x < xmin + w: 68 y = (1 - np.cos((x - xmin) * np.pi / w)) / 2 69 elif xmin + w <= x and x <= xmax - w: 70 y = 1 71 elif xmax - w < x and x <= xmax: 72 y = (1 - np.cos((x - xmax) * np.pi / w)) / 2 73 else: 74 print(x, xmax, xmin, w) 75 raise ValueError( 76 "in tukey_f, x could not be assigned correctly to the sub intervals, might it (or the other parameters) be NaN?" 77 ) 78 return y
Cosine-smoothed, flat-top window function centered at 'center'.
Each edge is w wide; the amplitude is 1.
Arguments:
- x (float): input value
- center: center of the tukey function
- FWHM: full width at half maximum of the tukey function
- w: width of the edges of the tukey function (0 < w < FWHM)
81def tukey_window(x, center: float, FWHM: float, w: float): 82 """returns a cosine-smoothed, flattop window centered in 'center'; amplitude = 1. 83 84 The difference with the function tukey_f is that this function can take as input a numpy array or a list, and returns a numpy array or a list. 85 86 Args: 87 x: input array (numpy array or list) 88 center: center of the window 89 FWHM: full width at half maximum 90 w: width of the edges of the window (w > 0) 91 92 Returns: 93 window (numpy array or list)""" 94 if isinstance(x, np.ndarray): 95 y = [] 96 for xi in x: 97 y.append(tukey_f(xi, center, FWHM, w)) 98 y = np.array(y) 99 elif isinstance(x, list): 100 y = [] 101 for xi in x: 102 y.append(tukey_f(xi, center, FWHM, w)) 103 elif isinstance(x, float): 104 y = tukey_f(x, center, FWHM, w) 105 else: 106 raise TypeError( 107 "in function tukey window x is neither np array nor list nor float" 108 ) 109 return y
Returns a cosine-smoothed, flat-top window centered at 'center'; amplitude = 1.
Unlike tukey_f, this function can take a numpy array, a list, or a float as input, and returns the corresponding type.
Arguments:
- x: input value(s) (numpy array, list, or float)
- center: center of the window
- FWHM: full width at half maximum
- w: width of the edges of the window (w > 0)
Returns:
window (numpy array or list)
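For example (numbers are illustrative; the window below is centered at 500 with FWHM 400 and 50-wide cosine edges):

    import numpy as np
    from attoworld.personal.marco import tukey_window

    x = np.linspace(0, 1000, 2001)
    win = tukey_window(x, center=500.0, FWHM=400.0, w=50.0)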
112def read_csd_file(filename): 113 """DEPRECATED read the file produced by the Maya spectrometer""" 114 file = open(filename) 115 data = [] 116 for row in file: 117 # row = row.replace(',', '.') 118 elemlist = str(row).split() 119 row_float = [] 120 for elem in elemlist: 121 row_float.append(float(elem)) 122 data.append(row_float) 123 file.close() 124 return np.array(data)
DEPRECATED: reads a file produced by the Maya spectrometer.
127def plot_spectra(filenameList, pdfFilename, legendItemList=None): 128 """DEPRECATED plot the spectra produced by the Maya spectrometer""" 129 130 dataList = [] 131 for filename in filenameList: 132 dataList.append(read_csd_file(filename)) 133 134 fig, ax = plt.subplots(2, 1, figsize=(6, 8)) 135 legendHandles = [] 136 for data in dataList: 137 avgeSpectrum = [] 138 for row in data: 139 avgeSpectrum.append(np.mean(np.array(row))) 140 (handle,) = ax[0].plot(data[:, 0], avgeSpectrum) 141 legendHandles.append(handle) 142 # ax[0].set_ylim(3e-17,1e-9) 143 # ax[0].set_xlim(freqLim[0], freqLim[1]) 144 ax[0].set_xlabel("Wavelength (nm)") 145 ax[0].set_ylabel("Intensity") 146 if legendItemList is not None: 147 ax[0].legend(legendHandles, legendItemList) 148 149 for data in dataList: 150 avgeSpectrum = [] 151 for row in data: 152 avgeSpectrum.append(np.mean(np.array(row))) 153 (handle,) = ax[1].semilogy(data[:, 0], avgeSpectrum) 154 legendHandles.append(handle) 155 # ax[1].set_ylim(3e-17,1e-9) 156 # ax[1].set_xlim(freqLim[0], freqLim[1]) 157 ax[1].set_xlabel("Wavelength (nm)") 158 ax[1].set_ylabel("Intensity") 159 if legendItemList is not None: 160 ax[1].legend(legendHandles, legendItemList) 161 162 fig.savefig(pdfFilename)
DEPRECATED: plots the spectra produced by the Maya spectrometer.
165def read_spectrometer_excel(filename): 166 """Reads xls file (passed as the string filename without extension!) produced by the UV-VIS spectrometer RESONANCE VS-7550-VUV. 167 168 This function currently only works in Linux (Ubuntu), since it uses the OS system command to brutally copy the content of the xls file into a txt; 169 such copying is necessary because the xls file is not readable by pandas (it's a non-standard xls, I couldn't find any other workaround). 170 For windows there is a similar command, please replace the line 171 os.system("cat " + filename + ".xls > " + filename + ".txt") 172 173 44 (skipped) rows at the beginning of the file (all headers) 174 175 Args: 176 filename: name of the file (without extension) 177 178 Returns: 179 numpy array with the spectral data (see original excel for more details)""" 180 if "." in filename and "./" not in filename: 181 raise ValueError( 182 "in function read_spectrometer_excel, filename must be passed without extension (filename should not contain a dot)" 183 ) 184 os.system("cat " + filename + ".xls > " + filename + ".txt") 185 dataF = pandas.read_table( 186 filename + ".txt", sep="\t", keep_default_na=False, skiprows=44 187 ) # keep_default_na=False, 188 data = [] 189 for row in dataF.values: 190 data.append([]) 191 for x in row: 192 if x == "": 193 data[-1].append(float("nan")) 194 else: 195 data[-1].append(float(x)) 196 return np.array(data)
Reads xls file (passed as the string filename without extension!) produced by the UV-VIS spectrometer RESONANCE VS-7550-VUV.
This function currently works only on Linux (Ubuntu), since it uses an OS system command to copy the content of the xls file into a txt file; the copy is necessary because the xls file is not directly readable by pandas (it is a non-standard xls, and no other workaround was found). On Windows, replace the line os.system("cat " + filename + ".xls > " + filename + ".txt") with an equivalent command.
The first 44 rows of the file (all headers) are skipped.
Arguments:
- filename: name of the file (without extension)
Returns:
numpy array with the spectral data (see original excel for more details)
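A usage sketch (the file name is hypothetical and is passed without the .xls extension; Linux only):

    from attoworld.personal.marco import read_spectrometer_excel

    data = read_spectrometer_excel("UV_measurement_2024_01_01")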
199def calibrate( 200 data, 201 column: int, 202 calibration_file_path="./calibration_data/Reso_Spectrometer_CalibrationCorrection.npz", 203 dark=None, 204 dark_c=None, 205 stitch: bool = False, 206 smooth_points: int = 10, 207 null_calibration: bool = False, 208 wavelength_calibration_intercept: float = 3.538, 209 wavelength_calibration_slope: float = 1.003427, 210): 211 """For the UV spectrometer. Calibrate the spectrum number 'column' in the the 'data' array. Intensity calibration factor is loaded in the first lines of UVSpectrumAnalysis.py. 212 Notice: each saved spectrum has 7 columns in the excel file; however, the argument column refers to the index of the spectrum, not the actual column in the file. 213 214 Args: 215 data: data [numpy] array (see read_spectrometer_excel) 216 column: index of the spectrum to be calibrated 217 calibration_file_path: path of the calibration file 218 dark: dark spectrum (optional) 219 dark_c: column of the dark spectrum (optional) 220 stitch: Boolean, if True, the spectra are stitched together (optional, default False) 221 if True, the spectrum will be calculated by stitching the 5 partial spectra in the previous columns. 222 the stitching only works if the spectrum number 'column' is a FULL-SPECTRUM in the xls file. 223 Stitching was implemented because the internal software of the spectrometer is not good enough at it (the saved full spectrum typically has discontinuities). 224 smooth_points: number of points for smoothing (optional, default 10). if 0, no smoothing is applied 225 null_calibration: Boolean, if True, the intensity calibration is not applied (optional, default False) 226 wavelength_calibration_intercept: intercept of the wavelength calibration (optional, default 3.538). If None no wavelength calibration is applied 227 wavelength_calibration_slope: slope of the wavelength calibration (optional, default 1.003427). If None no wavelength calibration is applied 228 Wavelength calibration is in the form λ_true = λ_measured * slope + intercept 229 Notice: wavelength calibration has an error of +/- 1 to 2 nm due to the stitching process. 
230 for a better calibration compute and apply the correction to each of the 5 partial spectra 231 232 Returns: 233 wavelength: wavelength array 234 spectrum: calibrated spectrum 235 """ 236 wavelength_calibration, lamp_spec, lamp_measbyReso, calibration_smoothed = ( 237 load_calibration_data(calibration_file_path) 238 ) 239 240 if wavelength_calibration_slope is None or wavelength_calibration_intercept is None: 241 cal_slope = 1.0 242 cal_intercept = 0.0 243 else: 244 cal_slope = wavelength_calibration_slope 245 cal_intercept = wavelength_calibration_intercept 246 247 if stitch: 248 # check that the chosen spectrum (in column) is actually a full one 249 theoretically_complete_wvl = ( 250 data[:, 7 * (column) + 1] * cal_slope + cal_intercept 251 ) 252 theoretically_complete_wvl = theoretically_complete_wvl[ 253 ~np.isnan(theoretically_complete_wvl) 254 ] 255 if theoretically_complete_wvl[-1] - theoretically_complete_wvl[0] < 900: 256 raise ValueError( 257 "in function calibrate(), the spectrum to be stitched is not a full spectrum, please check the column index" 258 ) 259 260 extrema = [] 261 spectra = [] 262 for i in range(5): 263 wvl_single_sp = ( 264 data[:, 7 * (column - 5 + i) + 1] * cal_slope + cal_intercept 265 ) 266 single_sp = data[:, 7 * (column - 5 + i) + 2] 267 if dark is not None and dark_c is not None: 268 single_sp -= dark[:, 7 * (dark_c - 5 + i) + 2] 269 wvl_single_sp = wvl_single_sp[~np.isnan(wvl_single_sp)] 270 single_sp = single_sp[~np.isnan(single_sp)] 271 extrema.append([wvl_single_sp[0], wvl_single_sp[-1]]) 272 spectra.append([wvl_single_sp, single_sp]) 273 274 overlaps = [] 275 centers_of_overlap = [extrema[0][0]] 276 for i in range(4): 277 overlaps.append(extrema[i][1] - extrema[i + 1][0]) 278 centers_of_overlap.append(extrema[i][1] / 2 + extrema[i + 1][0] / 2) 279 280 w = np.min(overlaps) 281 centers_of_overlap.append(extrema[4][1] + w / 2) 282 centers_of_overlap[0] = centers_of_overlap[0] - w / 2 283 284 wavelength = data[:, 7 * column + 1] * cal_slope + cal_intercept 285 spectrum = wavelength * 0.0 286 287 for i in range(5): 288 center = centers_of_overlap[i] / 2 + centers_of_overlap[i + 1] / 2 289 FWHM = centers_of_overlap[i + 1] - centers_of_overlap[i] 290 spectrum += np.interp( 291 wavelength, 292 spectra[i][0], 293 spectra[i][1] * tukey_window(spectra[i][0], center, FWHM, w), 294 ) 295 plt.plot( 296 spectra[i][0], 297 spectra[i][1] * tukey_window(spectra[i][0], center, FWHM, w), 298 ) 299 calibr_interpolated = np.interp( 300 wavelength, wavelength_calibration, calibration_smoothed 301 ) 302 if not null_calibration: 303 spectrum *= calibr_interpolated 304 305 plt.plot( 306 data[:, 7 * column + 1] * cal_slope + cal_intercept, data[:, 7 * column + 2] 307 ) 308 plt.plot(wavelength, spectrum) 309 310 if w < 0: 311 raise ValueError( 312 "in function calibrate(), the spectra to be stitched do not overlap" 313 ) 314 315 else: 316 wavelength = data[:, 7 * column + 1] * cal_slope + cal_intercept 317 if dark is None or dark_c is None: 318 calibr_interpolated = np.interp( 319 wavelength, wavelength_calibration, calibration_smoothed 320 ) 321 if null_calibration: 322 calibr_interpolated = calibr_interpolated * 0.0 + 1 323 spectrum = ( 324 data[:, 7 * column + 2] - np.min(data[:, 7 * column + 2]) 325 ) * calibr_interpolated 326 else: 327 calibr_interpolated = np.interp( 328 wavelength, wavelength_calibration, calibration_smoothed 329 ) 330 if null_calibration: 331 calibr_interpolated = calibr_interpolated * 0.0 + 1 332 spectrum = ( 333 data[:, 7 * column + 2] - dark[:, 7 * 
dark_c + 2] 334 ) * calibr_interpolated 335 336 if len(data[:, 7 * column + 2][~np.isnan(data[:, 7 * column + 2])]) != len( 337 dark[:, 7 * dark_c + 2][~np.isnan(dark[:, 7 * dark_c + 2])] 338 ): 339 raise ValueError( 340 "In function calibrate(), the dark spectrum is not the same length as the spectrum to be calibrated.\n" 341 "Please check the column indices of the dark spectrum and the main spectrum" 342 ) 343 if ( 344 data[:, 7 * column + 1][~np.isnan(data[:, 7 * column + 1])][0] 345 != dark[:, 7 * dark_c + 2][~np.isnan(dark[:, 7 * dark_c + 1])][0] 346 ): 347 print( 348 "WARNING: In function calibrate(), the dark spectrum does not have the same wavelength as the spectrum to be calibrated.\n" 349 "Please check the column indices of the dark spectrum and the main spectrum" 350 ) 351 352 if smooth_points is not None and smooth_points > 0: 353 spectrum = smooth(spectrum, smooth_points) 354 return wavelength, spectrum
For the UV spectrometer. Calibrates the spectrum number 'column' in the 'data' array. The intensity calibration factor is loaded at the beginning of UVSpectrumAnalysis.py.
Notice: each saved spectrum occupies 7 columns in the excel file; the argument 'column' refers to the index of the spectrum, not the actual column in the file.
Arguments:
- data: data [numpy] array (see read_spectrometer_excel)
- column: index of the spectrum to be calibrated
- calibration_file_path: path of the calibration file
- dark: dark spectrum (optional)
- dark_c: column of the dark spectrum (optional)
- stitch: Boolean, optional (default False). If True, the spectrum is calculated by stitching together the 5 partial spectra stored in the previous columns. Stitching only works if the spectrum number 'column' is a FULL-SPECTRUM in the xls file. It was implemented because the internal software of the spectrometer does not stitch well (the saved full spectrum typically has discontinuities).
- smooth_points: number of points for smoothing (optional, default 10). if 0, no smoothing is applied
- null_calibration: Boolean, if True, the intensity calibration is not applied (optional, default False)
- wavelength_calibration_intercept: intercept of the wavelength calibration (optional, default 3.538). If None no wavelength calibration is applied
- wavelength_calibration_slope: slope of the wavelength calibration (optional, default 1.003427). If None, no wavelength calibration is applied. The wavelength calibration has the form λ_true = λ_measured * slope + intercept. Notice: the wavelength calibration carries an error of +/- 1 to 2 nm due to the stitching process; for a better calibration, compute and apply the correction to each of the 5 partial spectra.
Returns:
- wavelength: wavelength array
- spectrum: calibrated spectrum
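A usage sketch (file names are hypothetical; the calibration .npz is expected at the default path unless calibration_file_path is given):

    from attoworld.personal.marco import read_spectrometer_excel, calibrate

    data = read_spectrometer_excel("UV_measurement_2024_01_01")
    dark = read_spectrometer_excel("UV_dark_2024_01_01")
    # calibrate spectrum number 2 of the file, subtracting dark spectrum number 0
    wavelength, spectrum = calibrate(data, 2, dark=dark, dark_c=0, smooth_points=10)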
361def plot_spectra_UVsp( 362 filenameList, 363 columnList, 364 pdfFilename, 365 legendItemList=None, 366 darkTupleList=None, 367 normalizationList=None, 368 color_gradient: bool = False, 369 additive_constant_log_intensity=20, 370 wavelength_range=None, 371 title=None, 372 invert_order: bool = False, 373 plotList=None, 374 do_calibrate: bool = True, 375 stitch: bool = False, 376 smooth_points: int = 10, 377 calibration_file_path="./calibration_data/Reso_Spectrometer_CalibrationCorrection.npz", 378): 379 """Plotting function for the UV spectrometer. 380 381 Notice: The excel file usually contains several spectra. 382 Notice: each saved spectrum has 7 columns in the excel file; please consider that the argument 'column' refers to the index of the spectrum, not the actual column in the file. 383 384 Args: 385 filenameList: input file list (without extension) 386 columnList: list of the positions in the file of the spectra to be plotted (e.g. [1, 0, 5] -> plot 1st sp. for 1st file, 5th spectrum for 3rd file, ...) 387 pdfFilename: output filename 388 darkTupleList: [("darkFilename1stspectrum", spectrum_number_in_1stDark_filename), ( ..,.. ), ...] 389 legendItem List: list of entries for the legend (optional, keyword) 390 normalizationList: list of (multiplicative) normalization factors for the spectra (optional, keyword) 391 color_gradient: Boolean, whether plot should be displayed with color gradient instead of in matplotlib default color cycle 392 additive_constant_log_intensity: additive constant to the spectral intensity, so that the logarithmic plot does not have values too close to 0 393 wavelength_range: [min lambda, max lambda] for plotting 394 title: title of the figure 395 plotList: list of booleans: this function will plot the entry filenameList[i] if plotList[i] = True, otherwise the i-th data will not be shown in the plot 396 invert_order: Boolean, if True, the order of the spectra will be inverted (i.e. the last spectrum in the list will be plotted first) 397 do_calibrate: Boolean, if True, the spectra will be calibrated (default True, see calibrate() function) 398 calibration_file_path: str, path to the calibration file (default "./calibration_data/Reso_Spectrometer_CalibrationCorrection.npz"). Ignored if do_calibrate = False 399 stitch: Boolean, if True, the spectra will be stitched together (optional, default False) 400 if True, the spectrum will be calculated by stitching the 5 partial spectra in the previous columns. 401 the stitching only works if the spectrum number 'column' is a FULL-SPECTRUM in the xls file. 402 Stitching was implemented because the internal software of the spectrometer does not do it properly (the saved full spectrum tipically has discontinuities). 
403 Stitching here works only if do_calibrate = True 404 smooth_points: number of points for smoothing (optional, default 10) 405 406 DEPENDS ON read_spectrometer_excel() 407 DEPENDS ON calibrate() [optional] 408 """ 409 410 if len(columnList) != len(filenameList): 411 print( 412 "Error: in function plot_spectra_UVsp\ncolumnList has different size than filenameList" 413 ) 414 list_error() 415 if legendItemList is not None: 416 if len(legendItemList) != len(filenameList): 417 print( 418 "Error: in function plot_spectra_UVsp\nlegendItemList has different size than filenameList" 419 ) 420 list_error() 421 if darkTupleList is not None: 422 if len(darkTupleList) != len(filenameList): 423 print( 424 "Error: in function plot_spectra_UVsp\ndarkTupleList has different size than filenameList" 425 ) 426 list_error() 427 if normalizationList is not None: 428 if len(normalizationList) != len(filenameList): 429 print( 430 "Error: in function plot_spectra_UVsp\nnormalizationList has different size than filenameList" 431 ) 432 list_error() 433 if plotList is not None: 434 if len(plotList) != len(filenameList): 435 print( 436 "Error: in function plot_spectra_UVsp\nplotList has different size than filenameList" 437 ) 438 list_error() 439 440 if invert_order: 441 filenameList.reverse() 442 columnList.reverse() 443 if legendItemList is not None: 444 legendItemList.reverse() 445 if darkTupleList is not None: 446 darkTupleList.reverse() 447 if normalizationList is not None: 448 normalizationList.reverse() 449 if plotList is not None: 450 plotList.reverse() 451 452 if plotList is not None: 453 for i in reversed(range(len(plotList))): 454 if not plotList[i]: 455 if filenameList is not None: 456 del filenameList[i] 457 if columnList is not None: 458 del columnList[i] 459 if legendItemList is not None: 460 del legendItemList[i] 461 if darkTupleList is not None: 462 del darkTupleList[i] 463 if normalizationList is not None: 464 del normalizationList[i] 465 466 dataList = [] 467 for filename in filenameList: 468 data = read_spectrometer_excel(filename) 469 dataList.append(data) 470 471 # check if the dark spectrum is shorter or longer than the spectrum to be calibrated (if it has nan values) 472 # [when a = nan the statement (a == a) returns False] 473 if darkTupleList is not None: 474 for tp, data, column in zip(darkTupleList, dataList, columnList): 475 dark = read_spectrometer_excel(tp[0]) 476 for i in range(len(data[:, 7 * column + 2])): 477 if ( 478 dark[i, 7 * tp[1] + 2] != dark[i, 7 * tp[1] + 2] 479 and data[i, 7 * column + 2] == data[i, 7 * column + 2] 480 ): 481 print( 482 "Warning: in function plot_spectra_UVsp\ndark spectrum is shorter than the spectrum to be calibrated" 483 "\nplease check the column index of the dark spectrum" 484 ) 485 break 486 elif ( 487 data[i, 7 * column + 2] != data[i, 7 * column + 2] 488 and dark[i, 7 * tp[1] + 2] == dark[i, 7 * tp[1] + 2] 489 ): 490 print( 491 "Warning: in function plot_spectra_UVsp\ndark spectrum is longer than the spectrum to be calibrated" 492 "\nplease check the column indices of the dark spectrum and the main spectrum" 493 ) 494 break 495 496 # subtract dark (if any, and if it will not be subtracted during calibration) 497 if darkTupleList is not None and not do_calibrate: 498 for tp, data, column in zip(darkTupleList, dataList, columnList): 499 dark = read_spectrometer_excel(tp[0]) 500 for i in range(len(data[:, 7 * column + 2])): 501 data[i, 7 * column + 2] = ( 502 data[i, 7 * column + 2] - dark[i, 7 * tp[1] + 2] 503 ) 504 505 if do_calibrate: 506 for i in 
range(len(dataList)): 507 if darkTupleList is not None: 508 dark = read_spectrometer_excel(darkTupleList[i][0]) 509 ( 510 dataList[i][:, 7 * columnList[i] + 1], 511 dataList[i][:, 7 * columnList[i] + 2], 512 ) = calibrate( 513 dataList[i], 514 columnList[i], 515 calibration_file_path=calibration_file_path, 516 dark=dark, 517 dark_c=darkTupleList[i][1], 518 stitch=stitch, 519 smooth_points=smooth_points, 520 ) 521 else: 522 ( 523 dataList[i][:, 7 * columnList[i] + 1], 524 dataList[i][:, 7 * columnList[i] + 2], 525 ) = calibrate( 526 dataList[i], 527 columnList[i], 528 calibration_file_path=calibration_file_path, 529 calibration_dark=None, 530 dark_c=None, 531 stitch=stitch, 532 smooth_points=smooth_points, 533 ) 534 535 n_lines = len(dataList) 536 if color_gradient: 537 # Take colors at regular intervals spanning the colormap. 538 cmap = mpl.colormaps[ 539 "magma" 540 ] #'viridis', 'plasma', 'inferno', 'magma', 'cividis' 541 colors = cmap(np.linspace(0, 0.92, n_lines)) 542 # fig, ax = plt.subplots(layout='constrained') 543 else: 544 prop_cycle = plt.rcParams["axes.prop_cycle"] 545 colors = prop_cycle.by_key()["color"] 546 # colors = colors[:n_lines] 547 548 fig, ax = plt.subplots(2, 1, figsize=(6, 8)) 549 if title is not None: 550 fig.suptitle(title) 551 ax[0].xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator()) 552 ax[1].xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator()) 553 legendHandles = [] 554 if normalizationList is None: 555 normalizationList = [1.0 for i in dataList] 556 for data, column, normalization, color, i in zip( 557 dataList, columnList, normalizationList, colors, range(len(dataList)) 558 ): 559 (handle,) = ax[0].plot( 560 data[:, 7 * column + 1], 561 data[:, 7 * column + 2] * normalization, 562 color=color, 563 ) 564 legendHandles.append(handle) 565 if wavelength_range is not None: 566 ax[0].set_xlim(wavelength_range[0], wavelength_range[1]) 567 ax[0].set_xlabel("Wavelength (nm)") 568 ax[0].set_ylabel("Intensity") 569 ax[0].grid(linestyle=(0, (5, 10))) # offset, dash length, space length 570 if legendItemList is not None: 571 ax[0].legend(legendHandles, legendItemList) 572 573 for data, column, normalization, color, i in zip( 574 dataList, columnList, normalizationList, colors, range(len(dataList)) 575 ): 576 (handle,) = ax[1].semilogy( 577 data[:, 7 * column + 1], 578 data[:, 7 * column + 2] * normalization + additive_constant_log_intensity, 579 color=color, 580 ) 581 legendHandles.append(handle) 582 if wavelength_range is not None: 583 ax[1].set_xlim(wavelength_range[0], wavelength_range[1]) 584 ax[1].set_xlabel("Wavelength (nm)") 585 ax[1].set_ylabel("Intensity") 586 ax[1].grid(linestyle=(0, (5, 10))) 587 if legendItemList is not None: 588 ax[0].legend(legendHandles, legendItemList) 589 590 fig.savefig(pdfFilename) 591 return fig
Plotting function for the UV spectrometer.
Notice: the excel file usually contains several spectra.
Notice: each saved spectrum occupies 7 columns in the excel file; each entry of 'columnList' refers to the index of the spectrum, not the actual column in the file.
Arguments:
- filenameList: input file list (without extension)
- columnList: list of the positions in the file of the spectra to be plotted (e.g. [1, 0, 5] -> plot spectrum 1 of the 1st file, spectrum 0 of the 2nd file, spectrum 5 of the 3rd file)
- pdfFilename: output filename
- darkTupleList: [("darkFilename1stspectrum", spectrum_number_in_1stDark_filename), ( ..,.. ), ...]
- legendItemList: list of entries for the legend (optional, keyword)
- normalizationList: list of (multiplicative) normalization factors for the spectra (optional, keyword)
- color_gradient: Boolean, whether plot should be displayed with color gradient instead of in matplotlib default color cycle
- additive_constant_log_intensity: additive constant to the spectral intensity, so that the logarithmic plot does not have values too close to 0
- wavelength_range: [min lambda, max lambda] for plotting
- title: title of the figure
- plotList: list of booleans; the entry filenameList[i] is plotted only if plotList[i] is True, otherwise the i-th dataset is not shown in the plot
- invert_order: Boolean, if True, the order of the spectra will be inverted (i.e. the last spectrum in the list will be plotted first)
- do_calibrate: Boolean, if True, the spectra will be calibrated (default True, see calibrate() function)
- calibration_file_path: str, path to the calibration file (default "./calibration_data/Reso_Spectrometer_CalibrationCorrection.npz"). Ignored if do_calibrate = False
- stitch: Boolean, optional (default False). If True, the spectra are calculated by stitching together the 5 partial spectra stored in the previous columns. Stitching only works if the spectrum number 'column' is a FULL-SPECTRUM in the xls file, and only if do_calibrate = True. It was implemented because the internal software of the spectrometer does not stitch properly (the saved full spectrum typically has discontinuities).
- smooth_points: number of points for smoothing (optional, default 10)
Depends on read_spectrometer_excel() and, optionally, calibrate().
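A usage sketch (file names are hypothetical and passed without extension):

    from attoworld.personal.marco import plot_spectra_UVsp

    fig = plot_spectra_UVsp(
        ["UV_sampleA", "UV_sampleB"],          # input files, without .xls extension
        [5, 5],                                # index of the spectrum within each file
        "UV_comparison.pdf",                   # output file
        legendItemList=["sample A", "sample B"],
        darkTupleList=[("UV_dark", 0), ("UV_dark", 0)],
        wavelength_range=[200, 1000],
    )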
14def eliminate_outliers(y, threshold: float = 3, window_points: int = 20): 15 """Eliminates outliers in the data by replacing them with the mean of the surrounding values. 16 17 Args: 18 y (np.ndarray): Input data array. 19 threshold (float): Threshold in units of sigma for outlier detection. Default is 3. 20 window_points (int): Number of points to consider for the mean and sigma calculation. Default is 20. 21 """ 22 if not isinstance(y, np.ndarray): 23 raise TypeError("Input of eliminate_outliers must be a numpy array.") 24 n_outliers = 0 25 for i in range(len(y)): 26 window_lower = int(max(0, i - window_points / 2)) 27 window_upper = int(min(len(y), i + window_points / 2)) 28 mean = np.mean(y[window_lower:window_upper]) 29 std = np.std(y[window_lower:window_upper]) 30 if abs(y[i] - mean) > threshold * std: 31 y[i] = np.mean(np.concatenate((y[window_lower:i], y[i + 1 : window_upper]))) 32 n_outliers += 1 33 34 print( 35 n_outliers, " outliers detected and replaced with mean of surrounding values." 36 )
Eliminates outliers in the data by replacing them with the mean of the surrounding values. The input array is modified in place.
Arguments:
- y (np.ndarray): Input data array.
- threshold (float): Threshold in units of sigma for outlier detection. Default is 3.
- window_points (int): Number of points to consider for the mean and sigma calculation. Default is 20.
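A quick sketch on synthetic data (the array is modified in place):

    import numpy as np
    from attoworld.personal.marco import eliminate_outliers

    spectrum = np.sin(np.linspace(0, 10, 500)) + 2.0
    spectrum[123] = 50.0                       # artificial single-pixel spike
    eliminate_outliers(spectrum, threshold=3, window_points=20)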
39def read_spectrum_maya( 40 filename, 41 remove_offsets_individually=False, 42 nm_smearing=1.0, 43 eliminate_outliers_spectrum=False, 44): 45 """Reads a spectrum file from the Maya spectrometer and returns the wavelength and spectrum arrays. 46 47 The file is expected to have one column of wavelength data and arbitrarily many columns of equivalent spectra [to be averaged]. 48 The function also applies a Gaussian filter to the spectrum data to smooth it. 49 The function can optionally eliminate outliers in the spectrum data and remove offsets from each column individually. 50 51 Args: 52 filename 53 remove_offsets_individually (bool): If True, compute and remove offsets from each column individually 54 nm_smearing (float): sigma in nm of the gaussian filter applied to smooth the spectrum. 55 eliminate_outliers_spectrum (bool): = If True, eliminate single pixel outliers in the spectral data and replace them with average of surrounding values. 56 default is False. 57 """ 58 data = pandas.read_table(filename, sep=" ", keep_default_na=True, skiprows=0) 59 spectrum = [] 60 offsets = [0.0] * len(data.columns) 61 fig, ax = plt.subplots() 62 wvl = np.array(data.iloc[:, 0]) 63 final_nm_smearing = nm_smearing 64 if eliminate_outliers_spectrum: 65 nm_smearing = 0 66 for i in range(len(data.columns) - 1): 67 if nm_smearing > 0: 68 spectrum_i = gaussian_filter1d( 69 np.array(data.iloc[:, i + 1]), sigma=nm_smearing / (wvl[1] - wvl[0]) 70 ) 71 else: 72 spectrum_i = np.array(data.iloc[:, i + 1]) 73 if eliminate_outliers_spectrum: 74 eliminate_outliers(spectrum_i, threshold=3, window_points=20) 75 spectrum_i = gaussian_filter1d( 76 spectrum_i, sigma=final_nm_smearing / (wvl[1] - wvl[0]) 77 ) 78 offsets[i] = spectrum_i.min() 79 if remove_offsets_individually: 80 spectrum.append(spectrum_i - offsets[i]) 81 ax.plot(wvl, spectrum_i - offsets[i], label=f"Channel {i + 1}") 82 else: 83 spectrum.append(spectrum_i) 84 ax.plot(wvl, spectrum_i, label=f"Channel {i + 1}") 85 86 spectrum = np.mean(np.array(spectrum), axis=0) 87 ax.plot(wvl, spectrum, label="Mean spectrum") 88 89 return wvl, np.maximum(np.zeros(len(spectrum)), spectrum)
Reads a spectrum file from the Maya spectrometer and returns the wavelength and spectrum arrays.
The file is expected to have one column of wavelength data and arbitrarily many columns of equivalent spectra [to be averaged]. The function also applies a Gaussian filter to the spectrum data to smooth it. The function can optionally eliminate outliers in the spectrum data and remove offsets from each column individually.
Arguments:
- filename: path to the spectrum file
- remove_offsets_individually (bool): If True, compute and remove offsets from each column individually
- nm_smearing (float): sigma in nm of the gaussian filter applied to smooth the spectrum.
- eliminate_outliers_spectrum (bool): if True, eliminate single-pixel outliers in the spectral data and replace them with the average of the surrounding values. Default is False.
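A usage sketch (the file name is hypothetical):

    from attoworld.personal.marco import read_spectrum_maya

    wvl, spectrum = read_spectrum_maya(
        "maya_lamp_measurement.txt",
        remove_offsets_individually=True,
        nm_smearing=1.0,
        eliminate_outliers_spectrum=True,
    )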
92def read_spectrum_ocean_optics(filename): 93 """Reads a spectrum file from the Ocean Optics spectrometer and returns the wavelength and spectrum arrays. 94 The file is expected to have one column of wavelength data and one column of spectrum data. 95 wavelengths are in nm. 96 14 lines of header are skipped.""" 97 data = pandas.read_table(filename, sep="\t", keep_default_na=True, skiprows=14) 98 wvl = np.array(data.iloc[:, 0]) 99 spectrum = np.array(data.iloc[:, 1]) 100 return wvl, spectrum
Reads a spectrum file from the Ocean Optics spectrometer and returns the wavelength and spectrum arrays.
The file is expected to have one column of wavelength data and one column of spectrum data. Wavelengths are in nm. The first 14 lines of header are skipped.
def asymmetric_tukey_f(
    x: float, edge1: float, edge2: float, edge1_width: float, edge2_width: float
):
    # Asymmetric Tukey window evaluated at a single point x: cosine edges of widths
    # edge1_width and edge2_width around edge1 and edge2, flat (= 1) in between, 0 outside.
    if edge1_width < 0 or edge2_width < 0:
        edge1_width = 0
        edge2_width = 0
        print("Warning: negative tukey edge width; rectangular window computed")
    if abs(edge1_width) + abs(edge2_width) > 2 * abs(edge2 - edge1):
        raise Exception("Error: tukey edge width larger than admissible")
    if edge2 < edge1:
        e1 = edge2
        e2 = edge1
        w1 = edge2_width
        w2 = edge1_width
    elif edge1 < edge2:
        e1 = edge1
        e2 = edge2
        w1 = edge1_width
        w2 = edge2_width
    xmin = e1 - w1 / 2
    xmax = e2 + w2 / 2
    if x < xmin or x > xmax:
        y = 0
    elif xmin <= x and x < xmin + w1:
        y = (1 - np.cos((x - xmin) * np.pi / w1)) / 2
    elif xmin + w1 <= x and x <= xmax - w2:
        y = 1
    elif xmax - w2 < x and x <= xmax:
        y = (1 - np.cos((x - xmax) * np.pi / w2)) / 2
    else:
        print("x, xmax, xmin, w1, w2: ", x, xmax, xmin, w1, w2)
        raise ValueError(
            "in tukey_f, x could not be assigned correctly to the sub intervals, might it (or the other parameters) be NaN?"
        )
    return y
def asymmetric_tukey_window(
    x, edge1: float, edge2: float, edge1_width: float, edge2_width: float
):
    # Vectorized wrapper around asymmetric_tukey_f: accepts a numpy array, a list or a float
    # and returns the corresponding type.
    if isinstance(x, np.ndarray):
        y = []
        for xi in x:
            y.append(asymmetric_tukey_f(xi, edge1, edge2, edge1_width, edge2_width))
        y = np.array(y)
    elif isinstance(x, list):
        y = []
        for xi in x:
            y.append(asymmetric_tukey_f(xi, edge1, edge2, edge1_width, edge2_width))
    elif isinstance(x, float):
        y = asymmetric_tukey_f(x, edge1, edge2, edge1_width, edge2_width)
    else:
        raise TypeError(
            "in function tukey window x is neither np array nor list nor float"
        )
    return y
161class SpectrumHandler: 162 """Class to perform standard operations on spectra (calibration, plotting, division, multiplication, etc.)""" 163 164 def __init__( 165 self, 166 filename: str = None, 167 wavelengths=None, 168 spectrum=None, 169 remove_offsets_individually: bool = False, 170 nm_smearing=1.0, 171 eliminate_outliers_spectrum=False, 172 filetype="MayaScarab", 173 ): 174 """Provide either a filename or the wavelengths and spectrum arrays to the constructor. 175 176 Args: 177 filename (str): Path to the file containing the spectrum data. 178 wavelengths and spectrum (np.ndarray): = Wavelengths and spectrum data arrays. (alternative to filename) 179 remove_offsets_individually (bool): = If True, compute and remove offsets from each column individually 180 (filename is expected to have one column of wavelength data and arbitrarily many columns of equivalent spectra [to be averaged]). 181 nm_smearing (float): Smearing in nm to be applied to the spectrum. 182 eliminate_outliers_spectrum (bool): If True, eliminate single pixel outliers in the spectral data and replace them with average of surrounding values. 183 filetype (str): Type of the file to be read. Options are 'MayaScarab' (default) or 'OceanOptics'. 184 185 """ 186 self.nm_smearing = nm_smearing 187 self.wvl, self.spectrum = np.linspace(200, 1000, 800), np.zeros(800) 188 self.calibration_lamp_wvl, self.calibration_lamp_spectrum = None, None 189 self.calibration_factor = None 190 191 if filetype == "MayaScarab" and filename is not None: 192 self.wvl, self.spectrum = read_spectrum_maya( 193 filename, 194 remove_offsets_individually, 195 nm_smearing, 196 eliminate_outliers_spectrum, 197 ) 198 elif filetype == "OceanOptics" and filename is not None: 199 self.wvl, self.spectrum = read_spectrum_ocean_optics(filename) 200 elif wavelengths is not None and spectrum is not None: 201 self.wvl = wavelengths 202 self.spectrum = spectrum 203 self.check_wvl_ascending() 204 205 def get_spectrum(self): 206 return self.wvl, self.spectrum 207 208 def get_calibration_factor(self): 209 if self.calibration_factor is None: 210 print( 211 "Warning: Calibration factor not available. Computing it from given spectra..." 212 ) 213 self.compute_calibration_factor_spectrometer() 214 return self.wvl, self.calibration_factor 215 216 def calibrate(self): 217 """Applies the calibration factor to the spectrum and sets the calibration factor to None.""" 218 if self.calibration_factor is None: 219 print( 220 "Warning: Calibration factor not available. Try load_calibration_factor_from_file(). Returning original spectrum." 221 ) 222 else: 223 self.spectrum = self.spectrum * self.calibration_factor 224 self.calibration_factor = None 225 print( 226 "Calibration factor applied to the spectrum. Calibration factor is now set to None." 227 ) 228 229 def save_to_file(self, filename): 230 if self.wvl is None or self.spectrum is None: 231 raise ValueError("Spectrum not loaded.") 232 data = pandas.DataFrame({"Wavelength": self.wvl, "Spectrum": self.spectrum}) 233 data.to_csv(filename, index=False, sep="\t") 234 235 def save_calibration_factor_to_file(self, filename): 236 """Saves the calibration factor to a file. 237 238 The file can be either a .txt, .csv or .npz file. 239 The file will contain the wavelength, calibration factor, calibration lamp true spectrum and measured lamp spectrum. 
240 """ 241 242 if self.calibration_factor is None: 243 raise ValueError("Calibration factor not computed.") 244 if ".txt" in filename or ".csv" in filename: 245 data = pandas.DataFrame( 246 { 247 "Wavelength": self.wvl, 248 "Calibration Factor": self.calibration_factor, 249 "Lamp Spectrum": self.calibration_lamp_spectrum, 250 "Measured Spectrum": self.spectrum, 251 } 252 ) 253 data.to_csv(filename, index=False, sep="\t") 254 elif ".npz" in filename: 255 np.savez( 256 filename, 257 wavelength=self.wvl, 258 corr_factor_smoothed=self.calibration_factor, 259 lamp_ref=self.calibration_lamp_spectrum, 260 lamp_measured=self.spectrum, 261 ) 262 else: 263 raise ValueError("File type not supported. Use .txt, .csv or .npz.") 264 265 def load_calibration_factor_from_file(self, filename): 266 data = pandas.read_table(filename, sep="\t", keep_default_na=True, skiprows=0) 267 self.calibration_factor = np.interp( 268 self.wvl, np.array(data.iloc[:, 0]), np.array(data.iloc[:, 1]) 269 ) 270 271 def check_wvl_ascending(self): 272 if self.wvl is not None and np.any(np.diff(self.wvl)) < 0: 273 if np.all(np.diff(self.wvl) < 0): 274 self.wvl = np.flip(self.wvl) 275 self.spectrum = np.flip(self.spectrum) 276 else: 277 raise ValueError( 278 "Wavelength array must be either in ascending or descending order." 279 ) 280 # same for calib spectrum 281 if ( 282 self.calibration_lamp_wvl is not None 283 and np.any(np.diff(self.calibration_lamp_wvl)) < 0 284 ): 285 if np.all(np.diff(self.calibration_lamp_wvl) < 0): 286 self.calibration_lamp_wvl = np.flip(self.calibration_lamp_wvl) 287 self.calibration_lamp_spectrum = np.flip(self.calibration_lamp_spectrum) 288 else: 289 raise ValueError( 290 "Wavelength array must be either in ascending or descending order." 291 ) 292 return True 293 294 def load_calibration_lamp_data( 295 self, filename="./calibration_data/7315273LS-Deuterium-Halogen_CC-VISNIR.lmp" 296 ): 297 Data = pandas.read_table(filename) 298 self.calibration_lamp_wvl, self.calibration_lamp_spectrum = ( 299 np.array(Data.iloc[:, 0]), 300 np.array(Data.iloc[:, 1]), 301 ) 302 self.calibration_lamp_spectrum = gaussian_filter1d( 303 self.calibration_lamp_spectrum, 304 self.nm_smearing 305 / (self.calibration_lamp_wvl[1] - self.calibration_lamp_wvl[0]), 306 ) 307 self.check_wvl_ascending() 308 309 def plot_calibration_data( 310 self, 311 low_lim=None, 312 up_lim=None, 313 low_lim_y=None, 314 up_lim_y=None, 315 wavelength_ROI: list = [420, 800], 316 ): 317 """Plot the calibration data of the lamp, the measured lamp spectrum, and the calibration factor. 318 319 The method assumes that the true calibration lamp data and the measured spectrum of the calibration lamp are loaded in the object 320 The normalization of the three curves is arbitrary. By default the region between 420 and 800 nm is used for normalization. 321 322 Args: 323 low_lim, up_lim(float): Wavelength limits for the x-axis. 324 low_lim_y, up_lim_y (float): Intensity limits for the y-axis. 325 wavelength_ROI (list): [wvl1, wvl2] = Wavelength range for the normalization of the calibration factor (for display purposes). 326 Default is [420, 800] nm. 
327 """ 328 if low_lim is None or up_lim is None: 329 low_lim = np.min(self.wvl) 330 up_lim = np.max(self.wvl) 331 if low_lim_y is None or up_lim_y is None: 332 low_lim_y = 0 333 up_lim_y = 3 334 fig, ax = plt.subplots() 335 ax.plot( 336 self.wvl, 337 self.spectrum / np.max(self.spectrum), 338 label="spectrometer reading", 339 ) 340 multipl_factor_lamp_plot = ( 341 self.spectrum[ 342 (self.wvl > wavelength_ROI[0]) & (self.wvl < wavelength_ROI[1]) 343 ].mean() 344 / self.calibration_lamp_spectrum[ 345 (self.calibration_lamp_wvl > wavelength_ROI[0]) 346 & (self.calibration_lamp_wvl < wavelength_ROI[1]) 347 ].mean() 348 ) 349 350 ax.plot( 351 self.calibration_lamp_wvl, 352 self.calibration_lamp_spectrum 353 / np.max(self.spectrum) 354 * multipl_factor_lamp_plot, 355 label="actual lamp spectrum", 356 ) 357 if self.calibration_factor is not None: 358 ax.plot(self.wvl, self.calibration_factor, label="calibration factor") 359 ax.set_xlabel("Wavelength (nm)") 360 ax.set_ylabel("Intensity (Arb. unit)") 361 ax.legend(loc="upper right") 362 ax.set_xlim(low_lim, up_lim) 363 ax.set_ylim(low_lim_y, up_lim_y) 364 return fig 365 366 def calibrate_wavelength_axis(self, intercept: float, slope: float): 367 """Calibrate the wavelength axis of the spectrum using the given coefficient of a linear fit. 368 369 Form of the calibration: true_wavelength = intercept + slope * measured_wavelength 370 The function modifies the wavelength axis in place. 371 372 Args: 373 intercept: float, 374 slope: float 375 """ 376 if self.wvl is None or self.spectrum is None: 377 raise ValueError("Spectrum not loaded.") 378 self.wvl = intercept + slope * self.wvl 379 self.check_wvl_ascending() 380 381 def tukey_filter( 382 self, edge1: float, edge2: float, edge1_width: float, edge2_width: float 383 ): 384 """Applies a Tukey window to the spectrum (e.g. to remove noise at the edges). 385 386 Tukey window is defined as: 387 * cosine-shaped window between edge1-edge1_width/2 and edge1+edge1_width/2 388 * 1 between edge1+edge1_width/2 and edge2-edge2_width/2 389 * cosine-shaped window between edge2-edge2_width/2 and edge2+edge2_width/2 390 * 0 everywhere else 391 In this case, tukey window is computed in the wavelength domain. 392 393 Args: 394 edge1, edge2 (float): Wavelength limits for the Tukey window (edge2-edge1 = FWHM, in nm) 395 edge1_width, edge2_width (float): Width of the cosine-shaped edge of the tukey window (in nm). 396 """ 397 398 window = asymmetric_tukey_window( 399 self.wvl, edge1, edge2, edge1_width, edge2_width 400 ) 401 self.spectrum = self.spectrum * window 402 if self.calibration_factor is not None: 403 self.calibration_factor = self.calibration_factor * window 404 if ( 405 self.calibration_lamp_wvl is not None 406 and self.calibration_lamp_spectrum is not None 407 ): 408 window = asymmetric_tukey_window( 409 self.calibration_lamp_wvl, edge1, edge2, edge1_width, edge2_width 410 ) 411 self.calibration_lamp_spectrum = self.calibration_lamp_spectrum * window 412 413 def divide_by(self, spectrumObject, nm_smearing=0.0): 414 """normalizes the current spectrum with another spectrum, and stores the result in the present object. 415 416 Args: 417 spectrumObject (either 'c'/'calib'/'calibration'/'lamp' or SpectrumHandler object): 418 the current spectrum is divided at each wavelength by the loaded calibration lamp data or by the spectrum passed as SpectrumHandler object. 
419 nm_smearing (float): sigma in nm for the gaussian smoothing 420 """ 421 if ( 422 spectrumObject == "c" 423 or spectrumObject == "calib" 424 or spectrumObject == "calibration" 425 or spectrumObject == "lamp" 426 or spectrumObject == "calibration data" 427 ): 428 if ( 429 self.calibration_lamp_wvl is None 430 or self.calibration_lamp_spectrum is None 431 ): 432 raise ValueError("Calibration lamp data not loaded.") 433 wvl_divisor = self.calibration_lamp_wvl 434 spectrum_divisor = self.calibration_lamp_spectrum 435 elif not isinstance(spectrumObject, SpectrumHandler): 436 raise TypeError("Input of divide_by must be a SpectrumHandler object.") 437 else: 438 wvl_divisor, spectrum_divisor = spectrumObject.get_spectrum() 439 440 if nm_smearing > 0: 441 spectrum_divisor = gaussian_filter1d( 442 spectrum_divisor, sigma=nm_smearing / (wvl_divisor[1] - wvl_divisor[0]) 443 ) 444 self.spectrum = gaussian_filter1d( 445 self.spectrum, sigma=nm_smearing / (self.wvl[1] - self.wvl[0]) 446 ) 447 commonWvl = self.wvl[ 448 (self.wvl > np.min(wvl_divisor)) & (self.wvl < np.max(wvl_divisor)) 449 ] 450 interpolated_spd = np.interp(commonWvl, wvl_divisor, spectrum_divisor) 451 interpolated_original = np.interp(commonWvl, self.wvl, self.spectrum) 452 self.wvl = commonWvl 453 self.spectrum = interpolated_original / interpolated_spd 454 455 def add(self, spectrumObject): 456 """adds two spectra and stores the result in the present object 457 458 Args: 459 spectrumObject (SpectrumHandler): spectrum to be added 460 """ 461 if not isinstance(spectrumObject, SpectrumHandler): 462 raise TypeError("Input of add must be a SpectrumHandler object.") 463 wvl_add, spectrum_add = spectrumObject.get_spectrum() 464 commonWvl = self.wvl[ 465 (self.wvl > np.min(wvl_add)) & (self.wvl < np.max(wvl_add)) 466 ] 467 interpolated_spd = np.interp(commonWvl, wvl_add, spectrum_add) 468 interpolated_original = np.interp(commonWvl, self.wvl, self.spectrum) 469 self.wvl = commonWvl 470 self.spectrum = interpolated_original + interpolated_spd 471 472 def multiply(self, spectrumObject): 473 """multiplies the current spectrum by a second one at each wavelength. 474 475 Args: 476 spectrumObject (SpectrumHandler) 477 """ 478 if not isinstance(spectrumObject, SpectrumHandler): 479 raise TypeError("Input of multiply must be a SpectrumHandler object.") 480 wvl_mult, spectrum_mult = spectrumObject.get_spectrum() 481 commonWvl = self.wvl[ 482 (self.wvl > np.min(wvl_mult)) & (self.wvl < np.max(wvl_mult)) 483 ] 484 interpolated_spd = np.interp(commonWvl, wvl_mult, spectrum_mult) 485 interpolated_original = np.interp(commonWvl, self.wvl, self.spectrum) 486 self.wvl = commonWvl 487 self.spectrum = interpolated_original * interpolated_spd 488 489 def multiply_scalar(self, scalar): 490 """multiplies the current spectrum by a constant. 491 492 Args: 493 scalar (float) 494 """ 495 self.spectrum = self.spectrum * scalar 496 497 def add_scalar(self, scalar): 498 """adds a constant offset. 499 500 Args: 501 scalar (float) 502 """ 503 self.spectrum = self.spectrum + scalar 504 505 def compute_calibration_factor_spectrometer( 506 self, 507 transmission_additional_optics=None, 508 smoothing="poly", 509 extend_calibration: bool = False, 510 wavelength_ROI: list = [420, 800], 511 ): 512 """Computes the calibration factor. 
513 514 It assumes that in the SpectrumHandler object: 515 * the measured spectrum of the calibration lamp is loaded as main spectrum 516 * the true tabulated spectrum of the calibration lamp is loaded as self.calibration_lamp_spectrum [via load_calibration_lamp_data()] 517 518 Args: 519 transmission_additional_optics: list of SpectrumHandler objects = list of transmission spectra of additional optics to be taken into account when computing the calibration factor. 520 The transmission ['reflection' for mirrors] spectra of additional optics will be included (multiplied) in the calibration factor 521 Additional optics are assumed to be in the beam path of any standard measurement (e.g. integrating sphere) BUT NOT PRESENT during the calibration lamp measurement. 522 smoothing: Type of smoothing to be applied to the calibration factor. Options are 'poly' (default) None or an integer number. If 'poly', a polynomial fit is applied to the calibration factor. 523 If None, no smoothing is applied. If an integer number, a box filter of that size is applied to the calibration factor. 524 extend_calibration (bool): If True, the calibration factor is extended to the full wavelength range of the spectrometer (the first and last values of the calibration curve are used for the extension). 525 This is a quick and dirty solution to deal with the fact that the calibration lamp spectrum is not available for the full wavelength range of the spectrometer. 526 wavelength_ROI (list): [wvl1, wvl2] Wavelength range for the normalization of the calibration factor (for display purposes). 527 Default is [420, 800] nm.""" 528 if self.calibration_lamp_wvl is None or self.calibration_lamp_spectrum is None: 529 raise ValueError("Calibration lamp data not loaded.") 530 wvl_stored, spectrum_stored = self.wvl, self.spectrum 531 self.divide_by("calibration data") 532 533 eliminate_outliers(self.spectrum, threshold=3, window_points=20) 534 535 if transmission_additional_optics is not None: 536 for sao in transmission_additional_optics: 537 if not isinstance(sao, SpectrumHandler): 538 raise TypeError( 539 "Input of spectra_additional_optics must be a list of SpectrumHandler objects." 540 ) 541 wvl_sao, spectrum_sao = sao.get_spectrum() 542 if np.any(np.diff(wvl_sao)) < 0: 543 raise ValueError( 544 "Wavelength array of additional optics spectrum must be sorted in ascending order." 545 ) 546 wvl_sao = np.concatenate( 547 ( 548 np.array([0]), 549 wvl_sao, 550 np.array([max(np.max(self.wvl), np.max(wvl_sao))]), 551 ) 552 ) 553 spectrum_sao = np.concatenate( 554 ( 555 np.array([spectrum_sao[0]]), 556 spectrum_sao, 557 np.array([spectrum_sao[-1]]), 558 ) 559 ) 560 self.spectrum = self.spectrum * np.interp( 561 self.wvl, wvl_sao, spectrum_sao 562 ) 563 564 self.calibration_factor = self.spectrum 565 if smoothing is None: 566 pass 567 elif smoothing == "poly": 568 smoothed_cf = np.poly1d(np.polyfit(self.wvl, self.calibration_factor, 50)) 569 self.calibration_factor = smoothed_cf(self.wvl) 570 else: 571 if not isinstance(smoothing, int): 572 raise TypeError( 573 "Input of smoothing must be 'poly', None or an integer." 
574 ) 575 if smoothing < 1: 576 raise ValueError("Input of smoothing must be a positive integer.") 577 self.calibration_factor = box_smooth(self.calibration_factor, smoothing) 578 579 for i in range(len(self.wvl)): 580 if self.calibration_factor[i] < 0.01: 581 self.calibration_factor[i] = 0.01 582 self.calibration_factor = 1 / self.calibration_factor 583 self.calibration_factor = self.calibration_factor / np.mean( 584 self.calibration_factor[ 585 (self.wvl > wavelength_ROI[0]) & (self.wvl < wavelength_ROI[1]) 586 ] 587 ) 588 589 if extend_calibration: 590 # Extend the calibration factor to the full wavelength range of the spectrometer (pick the first and last values of the calibration curve for the extension) 591 self.wvl = np.concatenate( 592 ( 593 np.linspace(0, self.wvl[0], 100), 594 self.wvl, 595 np.linspace(self.wvl[-1], self.wvl[-1], 100), 596 ) 597 ) 598 self.calibration_factor = np.concatenate( 599 ( 600 np.ones(100) * self.calibration_factor[0], 601 self.calibration_factor, 602 np.ones(100) * self.calibration_factor[-1], 603 ) 604 ) 605 self.calibration_factor = np.interp( 606 wvl_stored, self.wvl, self.calibration_factor 607 ) 608 self.wvl = wvl_stored 609 self.spectrum = spectrum_stored 610 else: 611 self.spectrum = spectrum_stored[ 612 (wvl_stored > np.min(self.calibration_lamp_wvl)) 613 & (wvl_stored < np.max(self.calibration_lamp_wvl)) 614 ] 615 616 def plot_spectrum(self, low_lim=None, up_lim=None): 617 fig, ax = plt.subplots() 618 if low_lim is not None and up_lim is not None: 619 ax.plot( 620 self.wvl[(self.wvl > low_lim) & (self.wvl < up_lim)], 621 self.spectrum[(self.wvl > low_lim) & (self.wvl < up_lim)], 622 ) 623 else: 624 ax.plot(self.wvl, self.spectrum) 625 ax.set_xlabel("Wavelength (nm)") 626 ax.set_ylabel("Intensity (Arb. unit)") 627 return fig
Class to perform standard operations on spectra (calibration, plotting, division, multiplication, etc.)
164 def __init__( 165 self, 166 filename: str = None, 167 wavelengths=None, 168 spectrum=None, 169 remove_offsets_individually: bool = False, 170 nm_smearing=1.0, 171 eliminate_outliers_spectrum=False, 172 filetype="MayaScarab", 173 ): 174 """Provide either a filename or the wavelengths and spectrum arrays to the constructor. 175 176 Args: 177 filename (str): Path to the file containing the spectrum data. 178 wavelengths and spectrum (np.ndarray): = Wavelengths and spectrum data arrays. (alternative to filename) 179 remove_offsets_individually (bool): = If True, compute and remove offsets from each column individually 180 (filename is expected to have one column of wavelength data and arbitrarily many columns of equivalent spectra [to be averaged]). 181 nm_smearing (float): Smearing in nm to be applied to the spectrum. 182 eliminate_outliers_spectrum (bool): If True, eliminate single pixel outliers in the spectral data and replace them with average of surrounding values. 183 filetype (str): Type of the file to be read. Options are 'MayaScarab' (default) or 'OceanOptics'. 184 185 """ 186 self.nm_smearing = nm_smearing 187 self.wvl, self.spectrum = np.linspace(200, 1000, 800), np.zeros(800) 188 self.calibration_lamp_wvl, self.calibration_lamp_spectrum = None, None 189 self.calibration_factor = None 190 191 if filetype == "MayaScarab" and filename is not None: 192 self.wvl, self.spectrum = read_spectrum_maya( 193 filename, 194 remove_offsets_individually, 195 nm_smearing, 196 eliminate_outliers_spectrum, 197 ) 198 elif filetype == "OceanOptics" and filename is not None: 199 self.wvl, self.spectrum = read_spectrum_ocean_optics(filename) 200 elif wavelengths is not None and spectrum is not None: 201 self.wvl = wavelengths 202 self.spectrum = spectrum 203 self.check_wvl_ascending()
Provide either a filename or the wavelengths and spectrum arrays to the constructor.
Arguments:
- filename (str): Path to the file containing the spectrum data.
- wavelengths, spectrum (np.ndarray): Wavelength and spectrum data arrays (alternative to filename).
- remove_offsets_individually (bool): If True, compute and remove offsets from each column individually (the file is expected to have one column of wavelength data and arbitrarily many columns of equivalent spectra [to be averaged]).
- nm_smearing (float): Smearing in nm to be applied to the spectrum.
- eliminate_outliers_spectrum (bool): If True, eliminate single-pixel outliers in the spectral data and replace them with the average of the surrounding values.
- filetype (str): Type of the file to be read. Options are 'MayaScarab' (default) or 'OceanOptics'.
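A minimal construction sketch (the import path and file names below are hypothetical placeholders; adjust them to your installation):

import numpy as np
from spectrum_analysis_module import SpectrumHandler  # hypothetical import path

# From a spectrometer file (hypothetical file name), read as 'MayaScarab' by default:
sh = SpectrumHandler("measured_spectrum.txt", nm_smearing=1.0, eliminate_outliers_spectrum=True)

# Or directly from arrays:
wvl = np.linspace(300.0, 900.0, 1200)            # wavelength axis in nm
spec = np.exp(-(((wvl - 600.0) / 80.0) ** 2))    # synthetic spectrum for illustration
sh = SpectrumHandler(wavelengths=wvl, spectrum=spec)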
216 def calibrate(self): 217 """Applies the calibration factor to the spectrum and sets the calibration factor to None.""" 218 if self.calibration_factor is None: 219 print( 220 "Warning: Calibration factor not available. Try load_calibration_factor_from_file(). Returning original spectrum." 221 ) 222 else: 223 self.spectrum = self.spectrum * self.calibration_factor 224 self.calibration_factor = None 225 print( 226 "Calibration factor applied to the spectrum. Calibration factor is now set to None." 227 )
Applies the calibration factor to the spectrum and sets the calibration factor to None.
235 def save_calibration_factor_to_file(self, filename): 236 """Saves the calibration factor to a file. 237 238 The file can be either a .txt, .csv or .npz file. 239 The file will contain the wavelength, calibration factor, calibration lamp true spectrum and measured lamp spectrum. 240 """ 241 242 if self.calibration_factor is None: 243 raise ValueError("Calibration factor not computed.") 244 if ".txt" in filename or ".csv" in filename: 245 data = pandas.DataFrame( 246 { 247 "Wavelength": self.wvl, 248 "Calibration Factor": self.calibration_factor, 249 "Lamp Spectrum": self.calibration_lamp_spectrum, 250 "Measured Spectrum": self.spectrum, 251 } 252 ) 253 data.to_csv(filename, index=False, sep="\t") 254 elif ".npz" in filename: 255 np.savez( 256 filename, 257 wavelength=self.wvl, 258 corr_factor_smoothed=self.calibration_factor, 259 lamp_ref=self.calibration_lamp_spectrum, 260 lamp_measured=self.spectrum, 261 ) 262 else: 263 raise ValueError("File type not supported. Use .txt, .csv or .npz.")
Saves the calibration factor to a file.
The file can be a .txt, .csv, or .npz file; it will contain the wavelength axis, the calibration factor, the true calibration-lamp spectrum, and the measured lamp spectrum.
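For example, a computed calibration factor could be stored and later read back with plain numpy (the file name is hypothetical; the .npz keys are the ones used by the method above):

sh.save_calibration_factor_to_file("spectrometer_calibration.npz")  # hypothetical file name

import numpy as np
data = np.load("spectrometer_calibration.npz")
wvl = data["wavelength"]
calibration_factor = data["corr_factor_smoothed"]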
271 def check_wvl_ascending(self): 272 if self.wvl is not None and np.any(np.diff(self.wvl) < 0): 273 if np.all(np.diff(self.wvl) < 0): 274 self.wvl = np.flip(self.wvl) 275 self.spectrum = np.flip(self.spectrum) 276 else: 277 raise ValueError( 278 "Wavelength array must be either in ascending or descending order." 279 ) 280 # same for calib spectrum 281 if ( 282 self.calibration_lamp_wvl is not None 283 and np.any(np.diff(self.calibration_lamp_wvl) < 0) 284 ): 285 if np.all(np.diff(self.calibration_lamp_wvl) < 0): 286 self.calibration_lamp_wvl = np.flip(self.calibration_lamp_wvl) 287 self.calibration_lamp_spectrum = np.flip(self.calibration_lamp_spectrum) 288 else: 289 raise ValueError( 290 "Wavelength array must be either in ascending or descending order." 291 ) 292 return True
294 def load_calibration_lamp_data( 295 self, filename="./calibration_data/7315273LS-Deuterium-Halogen_CC-VISNIR.lmp" 296 ): 297 Data = pandas.read_table(filename) 298 self.calibration_lamp_wvl, self.calibration_lamp_spectrum = ( 299 np.array(Data.iloc[:, 0]), 300 np.array(Data.iloc[:, 1]), 301 ) 302 self.calibration_lamp_spectrum = gaussian_filter1d( 303 self.calibration_lamp_spectrum, 304 self.nm_smearing 305 / (self.calibration_lamp_wvl[1] - self.calibration_lamp_wvl[0]), 306 ) 307 self.check_wvl_ascending()
309 def plot_calibration_data( 310 self, 311 low_lim=None, 312 up_lim=None, 313 low_lim_y=None, 314 up_lim_y=None, 315 wavelength_ROI: list = [420, 800], 316 ): 317 """Plot the calibration data of the lamp, the measured lamp spectrum, and the calibration factor. 318 319 The method assumes that the true calibration lamp data and the measured spectrum of the calibration lamp are loaded in the object 320 The normalization of the three curves is arbitrary. By default the region between 420 and 800 nm is used for normalization. 321 322 Args: 323 low_lim, up_lim(float): Wavelength limits for the x-axis. 324 low_lim_y, up_lim_y (float): Intensity limits for the y-axis. 325 wavelength_ROI (list): [wvl1, wvl2] = Wavelength range for the normalization of the calibration factor (for display purposes). 326 Default is [420, 800] nm. 327 """ 328 if low_lim is None or up_lim is None: 329 low_lim = np.min(self.wvl) 330 up_lim = np.max(self.wvl) 331 if low_lim_y is None or up_lim_y is None: 332 low_lim_y = 0 333 up_lim_y = 3 334 fig, ax = plt.subplots() 335 ax.plot( 336 self.wvl, 337 self.spectrum / np.max(self.spectrum), 338 label="spectrometer reading", 339 ) 340 multipl_factor_lamp_plot = ( 341 self.spectrum[ 342 (self.wvl > wavelength_ROI[0]) & (self.wvl < wavelength_ROI[1]) 343 ].mean() 344 / self.calibration_lamp_spectrum[ 345 (self.calibration_lamp_wvl > wavelength_ROI[0]) 346 & (self.calibration_lamp_wvl < wavelength_ROI[1]) 347 ].mean() 348 ) 349 350 ax.plot( 351 self.calibration_lamp_wvl, 352 self.calibration_lamp_spectrum 353 / np.max(self.spectrum) 354 * multipl_factor_lamp_plot, 355 label="actual lamp spectrum", 356 ) 357 if self.calibration_factor is not None: 358 ax.plot(self.wvl, self.calibration_factor, label="calibration factor") 359 ax.set_xlabel("Wavelength (nm)") 360 ax.set_ylabel("Intensity (Arb. unit)") 361 ax.legend(loc="upper right") 362 ax.set_xlim(low_lim, up_lim) 363 ax.set_ylim(low_lim_y, up_lim_y) 364 return fig
Plot the calibration data of the lamp, the measured lamp spectrum, and the calibration factor.
The method assumes that the true calibration lamp data and the measured spectrum of the calibration lamp are loaded in the object. The normalization of the three curves is arbitrary; by default, the region between 420 and 800 nm is used for normalization.
Arguments:
- low_lim, up_lim (float): Wavelength limits for the x-axis.
- low_lim_y, up_lim_y (float): Intensity limits for the y-axis.
- wavelength_ROI (list): [wvl1, wvl2] wavelength range used for the normalization of the curves (for display purposes). Default is [420, 800] nm.
366 def calibrate_wavelength_axis(self, intercept: float, slope: float): 367 """Calibrate the wavelength axis of the spectrum using the given coefficient of a linear fit. 368 369 Form of the calibration: true_wavelength = intercept + slope * measured_wavelength 370 The function modifies the wavelength axis in place. 371 372 Args: 373 intercept: float, 374 slope: float 375 """ 376 if self.wvl is None or self.spectrum is None: 377 raise ValueError("Spectrum not loaded.") 378 self.wvl = intercept + slope * self.wvl 379 self.check_wvl_ascending()
Calibrate the wavelength axis of the spectrum using the given coefficients of a linear fit.
Form of the calibration: true_wavelength = intercept + slope * measured_wavelength. The method modifies the wavelength axis in place.
Arguments:
- intercept (float): offset of the linear calibration, in nm.
- slope (float): slope of the linear calibration (dimensionless).
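A sketch of how the two coefficients might be obtained from a few known spectral lines and then applied (the line positions below are made up for illustration):

import numpy as np

true_lines = np.array([435.8, 546.1, 763.5])      # reference wavelengths in nm (illustrative)
measured_lines = np.array([434.9, 545.7, 764.2])  # positions read off the uncalibrated axis (illustrative)

# Linear fit: true_wavelength = intercept + slope * measured_wavelength
slope, intercept = np.polyfit(measured_lines, true_lines, 1)
sh.calibrate_wavelength_axis(intercept=intercept, slope=slope)  # sh: a SpectrumHandler instance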
381 def tukey_filter( 382 self, edge1: float, edge2: float, edge1_width: float, edge2_width: float 383 ): 384 """Applies a Tukey window to the spectrum (e.g. to remove noise at the edges). 385 386 Tukey window is defined as: 387 * cosine-shaped window between edge1-edge1_width/2 and edge1+edge1_width/2 388 * 1 between edge1+edge1_width/2 and edge2-edge2_width/2 389 * cosine-shaped window between edge2-edge2_width/2 and edge2+edge2_width/2 390 * 0 everywhere else 391 In this case, tukey window is computed in the wavelength domain. 392 393 Args: 394 edge1, edge2 (float): Wavelength limits for the Tukey window (edge2-edge1 = FWHM, in nm) 395 edge1_width, edge2_width (float): Width of the cosine-shaped edge of the tukey window (in nm). 396 """ 397 398 window = asymmetric_tukey_window( 399 self.wvl, edge1, edge2, edge1_width, edge2_width 400 ) 401 self.spectrum = self.spectrum * window 402 if self.calibration_factor is not None: 403 self.calibration_factor = self.calibration_factor * window 404 if ( 405 self.calibration_lamp_wvl is not None 406 and self.calibration_lamp_spectrum is not None 407 ): 408 window = asymmetric_tukey_window( 409 self.calibration_lamp_wvl, edge1, edge2, edge1_width, edge2_width 410 ) 411 self.calibration_lamp_spectrum = self.calibration_lamp_spectrum * window
Applies a Tukey window to the spectrum (e.g. to remove noise at the edges).
The Tukey window is defined as:
- cosine-shaped window between edge1-edge1_width/2 and edge1+edge1_width/2
- 1 between edge1+edge1_width/2 and edge2-edge2_width/2
- cosine-shaped window between edge2-edge2_width/2 and edge2+edge2_width/2
- 0 everywhere else
In this case, the Tukey window is computed in the wavelength domain.
Arguments:
- edge1, edge2 (float): Wavelength limits for the Tukey window (edge2-edge1 = FWHM, in nm)
- edge1_width, edge2_width (float): Widths of the cosine-shaped edges of the Tukey window (in nm).
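A minimal NumPy sketch of a window with this shape, for illustration only (the package's own asymmetric_tukey_window may differ in details):

import numpy as np

def tukey_like_window(x, edge1, edge2, edge1_width, edge2_width):
    # 1 on the flat top, raised-cosine transitions of the given widths around each edge, 0 elsewhere
    w = np.zeros_like(x, dtype=float)
    w[(x >= edge1 + edge1_width / 2) & (x <= edge2 - edge2_width / 2)] = 1.0
    rise = (x > edge1 - edge1_width / 2) & (x < edge1 + edge1_width / 2)
    w[rise] = 0.5 * (1 + np.cos(np.pi * (x[rise] - edge1 - edge1_width / 2) / edge1_width))
    fall = (x > edge2 - edge2_width / 2) & (x < edge2 + edge2_width / 2)
    w[fall] = 0.5 * (1 + np.cos(np.pi * (x[fall] - edge2 + edge2_width / 2) / edge2_width))
    return w

# Applying the filter to a SpectrumHandler only needs the four edge parameters, e.g.:
# sh.tukey_filter(edge1=350.0, edge2=900.0, edge1_width=20.0, edge2_width=40.0)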
413 def divide_by(self, spectrumObject, nm_smearing=0.0): 414 """normalizes the current spectrum with another spectrum, and stores the result in the present object. 415 416 Args: 417 spectrumObject (either 'c'/'calib'/'calibration'/'lamp' or SpectrumHandler object): 418 the current spectrum is divided at each wavelength by the loaded calibration lamp data or by the spectrum passed as SpectrumHandler object. 419 nm_smearing (float): sigma in nm for the gaussian smoothing 420 """ 421 if ( 422 spectrumObject == "c" 423 or spectrumObject == "calib" 424 or spectrumObject == "calibration" 425 or spectrumObject == "lamp" 426 or spectrumObject == "calibration data" 427 ): 428 if ( 429 self.calibration_lamp_wvl is None 430 or self.calibration_lamp_spectrum is None 431 ): 432 raise ValueError("Calibration lamp data not loaded.") 433 wvl_divisor = self.calibration_lamp_wvl 434 spectrum_divisor = self.calibration_lamp_spectrum 435 elif not isinstance(spectrumObject, SpectrumHandler): 436 raise TypeError("Input of divide_by must be a SpectrumHandler object.") 437 else: 438 wvl_divisor, spectrum_divisor = spectrumObject.get_spectrum() 439 440 if nm_smearing > 0: 441 spectrum_divisor = gaussian_filter1d( 442 spectrum_divisor, sigma=nm_smearing / (wvl_divisor[1] - wvl_divisor[0]) 443 ) 444 self.spectrum = gaussian_filter1d( 445 self.spectrum, sigma=nm_smearing / (self.wvl[1] - self.wvl[0]) 446 ) 447 commonWvl = self.wvl[ 448 (self.wvl > np.min(wvl_divisor)) & (self.wvl < np.max(wvl_divisor)) 449 ] 450 interpolated_spd = np.interp(commonWvl, wvl_divisor, spectrum_divisor) 451 interpolated_original = np.interp(commonWvl, self.wvl, self.spectrum) 452 self.wvl = commonWvl 453 self.spectrum = interpolated_original / interpolated_spd
Normalizes the current spectrum by another spectrum and stores the result in the present object.
Arguments:
- spectrumObject ('c'/'calib'/'calibration'/'lamp'/'calibration data' or a SpectrumHandler object): the current spectrum is divided at each wavelength by the loaded calibration lamp data or by the spectrum passed as a SpectrumHandler object.
- nm_smearing (float): sigma in nm of the Gaussian smoothing applied to both spectra before the division (0 disables smoothing).
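For instance, a transmission measurement could be referenced to a measurement taken without the sample (file names are hypothetical):

sample = SpectrumHandler("with_sample.txt")        # hypothetical file
reference = SpectrumHandler("without_sample.txt")  # hypothetical file
sample.divide_by(reference, nm_smearing=1.0)       # sample now holds the ratio of the two spectra
# Dividing by the loaded calibration lamp data instead:
# sample.divide_by("lamp")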
455 def add(self, spectrumObject): 456 """adds two spectra and stores the result in the present object 457 458 Args: 459 spectrumObject (SpectrumHandler): spectrum to be added 460 """ 461 if not isinstance(spectrumObject, SpectrumHandler): 462 raise TypeError("Input of add must be a SpectrumHandler object.") 463 wvl_add, spectrum_add = spectrumObject.get_spectrum() 464 commonWvl = self.wvl[ 465 (self.wvl > np.min(wvl_add)) & (self.wvl < np.max(wvl_add)) 466 ] 467 interpolated_spd = np.interp(commonWvl, wvl_add, spectrum_add) 468 interpolated_original = np.interp(commonWvl, self.wvl, self.spectrum) 469 self.wvl = commonWvl 470 self.spectrum = interpolated_original + interpolated_spd
Adds two spectra and stores the result in the present object.
Arguments:
- spectrumObject (SpectrumHandler): spectrum to be added
472 def multiply(self, spectrumObject): 473 """multiplies the current spectrum by a second one at each wavelength. 474 475 Args: 476 spectrumObject (SpectrumHandler) 477 """ 478 if not isinstance(spectrumObject, SpectrumHandler): 479 raise TypeError("Input of multiply must be a SpectrumHandler object.") 480 wvl_mult, spectrum_mult = spectrumObject.get_spectrum() 481 commonWvl = self.wvl[ 482 (self.wvl > np.min(wvl_mult)) & (self.wvl < np.max(wvl_mult)) 483 ] 484 interpolated_spd = np.interp(commonWvl, wvl_mult, spectrum_mult) 485 interpolated_original = np.interp(commonWvl, self.wvl, self.spectrum) 486 self.wvl = commonWvl 487 self.spectrum = interpolated_original * interpolated_spd
multiplies the current spectrum by a second one at each wavelength.
Arguments:
- spectrumObject (SpectrumHandler)
489 def multiply_scalar(self, scalar): 490 """multiplies the current spectrum by a constant. 491 492 Args: 493 scalar (float) 494 """ 495 self.spectrum = self.spectrum * scalar
multiplies the current spectrum by a constant.
Arguments:
- scalar (float)
497 def add_scalar(self, scalar): 498 """adds a constant offset. 499 500 Args: 501 scalar (float) 502 """ 503 self.spectrum = self.spectrum + scalar
adds a constant offset.
Arguments:
- scalar (float)
505 def compute_calibration_factor_spectrometer( 506 self, 507 transmission_additional_optics=None, 508 smoothing="poly", 509 extend_calibration: bool = False, 510 wavelength_ROI: list = [420, 800], 511 ): 512 """Computes the calibration factor. 513 514 It assumes that in the SpectrumHandler object: 515 * the measured spectrum of the calibration lamp is loaded as main spectrum 516 * the true tabulated spectrum of the calibration lamp is loaded as self.calibration_lamp_spectrum [via load_calibration_lamp_data()] 517 518 Args: 519 transmission_additional_optics: list of SpectrumHandler objects = list of transmission spectra of additional optics to be taken into account when computing the calibration factor. 520 The transmission ['reflection' for mirrors] spectra of additional optics will be included (multiplied) in the calibration factor 521 Additional optics are assumed to be in the beam path of any standard measurement (e.g. integrating sphere) BUT NOT PRESENT during the calibration lamp measurement. 522 smoothing: Type of smoothing to be applied to the calibration factor. Options are 'poly' (default) None or an integer number. If 'poly', a polynomial fit is applied to the calibration factor. 523 If None, no smoothing is applied. If an integer number, a box filter of that size is applied to the calibration factor. 524 extend_calibration (bool): If True, the calibration factor is extended to the full wavelength range of the spectrometer (the first and last values of the calibration curve are used for the extension). 525 This is a quick and dirty solution to deal with the fact that the calibration lamp spectrum is not available for the full wavelength range of the spectrometer. 526 wavelength_ROI (list): [wvl1, wvl2] Wavelength range for the normalization of the calibration factor (for display purposes). 527 Default is [420, 800] nm.""" 528 if self.calibration_lamp_wvl is None or self.calibration_lamp_spectrum is None: 529 raise ValueError("Calibration lamp data not loaded.") 530 wvl_stored, spectrum_stored = self.wvl, self.spectrum 531 self.divide_by("calibration data") 532 533 eliminate_outliers(self.spectrum, threshold=3, window_points=20) 534 535 if transmission_additional_optics is not None: 536 for sao in transmission_additional_optics: 537 if not isinstance(sao, SpectrumHandler): 538 raise TypeError( 539 "Input of spectra_additional_optics must be a list of SpectrumHandler objects." 540 ) 541 wvl_sao, spectrum_sao = sao.get_spectrum() 542 if np.any(np.diff(wvl_sao)) < 0: 543 raise ValueError( 544 "Wavelength array of additional optics spectrum must be sorted in ascending order." 545 ) 546 wvl_sao = np.concatenate( 547 ( 548 np.array([0]), 549 wvl_sao, 550 np.array([max(np.max(self.wvl), np.max(wvl_sao))]), 551 ) 552 ) 553 spectrum_sao = np.concatenate( 554 ( 555 np.array([spectrum_sao[0]]), 556 spectrum_sao, 557 np.array([spectrum_sao[-1]]), 558 ) 559 ) 560 self.spectrum = self.spectrum * np.interp( 561 self.wvl, wvl_sao, spectrum_sao 562 ) 563 564 self.calibration_factor = self.spectrum 565 if smoothing is None: 566 pass 567 elif smoothing == "poly": 568 smoothed_cf = np.poly1d(np.polyfit(self.wvl, self.calibration_factor, 50)) 569 self.calibration_factor = smoothed_cf(self.wvl) 570 else: 571 if not isinstance(smoothing, int): 572 raise TypeError( 573 "Input of smoothing must be 'poly', None or an integer." 
574 ) 575 if smoothing < 1: 576 raise ValueError("Input of smoothing must be a positive integer.") 577 self.calibration_factor = box_smooth(self.calibration_factor, smoothing) 578 579 for i in range(len(self.wvl)): 580 if self.calibration_factor[i] < 0.01: 581 self.calibration_factor[i] = 0.01 582 self.calibration_factor = 1 / self.calibration_factor 583 self.calibration_factor = self.calibration_factor / np.mean( 584 self.calibration_factor[ 585 (self.wvl > wavelength_ROI[0]) & (self.wvl < wavelength_ROI[1]) 586 ] 587 ) 588 589 if extend_calibration: 590 # Extend the calibration factor to the full wavelength range of the spectrometer (pick the first and last values of the calibration curve for the extension) 591 self.wvl = np.concatenate( 592 ( 593 np.linspace(0, self.wvl[0], 100), 594 self.wvl, 595 np.linspace(self.wvl[-1], self.wvl[-1], 100), 596 ) 597 ) 598 self.calibration_factor = np.concatenate( 599 ( 600 np.ones(100) * self.calibration_factor[0], 601 self.calibration_factor, 602 np.ones(100) * self.calibration_factor[-1], 603 ) 604 ) 605 self.calibration_factor = np.interp( 606 wvl_stored, self.wvl, self.calibration_factor 607 ) 608 self.wvl = wvl_stored 609 self.spectrum = spectrum_stored 610 else: 611 self.spectrum = spectrum_stored[ 612 (wvl_stored > np.min(self.calibration_lamp_wvl)) 613 & (wvl_stored < np.max(self.calibration_lamp_wvl)) 614 ]
Computes the calibration factor.
It assumes that in the SpectrumHandler object:
- the measured spectrum of the calibration lamp is loaded as main spectrum
- the true tabulated spectrum of the calibration lamp is loaded as self.calibration_lamp_spectrum [via load_calibration_lamp_data()]
Arguments:
- transmission_additional_optics (list of SpectrumHandler): transmission spectra of additional optics to be taken into account when computing the calibration factor. The transmission (reflection, for mirrors) spectra of these optics are included (multiplied) in the calibration factor. The additional optics are assumed to be in the beam path of any standard measurement (e.g. with an integrating sphere) but NOT present during the calibration lamp measurement.
- smoothing: Type of smoothing to be applied to the calibration factor. Options are 'poly' (default), None, or an integer. If 'poly', a polynomial fit is applied to the calibration factor; if None, no smoothing is applied; if an integer, a box filter of that size is applied to the calibration factor.
- extend_calibration (bool): If True, the calibration factor is extended to the full wavelength range of the spectrometer (the first and last values of the calibration curve are used for the extension). This is a quick and dirty solution to deal with the fact that the calibration lamp spectrum is not available for the full wavelength range of the spectrometer.
- wavelength_ROI (list): [wvl1, wvl2] Wavelength range for the normalization of the calibration factor (for display purposes). Default is [420, 800] nm.
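A possible end-to-end calibration workflow, sketched with hypothetical file names (the .lmp path is the default of load_calibration_lamp_data); copying the factor onto a second object by hand is a shortcut for illustration, not necessarily the intended route (calibrate() refers to a load_calibration_factor_from_file() method for that purpose):

import numpy as np

# 1) Measured spectrum of the calibration lamp (hypothetical file name)
lamp_measurement = SpectrumHandler("lamp_through_spectrometer.txt")

# 2) Tabulated (true) lamp spectrum
lamp_measurement.load_calibration_lamp_data(
    "./calibration_data/7315273LS-Deuterium-Halogen_CC-VISNIR.lmp"
)

# 3) Compute and store the calibration factor
lamp_measurement.compute_calibration_factor_spectrometer(smoothing="poly", extend_calibration=True)
lamp_measurement.save_calibration_factor_to_file("spectrometer_calibration.npz")

# 4) Apply it to an actual measurement (hypothetical file name)
measurement = SpectrumHandler("sample_spectrum.txt")
measurement.calibration_factor = np.interp(
    measurement.wvl, lamp_measurement.wvl, lamp_measurement.calibration_factor
)
measurement.calibrate()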
616 def plot_spectrum(self, low_lim=None, up_lim=None): 617 fig, ax = plt.subplots() 618 if low_lim is not None and up_lim is not None: 619 ax.plot( 620 self.wvl[(self.wvl > low_lim) & (self.wvl < up_lim)], 621 self.spectrum[(self.wvl > low_lim) & (self.wvl < up_lim)], 622 ) 623 else: 624 ax.plot(self.wvl, self.spectrum) 625 ax.set_xlabel("Wavelength (nm)") 626 ax.set_ylabel("Intensity (Arb. unit)") 627 return fig
630class MultiSpectrumHandler: 631 """Stores and plots multiple spectra""" 632 633 def __init__( 634 self, 635 filenameList: list = None, 636 wavelengthList: list = None, 637 spectrumList: list = None, 638 spectrumHandlerList: list = None, 639 remove_offsets_individually: bool = False, 640 nm_smearing=1.0, 641 eliminate_outliers_spectrum=False, 642 filetype="MayaScarab", 643 ): 644 """Constructor for the MultiSpectrumHandler class. 645 646 Args: 647 filenameList: list = List of filenames to be read. 648 wavelengthList: list = List of wavelength arrays. (alternative to filename) 649 spectrumList: list = List of spectrum arrays. (alternative to filename) 650 remove_offsets_individually: bool = If True, compute and remove offsets from each column individually 651 (filename is expected to have one column of wavelength data and arbitrarily many columns of equivalent spectra [to be averaged]). 652 nm_smearing: float = Smearing in nm to be applied to the spectrum. 653 eliminate_outliers_spectrum: bool = If True, eliminate single pixel outliers in the spectral data and replace them with average of surrounding values. 654 filetype: str = Type of the file to be read. Options are 'MayaScarab' (default) or 'OceanOptics'.""" 655 self.spectrumHandlers = [] 656 if filenameList is not None: 657 for filename in filenameList: 658 self.spectrumHandlers.append( 659 SpectrumHandler( 660 filename=filename, 661 remove_offsets_individually=remove_offsets_individually, 662 nm_smearing=nm_smearing, 663 eliminate_outliers_spectrum=eliminate_outliers_spectrum, 664 filetype=filetype, 665 ) 666 ) 667 elif wavelengthList is not None and spectrumList is not None: 668 for i in range(len(wavelengthList)): 669 self.spectrumHandlers.append( 670 SpectrumHandler( 671 wavelengths=wavelengthList[i], 672 spectrum=spectrumList[i], 673 remove_offsets_individually=remove_offsets_individually, 674 nm_smearing=nm_smearing, 675 eliminate_outliers_spectrum=eliminate_outliers_spectrum, 676 filetype=filetype, 677 ) 678 ) 679 elif spectrumHandlerList is not None: 680 for spectrumHandler in spectrumHandlerList: 681 if not isinstance(spectrumHandler, SpectrumHandler): 682 raise TypeError( 683 "Input of spectrumHandlerList must be a list of SpectrumHandler objects." 684 ) 685 self.spectrumHandlers.append(spectrumHandler) 686 else: 687 raise ValueError( 688 "Either filenameList or wavelengthList and spectrumList or spectrumHandlerList must be provided. filetype, if provided, can only be 'MayaScarab' or 'OceanOptics'." 689 ) 690 691 def plot(self, low_lim=None, up_lim=None): 692 fig, ax = plt.subplots() 693 for i, spectrumHandler in enumerate(self.spectrumHandlers): 694 wvl, spectrum = spectrumHandler.get_spectrum() 695 if low_lim is not None and up_lim is not None: 696 ax.plot( 697 wvl[(wvl > low_lim) & (wvl < up_lim)], 698 spectrum[(wvl > low_lim) & (wvl < up_lim)], 699 label=f"Spectrum {i + 1}", 700 ) 701 else: 702 ax.plot(wvl, spectrum, label=f"Spectrum {i + 1}") 703 ax.set_xlabel("Wavelength (nm)") 704 ax.set_ylabel("Intensity (Arb. unit)") 705 ax.legend(loc="upper right") 706 return fig
Stores and plots multiple spectra
633 def __init__( 634 self, 635 filenameList: list = None, 636 wavelengthList: list = None, 637 spectrumList: list = None, 638 spectrumHandlerList: list = None, 639 remove_offsets_individually: bool = False, 640 nm_smearing=1.0, 641 eliminate_outliers_spectrum=False, 642 filetype="MayaScarab", 643 ): 644 """Constructor for the MultiSpectrumHandler class. 645 646 Args: 647 filenameList: list = List of filenames to be read. 648 wavelengthList: list = List of wavelength arrays. (alternative to filename) 649 spectrumList: list = List of spectrum arrays. (alternative to filename) 650 remove_offsets_individually: bool = If True, compute and remove offsets from each column individually 651 (filename is expected to have one column of wavelength data and arbitrarily many columns of equivalent spectra [to be averaged]). 652 nm_smearing: float = Smearing in nm to be applied to the spectrum. 653 eliminate_outliers_spectrum: bool = If True, eliminate single pixel outliers in the spectral data and replace them with average of surrounding values. 654 filetype: str = Type of the file to be read. Options are 'MayaScarab' (default) or 'OceanOptics'.""" 655 self.spectrumHandlers = [] 656 if filenameList is not None: 657 for filename in filenameList: 658 self.spectrumHandlers.append( 659 SpectrumHandler( 660 filename=filename, 661 remove_offsets_individually=remove_offsets_individually, 662 nm_smearing=nm_smearing, 663 eliminate_outliers_spectrum=eliminate_outliers_spectrum, 664 filetype=filetype, 665 ) 666 ) 667 elif wavelengthList is not None and spectrumList is not None: 668 for i in range(len(wavelengthList)): 669 self.spectrumHandlers.append( 670 SpectrumHandler( 671 wavelengths=wavelengthList[i], 672 spectrum=spectrumList[i], 673 remove_offsets_individually=remove_offsets_individually, 674 nm_smearing=nm_smearing, 675 eliminate_outliers_spectrum=eliminate_outliers_spectrum, 676 filetype=filetype, 677 ) 678 ) 679 elif spectrumHandlerList is not None: 680 for spectrumHandler in spectrumHandlerList: 681 if not isinstance(spectrumHandler, SpectrumHandler): 682 raise TypeError( 683 "Input of spectrumHandlerList must be a list of SpectrumHandler objects." 684 ) 685 self.spectrumHandlers.append(spectrumHandler) 686 else: 687 raise ValueError( 688 "Either filenameList or wavelengthList and spectrumList or spectrumHandlerList must be provided. filetype, if provided, can only be 'MayaScarab' or 'OceanOptics'." 689 )
Constructor for the MultiSpectrumHandler class.
Arguments:
- filenameList: list = List of filenames to be read.
- wavelengthList: list = List of wavelength arrays (alternative to filenameList).
- spectrumList: list = List of spectrum arrays (alternative to filenameList).
- spectrumHandlerList: list = List of existing SpectrumHandler objects to be stored directly (alternative to the other inputs).
- remove_offsets_individually: bool = If True, compute and remove offsets from each column individually (filename is expected to have one column of wavelength data and arbitrarily many columns of equivalent spectra [to be averaged]).
- nm_smearing: float = Smearing in nm to be applied to the spectrum.
- eliminate_outliers_spectrum: bool = If True, eliminate single pixel outliers in the spectral data and replace them with average of surrounding values.
- filetype: str = Type of the file to be read. Options are 'MayaScarab' (default) or 'OceanOptics'.
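A brief usage sketch (file names are hypothetical):

msh = MultiSpectrumHandler(
    filenameList=["scan_001.txt", "scan_002.txt", "scan_003.txt"],
    nm_smearing=1.0,
    filetype="MayaScarab",
)
fig = msh.plot(low_lim=350, up_lim=900)

# Alternatively, wrap SpectrumHandler objects that already exist:
# msh = MultiSpectrumHandler(spectrumHandlerList=[sample, reference])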
691 def plot(self, low_lim=None, up_lim=None): 692 fig, ax = plt.subplots() 693 for i, spectrumHandler in enumerate(self.spectrumHandlers): 694 wvl, spectrum = spectrumHandler.get_spectrum() 695 if low_lim is not None and up_lim is not None: 696 ax.plot( 697 wvl[(wvl > low_lim) & (wvl < up_lim)], 698 spectrum[(wvl > low_lim) & (wvl < up_lim)], 699 label=f"Spectrum {i + 1}", 700 ) 701 else: 702 ax.plot(wvl, spectrum, label=f"Spectrum {i + 1}") 703 ax.set_xlabel("Wavelength (nm)") 704 ax.set_ylabel("Intensity (Arb. unit)") 705 ax.legend(loc="upper right") 706 return fig