diff --git a/docs/src/4_extra_options/including_dust_continuum.rst b/docs/src/4_extra_options/including_dust_continuum.rst new file mode 100644 index 00000000..b11c823f --- /dev/null +++ b/docs/src/4_extra_options/including_dust_continuum.rst @@ -0,0 +1,41 @@ +Including dust or continuum sources in Magritte +=============================================== + +Starting from Magritte version 0.9.11, you can include additional continuum sources in your model. +These are treated in LTE; therefore you need to provide a dust/continuum opacity table (per point, per frequency) and temperature (per point). +This can be done using: + +.. code-block:: python + + model.dust.dust_opacities.set(continuum_opacity_table)# in 1/m dims: [N_points, N_continuum_frequencies] + model.dust.dust_frequencies.set(frequency_table)# in Hz dims: [N_continuum_frequencies] + model.dust.dust_temperature.set(temperature_table)# in K dims: [N_points] + +Evidently, we also provide some tools to help calculate the dust opacities from tabulated data. +In particular, if you have a table from the Jena dust database (https://www2.astro.uni-jena.de/Laboratory/OCDB/), you can use the following commands: + +.. code-block:: python + + import magritte.tools as tools + continuum_reference_density = astropy.Quantity # in mass density [e.g. g/cm^3]; given by the continuum data; used for rescaling the values + frequency_grid, complex_refractive_index = tools.read_dust_opacity_table(continuum_file_location, ASTROPY_UNIT_OF_FREQ_WAVELENGTH_...) 
+ # reads the file and returns the frequency grid (first column), multiplied by whatever astropy unit you specified, and the complex refractive index (unitless, third column) as a numpy array + frequency_grid_Hz, absorption_coeff_per_density = tools.convert_dust_opacity_table_to_SI(frequency_grid, complex_refractive_index, continuum_reference_density) + # returns the frequency grid in Hz and absorption coeff in m^2/kg (needs to be multiplied with the mass density to get the absorption coeff per m) + #for this example, assume we scale the continuum/dust number density with the H2 number density + continuum_opacity_table = (absorption_coeff_per_density/1000) * fraction_density_dust * nH2 * 2.016 / 6.022e23 #H2: 2.016 g/mol, 6.022e23 particles/mol -> particles/m3 to g/m3 + +In which the last step converts the absorption coefficient per mass density (m^2/kg) to an absorption coefficient per number density (1/m). +In this release, we also included options to image the model using a custom frequency grid (given that equidistant grids might not be optimal for imaging the continuum). + +.. code-block:: python + + # Instead of model.compute_spectral_discretization(...) + model.set_custom_spectral_discretization(frequency_grid_Hz)# np.array, in Hz, dims: [N_frequencies_to_image] + model.compute_image_new(...) #as usual + + +.. Warning:: + + Even though you might have a model for which you only have continuum sources (no line sources), you still need to include a dummy species producing lines. + You can even set its abundance to zero, but Magritte needs at least one species to properly initialize a model. See tests/benchmarks/analytic/continuum_implementation.py for an example. \ No newline at end of file diff --git a/docs/src/4_extra_options/index.rst b/docs/src/4_extra_options/index.rst index d2aa3cb3..a127a690 100644 --- a/docs/src/4_extra_options/index.rst +++ b/docs/src/4_extra_options/index.rst @@ -11,3 +11,4 @@ we will explain a few interesting options for our users. 
other_solvers adaptive_ng_acceleration NLTE_in_low_density + including_dust_continuum diff --git a/magritte/tools.py b/magritte/tools.py index 87d593c0..35c2186b 100644 --- a/magritte/tools.py +++ b/magritte/tools.py @@ -3,6 +3,7 @@ from datetime import datetime from time import perf_counter +import pandas as pd from astropy.io import fits from astropy import units, constants from scipy.interpolate import griddata, interp1d @@ -775,3 +776,97 @@ def check_one_line_approximation(model): contribution = np.exp(-diff_max**2) return contribution + + +def convert_dust_opacity_table_to_SI(wavelengths_wavenumbers_wavefrequencies: units.Quantity, complex_refractory_index: np.ndarray, reference_density: units.Quantity, new_frequency_grid: units.Quantity=None, left_fill_value: float=0.0, right_fill_value: float=0.0): + """Convert a dust opacity table (wavelengths, wavenumbers or frequencies) with complex refractive index to absorption coefficient in SI units (m^-1) and rescaled to unit density (kg/m^3). + + Args: + wavelengths_wavenumbers_wavefrequencies (Astropy.units.Quantity): 1D array containing the wavelengths, wavenumbers or frequencies in compatible units (m, m^-1 or Hz). + complex_refractory_index (np.ndarray): 1D array containing the complex refractive index values (unitless). Same length as `wavelengths_wavenumbers_wavefrequencies`. + reference_density (astropy.Quantity): Reference density in units compatible with kg/m^3, used for rescaling the absorption coefficient. + new_frequency_grid (Astropy.units.Quantity, optional): New frequency grid to interpolate values upon. If None, use `wavelengths_wavenumbers_wavefrequencies` instead. Defaults to None. + left_fill_value (float, optional): Fill value for dust opacity (in m^2/kg) for frequencies lower than the input frequency range. Defaults to 0.0. + right_fill_value (float, optional): Fill value for dust opacity (in m^2/kg) for frequencies higher than the input frequency range. Defaults to 0.0. 
+ + Raises: + ValueError: Incompatible units for `wavelengths_wavenumbers_wavefrequencies` or `reference_density` or `new_frequency_grid`. + TypeError: `wavelengths_wavenumbers_wavefrequencies` or `reference_density` or `new_frequency_grid` is not an Astropy Quantity. + + Returns: + tuple(Astropy.units.Quantity, Astropy.units.Quantity): frequencies in Hz, rescaled absorption coefficient in m^2/kg (needs to be multiplied by density in kg/m^3 to get absorption coefficient in m^-1). + """ + + frequencies_in_hz = None + wavelengths_in_m = None + reference_density_in_kg_per_m3 = None + #first check whether the wavelengths_or_wavenumbers have astropy units compatible with m (wavelength), m^-1 (wavenumber) or Hz (frequency) + #if so, convert to Hz + if isinstance(wavelengths_wavenumbers_wavefrequencies, units.Quantity): + if wavelengths_wavenumbers_wavefrequencies.unit.is_equivalent(units.m) or wavelengths_wavenumbers_wavefrequencies.unit.is_equivalent(1.0/units.m) or wavelengths_wavenumbers_wavefrequencies.unit.is_equivalent(units.Hz): + # Convert to Hz + frequencies_in_hz = wavelengths_wavenumbers_wavefrequencies.to(units.Hz, equivalencies=units.spectral()) + wavelengths_in_m = wavelengths_wavenumbers_wavefrequencies.to(units.m, equivalencies=units.spectral()) + pass + else: + raise ValueError("Input must be in units compatible to m, m^-1 or Hz.") + else: + raise TypeError("Input must be an astropy Quantity. 
This is for ease of unit conversion.") + + if isinstance(reference_density, units.Quantity): + if not reference_density.unit.is_equivalent(units.kg/units.m**3): + raise ValueError("Reference density must be in units compatible to kg/m^3.") + # Convert to kg/m^3 + reference_density_in_kg_per_m3 = reference_density.to(units.kg/units.m**3) + else: + raise TypeError("Reference density must be an astropy Quantity.") + + + # Convert complex refractory index to absorption coefficient + absorption_coefficient = 4*np.pi * complex_refractory_index / wavelengths_in_m #[m^-1] + #rescale to unit density (kg/m^3) + rescaled_absorption_coefficient = absorption_coefficient / reference_density_in_kg_per_m3 #[m^2/kg] + + #co-sort frequencies and rescaled absorption coefficient (because of possible unit conversion) + sorted_indices = np.argsort(frequencies_in_hz) + frequencies_in_hz = frequencies_in_hz[sorted_indices] + rescaled_absorption_coefficient = rescaled_absorption_coefficient[sorted_indices] + + if new_frequency_grid is None: + return frequencies_in_hz, rescaled_absorption_coefficient + else: + new_frequency_grid_in_hz = None + # Convert new frequency grid to Hz if it is not already + if isinstance(new_frequency_grid, units.Quantity): + if new_frequency_grid.unit.is_equivalent(units.m) or new_frequency_grid.unit.is_equivalent(1.0/units.m) or new_frequency_grid.unit.is_equivalent(units.Hz): + # Convert to Hz + new_frequency_grid_in_hz = new_frequency_grid.to(units.Hz, equivalencies=units.spectral()) + else: + raise ValueError("New frequency grid must be in units compatible to m, m^-1 or Hz.") + else: + raise TypeError("New frequency grid must be an astropy Quantity in Hz.") + # Interpolate the data onto the new frequency grid + # interpolation = interp1d(frequencies_in_hz, rescaled_absorption_coefficient, bounds_error=False, fill_value=0.0) + interpolated_rescaled_absorption_coefficient = np.interp(new_frequency_grid_in_hz, frequencies_in_hz, rescaled_absorption_coefficient, 
left=left_fill_value, right=right_fill_value) + return new_frequency_grid_in_hz, interpolated_rescaled_absorption_coefficient + + +def read_dust_opacity_table(filename: str, wavelength_frequency_unit: units.Unit): + """Simple script for reading headerless dust opacity data files (with file format like the Jena dust database). + Returns the first column (wavelengths, wavenumbers or frequencies) and the third column (complex refractive index). + + Args: + filename (str): Location of the dust opacity table file. + wavelength_frequency_unit (Astropy.units.Unit): Unit of the first column (wavelengths, wavenumbers or frequencies). + + Returns: + tuple(Astropy.units.Quantity, np.ndarray): wavelengths or wavenumbers or frequencies, complex refractive index + """ + reader = pd.read_csv(filename, sep = r'\s+', header=None, comment='#') + wavelength_frequency = np.array(reader[0]) * wavelength_frequency_unit # wavelengths or wavenumbers or frequencies + complex_refractive_index = np.array(reader[2]) # complex refractive index; unitless + + + return wavelength_frequency, complex_refractive_index + + diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index bab89e5e..1f444bcd 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -26,6 +26,7 @@ set (SOURCE_FILES model/radiation/radiation.cpp model/radiation/frequencies/frequencies.cpp model/image/image.cpp + model/dust/dust.cpp solver/solver.cpp ) diff --git a/src/bindings/CMakeLists.txt b/src/bindings/CMakeLists.txt index 99e49646..10662952 100644 --- a/src/bindings/CMakeLists.txt +++ b/src/bindings/CMakeLists.txt @@ -28,6 +28,7 @@ set (SOURCE_FILES ../model/radiation/radiation.cpp ../model/radiation/frequencies/frequencies.cpp ../model/image/image.cpp + ../model/dust/dust.cpp ../solver/solver.cpp ) diff --git a/src/bindings/pybindings.cpp b/src/bindings/pybindings.cpp index b32b8fbc..f711091b 100644 --- a/src/bindings/pybindings.cpp +++ b/src/bindings/pybindings.cpp @@ -129,6 +129,7 @@ PYBIND11_MODULE(core, module) { 
.def_readonly("thermodynamics", &Model::thermodynamics) .def_readonly("radiation", &Model::radiation) .def_readonly("images", &Model::images) + .def_readonly("dust", &Model::dust) .def_readwrite("eta", &Model::eta) .def_readwrite("chi", &Model::chi) .def_readonly("S_ray", &Model::S_ray) @@ -179,6 +180,12 @@ PYBIND11_MODULE(core, module) { "images with the given min and max frequency. Can also specify the " "amount of frequency bins to use (instead of defaulting to " "parameters.nfreqs).") + .def("set_custom_spectral_discretization", + (int(Model::*)( + const py::array_t frequency_grid)) + & Model::set_custom_spectral_discretization, + "Set a custom spectral discretization for the model, using the " + "given frequency grid.") .def("compute_LTE_level_populations", &Model::compute_LTE_level_populations, "Compute the level populations for the model assuming local " "thermodynamic equilibrium (LTE).") @@ -680,6 +687,21 @@ PYBIND11_MODULE(core, module) { .def("read", &Frequencies::read, "Read object from file.") .def("write", &Frequencies::write, "Write object to file."); + // Dust + py::class_(module, "Dust", "Class containing the dust/continuum properties.") + // attributes + .def_readonly("n_dust_frequencies", &Dust::n_dust_frequencies, + "Number of dust/continuum frequency bins.") + .def_readwrite("dust_frequencies", &Dust::dust_frequencies, + "Array with the dust/continuum frequency bins.") + .def_readwrite( + "dust_opacities", &Dust::dust_opacities, "Dust/continuum opacity per frequency bin.") + .def_readwrite("dust_temperature", &Dust::dust_temperature, + "Dust/continuum temperature per model point.") + // functions + .def("read", &Dust::read, "Read object from file.") + .def("write", &Dust::write, "Write object to file."); + // Vector py::class_>(module, "VSize", py::buffer_protocol()) // buffer diff --git a/src/model/dust/dust.cpp b/src/model/dust/dust.cpp new file mode 100644 index 00000000..7f40f544 --- /dev/null +++ b/src/model/dust/dust.cpp @@ -0,0 +1,43 @@ 
+#include "dust.hpp" + +/// Reader for dust data +/// @param[in] io: io data object +void Dust::read(const Io& io) { + cout << "Reading dust..." << endl; + + // by default initialized to zero + n_dust_frequencies = 0; + // Note: dust is optional to include in the model, so first check if data is present + int err = io.read_number("dust/.n_dust_frequencies", n_dust_frequencies); + if (err == 0 && n_dust_frequencies > 0) { + dust_temperature.resize(parameters->npoints()); + dust_frequencies.resize(n_dust_frequencies); + dust_opacities.resize(parameters->npoints(), n_dust_frequencies); + + io.read_list("dust/dust_temperature", dust_temperature); + io.read_list("dust/dust_frequencies", dust_frequencies); + io.read_list("dust/dust_opacities", dust_opacities); + + cout << " Read dust/continuum opacities at " << n_dust_frequencies << " frequencies." + << endl; + } else { + // in this case, n_dust_frequencies remains zero, so when calculating dust properties, this + // must be checked to skip that step in case of no dust present in the model. + cout << "No dust/continuum data found." << endl; + } +} + +/// Writer for dust data +/// @param[in] io: io data object +void Dust::write(const Io& io) const { + cout << "Writing dust..." 
<< endl; + + // Note: data will be written either way, even if it all zeros + io.write_list("dust/dust_temperature", dust_temperature); + io.write_list("dust/dust_frequencies", dust_frequencies); + io.write_list("dust/dust_opacities", dust_opacities); + + Size temp_n_dust_frequencies = dust_frequencies.size(); + + io.write_number("dust/.n_dust_frequencies", temp_n_dust_frequencies); +} diff --git a/src/model/dust/dust.hpp b/src/model/dust/dust.hpp new file mode 100644 index 00000000..7e6f4afe --- /dev/null +++ b/src/model/dust/dust.hpp @@ -0,0 +1,29 @@ +#pragma once + +#include "io/io.hpp" +#include "model/parameters/parameters.hpp" +#include "tools/types.hpp" + +/// Data structure for dust or continuum radiation in general; Note: these opacity sources will be +/// treated in LTE, using a single temperature for all at once +struct Dust { + Size n_dust_frequencies; ///< number of frequencies at which dust opacities are defined + std::shared_ptr parameters; ///< data structure containing model parameters + Vector dust_temperature; ///< dust temperature at each point + Vector dust_frequencies; ///< frequency grid at which dust opacities are defined + Matrix dust_opacities; ///< dust opacities (point, dust frequency index) + // Matrix dust_emissivities; ///< = dust opacities * B_nu(T_dust) + // Note: we cannot precalculate dust emissivities, as it depends on the exact frequency + + Dust(std::shared_ptr params) : parameters(params){}; + + void read(const Io& io); + void write(const Io& io) const; + + void compute_dust_opacity_emissivity( + Size p, Real freq, Real& dust_opacity, Real& dust_emissivity) const; + + inline Real planck(Real temp, Real freq) const; +}; + +#include "dust.tpp" \ No newline at end of file diff --git a/src/model/dust/dust.tpp b/src/model/dust/dust.tpp new file mode 100644 index 00000000..2880cc92 --- /dev/null +++ b/src/model/dust/dust.tpp @@ -0,0 +1,59 @@ +#include "tools/constants.hpp" +#include "tools/types.hpp" + +/// Planck function: copied 
from solver.cpp +/// @param[in] temp : temperature of the corresponding +/// black body +/// @param[in] freq : frequency at which to evaluate the +/// function +/// @return Planck function evaluated at this frequency +/////////////////////////////////////////////////////////////////////////// +inline Real Dust::planck(Real temp, Real freq) const { + return TWO_HH_OVER_CC_SQUARED * (freq * freq * freq) + / expm1(HH_OVER_KB * freq / temp); // best not to use float version here +} + +/// Computes the dust opacity by interpolating the precalculated dust opacities +/// @param[in] p : index of the cell +/// @param[in] freq : Comoving frame frequency at which to evaluate the dust opacity +/// @param[out] dust_opacity : dust opacity at this point, at this frequency +/// @param[out] dust_emissivity : dust emissivity at this point, at this frequency +inline void Dust::compute_dust_opacity_emissivity( + Size p, Real freq, Real& dust_opacity, Real& dust_emissivity) const { + // Return 0 if no dust present + if (this->n_dust_frequencies == 0) { + dust_opacity = 0.0; + dust_emissivity = 0.0; + return; + } + // If frequency is outside the range of the dust opacities, return the boundary value + if (freq < dust_frequencies[0]) { + dust_opacity = dust_opacities(p, 0); + dust_emissivity = dust_opacity * planck(dust_temperature[p], freq); + return; + } + if (freq > dust_frequencies[dust_frequencies.size() - 1]) { + dust_opacity = dust_opacities(p, dust_frequencies.size() - 1); + dust_emissivity = dust_opacity * planck(dust_temperature[p], freq); + return; + } + + // find the right bin using binary search + Size f_low = 0; + Size f_high = dust_frequencies.size() - 1; + while (f_high - f_low > 1) { + Size f_mid = (f_high + f_low) / 2; + if (dust_frequencies[f_mid] > freq) { + f_high = f_mid; + } else { + f_low = f_mid; + } + } + + // and do linear interpolation + Real slope = (dust_opacities(p, f_high) - dust_opacities(p, f_low)) + / (dust_frequencies[f_high] - 
dust_frequencies[f_low]); + dust_opacity = dust_opacities(p, f_low) + slope * (freq - dust_frequencies[f_low]); + dust_emissivity = dust_opacity * planck(dust_temperature[p], freq); + return; +} \ No newline at end of file diff --git a/src/model/image/image.cpp b/src/model/image/image.cpp index cc026a69..69ab465e 100644 --- a/src/model/image/image.cpp +++ b/src/model/image/image.cpp @@ -13,10 +13,10 @@ Image ::Image( const Geometry& geometry, const Frequencies& frequencies, const ImageType it, const Size rr) : imageType(it), imagePointPosition(AllModelPoints), ray_nr(rr), ray_direction(Vector3D(geometry.rays.get_direction(0, ray_nr))) { - if (geometry.parameters->dimension() == 1) { + if (geometry.parameters->dimension() == 1 && geometry.parameters->spherical_symmetry()) { const Vector3D raydir = geometry.rays.get_direction(0, ray_nr); if ((raydir.x() != 0.0) || (raydir.y() != 1.0) || (raydir.z() != 0.0)) { - throw std::runtime_error("In 1D, the image ray has to be (0,1,0)"); + throw std::runtime_error("In 1D spherical symmetry, the image ray has to be (0,1,0)"); } } @@ -30,10 +30,10 @@ Image ::Image(const Geometry& geometry, const Frequencies& frequencies, const Im const Size rr, const Size Nxpix, const Size Nypix) : imageType(it), imagePointPosition(ProjectionSurface), ray_nr(rr), ray_direction(Vector3D(geometry.rays.get_direction(0, ray_nr))) { - if (geometry.parameters->dimension() == 1) { + if (geometry.parameters->dimension() == 1 && geometry.parameters->spherical_symmetry()) { const Vector3D raydir = geometry.rays.get_direction(0, ray_nr); if ((raydir.x() != 0.0) || (raydir.y() != 1.0) || (raydir.z() != 0.0)) { - throw std::runtime_error("In 1D, the image ray has to be (0,1,0)"); + throw std::runtime_error("In 1D spherical symmetry, the image ray has to be (0,1,0)"); } } @@ -74,11 +74,11 @@ Image ::Image(const Geometry& geometry, const Frequencies& frequencies, const Im const Vector3D raydir) : imageType(it), imagePointPosition(AllModelPoints), ray_nr(-1), 
ray_direction(raydir), closest_bdy_point(geometry.parameters->npoints()) { - if (geometry.parameters->dimension() == 1) { + if (geometry.parameters->dimension() == 1 && geometry.parameters->spherical_symmetry()) { // Same error condition as previous imager. In 1D, it does not matter either // way from which direction we image. if ((raydir.x() != 0.0) || (raydir.y() != 1.0) || (raydir.z() != 0.0)) { - throw std::runtime_error("In 1D, the image ray has to be (0,1,0)"); + throw std::runtime_error("In 1D spherical symmetry, the image ray has to be (0,1,0)"); } } set_freqs(frequencies); @@ -90,11 +90,11 @@ Image ::Image(const Geometry& geometry, const Frequencies& frequencies, const Im Image ::Image(const Geometry& geometry, const Frequencies& frequencies, const ImageType it, const Vector3D raydir, const Size Nxpix, const Size Nypix) : imageType(it), imagePointPosition(ProjectionSurface), ray_nr(-1), ray_direction(raydir) { - if (geometry.parameters->dimension() == 1) { + if (geometry.parameters->dimension() == 1 && geometry.parameters->spherical_symmetry()) { // Same error condition as previous imager. In 1D, it does not matter either // way from which direction we image. 
if ((raydir.x() != 0.0) || (raydir.y() != 1.0) || (raydir.z() != 0.0)) { - throw std::runtime_error("In 1D, the image ray has to be (0,1,0)"); + throw std::runtime_error("In 1D spherical symmetry, the image ray has to be (0,1,0)"); } } set_freqs(frequencies); diff --git a/src/model/model.cpp b/src/model/model.cpp index ac79c181..7bd1f2e2 100644 --- a/src/model/model.cpp +++ b/src/model/model.cpp @@ -1,8 +1,11 @@ #include "model.hpp" #include "paracabs.hpp" +#include "pybind11/numpy.h" +#include "pybind11/pybind11.h" #include "solver/solver.hpp" #include "tools/heapsort.hpp" +namespace py = pybind11; void Model ::read(const Io& io) { // Before reading a model, first check whether a file exists at the given @@ -28,6 +31,7 @@ void Model ::read(const Io& io) { thermodynamics.read(io); lines.read(io); radiation.read(io); + dust.read(io); cout << " " << endl; cout << "-------------------------------------------" << endl; @@ -47,6 +51,7 @@ void Model ::read(const Io& io) { void Model ::write(const Io& io) const { // Let only root (rank 0) process write output + // Huh; why would we even attempt to write the model while in a parallel context? 
if (pc::message_passing::comm_rank() == 0) { parameters->write(io); geometry.write(io); @@ -54,6 +59,7 @@ void Model ::write(const Io& io) const { thermodynamics.write(io); lines.write(io); radiation.write(io); + dust.write(io); } } @@ -162,6 +168,7 @@ int Model ::compute_spectral_discretisation() { /// Computer for spectral (=frequency) discretisation /// Gives same frequency bins to each point +/// Cannot be used for the NLTE procedure /// @param[in] width : corresponding line width for frequency bins ///////////////////////////////////////////////////////////////////// int Model ::compute_spectral_discretisation(const Real width) { @@ -250,6 +257,7 @@ int Model ::compute_spectral_discretisation(const Real nu_min, const Real nu_max /// Computer for spectral (=frequency) discretisation /// Gives same frequency bins to each point +/// Cannot be used for the NLTE procedure /// @param[in] min : minimal frequency /// @param[in] max : maximal frequency /// @param[in] n_image_freqs : number of frequencies in the discretization @@ -297,6 +305,44 @@ int Model ::compute_spectral_discretisation(const Real nu_min, const Real nu_max return (0); } +/// Sets the spectral discretization to a custom frequency grid +/// Cannot be used for the NLTE procedure +/// @param frequency_grid: vector of frequencies to use as the spectral discretization +////////////////////////////////////////////////////////// +int Model ::set_custom_spectral_discretization( + const py::array_t frequency_grid_py) { + auto frequency_grid_buf = frequency_grid_py.request(); + Real* frequency_grid = static_cast(frequency_grid_buf.ptr); + + std::cout << "Setting custom spectral discretization..." 
<< std::endl; + + if (frequency_grid_buf.size < 1) { + throw std::runtime_error( + "At least a single frequency is needed to set a spectral discretization."); + } + + radiation.frequencies.resize_data(frequency_grid_buf.size); + + threaded_for(p, parameters->npoints(), { + for (Size f = 0; f < frequency_grid_buf.size; f++) { + radiation.frequencies.nu(p, f) = frequency_grid[f]; + + radiation.frequencies.appears_in_line_integral[f] = false; + radiation.frequencies.corresponding_l_for_spec[f] = parameters->nfreqs(); + radiation.frequencies.corresponding_k_for_tran[f] = parameters->nfreqs(); + radiation.frequencies.corresponding_z_for_line[f] = parameters->nfreqs(); + } + }) + + // Set spectral discretisation setting + spectralDiscretisation = SD_Image; + + // TODO: for all frequencies, set the correct corresponding line. In this way, + // the OneLine approximation will be compatible with the imagers + + return (0); +} + /// Computer for the level populations, assuming LTE //////////////////////////////////////////////////// int Model ::compute_LTE_level_populations() { diff --git a/src/model/model.hpp b/src/model/model.hpp index fc34852b..02864e0c 100644 --- a/src/model/model.hpp +++ b/src/model/model.hpp @@ -1,16 +1,20 @@ #pragma once #include "chemistry/chemistry.hpp" +#include "dust/dust.hpp" #include "geometry/geometry.hpp" #include "image/image.hpp" #include "io/io.hpp" #include "io/python/io_python.hpp" #include "lines/lines.hpp" #include "parameters/parameters.hpp" +#include "pybind11/numpy.h" +#include "pybind11/pybind11.h" #include "radiation/radiation.hpp" #include "thermodynamics/thermodynamics.hpp" #include "tools/timer.hpp" #include "tools/types.hpp" +namespace py = pybind11; #include @@ -25,16 +29,17 @@ struct Model { Lines lines; Radiation radiation; vector images; + Dust dust; enum SpectralDiscretisation { SD_None, SD_Lines, SD_Image } spectralDiscretisation = SD_None; Model() : parameters(new Parameters()), geometry(parameters), 
chemistry(parameters), - thermodynamics(parameters), lines(parameters), radiation(parameters){}; + thermodynamics(parameters), lines(parameters), radiation(parameters), dust(parameters){}; Model(const string name) : parameters(new Parameters()), geometry(parameters), chemistry(parameters), - thermodynamics(parameters), lines(parameters), radiation(parameters) { + thermodynamics(parameters), lines(parameters), radiation(parameters), dust(parameters) { parameters->set_model_name(name); read(); } @@ -54,6 +59,8 @@ struct Model { int compute_spectral_discretisation(const Real nu_min, const Real nu_max); int compute_spectral_discretisation( const Real nu_min, const Real nu_max, const Size n_image_freqs); + int set_custom_spectral_discretization( + const py::array_t frequency_grid); int compute_LTE_level_populations(); int compute_radiation_field(); int compute_radiation_field_feautrier_order_2(); diff --git a/src/solver/solver.tpp b/src/solver/solver.tpp index d52d72f3..b59da536 100644 --- a/src/solver/solver.tpp +++ b/src/solver/solver.tpp @@ -1073,8 +1073,13 @@ accel inline void Solver ::set_data_for_line(const Model& model, const Size l, c const double dshift = shift_nxt - shift_crt; const double dshift_abs = fabs(dshift); + // for the dust, estimate the comoving frame frequency at which to evaluate the opacity + const double freq_line_curr = model.lines.line[l] * shift_crt; + const double freq_line_next = model.lines.line[l] * shift_nxt; + // get number of interpolation points - Size n_interp_points = interp_helper.get_n_interp_for_line(model, l, crt, nxt); + Size n_interp_points = + interp_helper.get_n_interp_around_freqs(model, crt, nxt, freq_line_curr, freq_line_next); if (n_interp_points > 1) { const double dZ_interpl = dZ_loc / n_interp_points; @@ -1158,9 +1163,10 @@ template <> accel inline void Solver ::get_eta_and_chi(const Model& model, const Size p, const Size ll, // dummy variable const Real freq, Real& eta, Real& chi) const { - // Initialize - eta = 0.0; 
- chi = model.parameters->min_opacity; + + // Dust/continuum contribution + model.dust.compute_dust_opacity_emissivity(p, freq, chi, eta); + chi += model.parameters->min_opacity; // Set line emissivity and opacity for (Size l = 0; l < model.parameters->nlines(); l++) { @@ -1186,9 +1192,20 @@ accel inline void Solver ::get_eta_and_chi_interpolated(const Model& model const Size p2, const Real interp_factor, const Size ll, // dummy variable const Real freq, Real& eta, Real& chi) const { - // Initialize - eta = 0.0; - chi = model.parameters->min_opacity; + + // Dust/continuum contribution + // if no dust, set emissivity/opacity to zero + if (model.dust.n_dust_frequencies == 0) { + eta = 0.0; + chi = model.parameters->min_opacity; + } else { // otherwise interpolate + Real chi_dust1, chi_dust2, eta_dust1, eta_dust2; + model.dust.compute_dust_opacity_emissivity(p1, freq, chi_dust1, eta_dust1); + model.dust.compute_dust_opacity_emissivity(p2, freq, chi_dust2, eta_dust2); + eta = interp_helper.interpolate_log(eta_dust1, eta_dust2, interp_factor); + chi = interp_helper.interpolate_log(chi_dust1, chi_dust2, interp_factor) + + model.parameters->min_opacity; + } // Set line emissivity and opacity for (Size l = 0; l < model.parameters->nlines(); l++) { @@ -1220,10 +1237,12 @@ template <> accel inline void Solver ::get_eta_and_chi(const Model& model, const Size p, const Size ll, // dummy variable const Real freq, Real& eta, Real& chi) const { - // Initialize - eta = 0.0; - chi = model.parameters->min_opacity; + // Dust/continuum contribution + model.dust.compute_dust_opacity_emissivity(p, freq, chi, eta); + chi += model.parameters->min_opacity; + + // Line contribution const Real upper_bound_line_width = model.parameters->max_distance_opacity_contribution * model.thermodynamics.profile_width_upper_bound_with_linefreq( @@ -1267,10 +1286,22 @@ accel inline void Solver ::get_eta_and_chi_interpolated(const Model& const Size p1, const Size p2, const Real interp_factor, const Size ll, 
// dummy variable const Real freq, Real& eta, Real& chi) const { - // Initialize - eta = 0.0; - chi = model.parameters->min_opacity; + // Dust/continuum contribution + // if no dust, set emissivity/opacity to zero + if (model.dust.n_dust_frequencies == 0) { + eta = 0.0; + chi = model.parameters->min_opacity; + } else { // otherwise interpolate + Real chi_dust1, chi_dust2, eta_dust1, eta_dust2; + model.dust.compute_dust_opacity_emissivity(p1, freq, chi_dust1, eta_dust1); + model.dust.compute_dust_opacity_emissivity(p2, freq, chi_dust2, eta_dust2); + eta = interp_helper.interpolate_log(eta_dust1, eta_dust2, interp_factor); + chi = interp_helper.interpolate_log(chi_dust1, chi_dust2, interp_factor) + + model.parameters->min_opacity; + } + + // Line contribution const Real upper_bound_line_width = model.parameters->max_distance_opacity_contribution * model.thermodynamics.profile_width_upper_bound_with_linefreq(p1, freq, @@ -1323,8 +1354,12 @@ accel inline void Solver ::get_eta_and_chi( const Real diff = freq - model.lines.line[l]; const Real prof = gaussian(model.lines.inverse_width(p, l), diff); - eta = prof * model.lines.emissivity(p, l); - chi = prof * model.lines.opacity(p, l) + model.parameters->min_opacity; + // Dust/continuum contribution + model.dust.compute_dust_opacity_emissivity(p, freq, chi, eta); + + // Line contribution + eta += prof * model.lines.emissivity(p, l); + chi += prof * model.lines.opacity(p, l) + model.parameters->min_opacity; } /// Getter for the emissivity (eta) and the opacity (chi), with interpolation support @@ -1342,6 +1377,21 @@ accel inline void Solver ::get_eta_and_chi_interpolated(const Model& mo const Size p2, const Real interp_factor, const Size l, const Real freq, Real& eta, Real& chi) const { + // Dust/continuum contribution + // if no dust, set emissivity/opacity to zero + if (model.dust.n_dust_frequencies == 0) { + eta = 0.0; + chi = model.parameters->min_opacity; + } else { // otherwise interpolate + Real chi_dust1, chi_dust2, 
eta_dust1, eta_dust2; + model.dust.compute_dust_opacity_emissivity(p1, freq, chi_dust1, eta_dust1); + model.dust.compute_dust_opacity_emissivity(p2, freq, chi_dust2, eta_dust2); + eta = interp_helper.interpolate_log(eta_dust1, eta_dust2, interp_factor); + chi = interp_helper.interpolate_log(chi_dust1, chi_dust2, interp_factor) + + model.parameters->min_opacity; + } + + // Line contribution const Real diff = freq - model.lines.line[l]; const Real interp_inv_width = interp_helper.interpolate_linear( model.lines.inverse_width(p1, l), model.lines.inverse_width(p2, l), interp_factor); @@ -1352,8 +1402,8 @@ accel inline void Solver ::get_eta_and_chi_interpolated(const Model& mo const Real interp_opacity = interp_helper.interpolate_log( model.lines.opacity(p1, l), model.lines.opacity(p2, l), interp_factor); - eta = prof * interp_emissivity; - chi = prof * interp_opacity + model.parameters->min_opacity; + eta += prof * interp_emissivity; + chi += prof * interp_opacity; } /// Apply trapezium rule to x_crt and x_nxt @@ -1886,9 +1936,47 @@ inline void Solver ::compute_S_dtau_line_integrated(Model& model, Size Size nextpoint, Size currpoint_interp_idx, Size nextpoint_interp_idx, Size lineidx, Real currfreq, Real nextfreq, Real curr_interp, Real next_interp, Real dZ, Real& dtau, Real& Scurr, Real& Snext) { - // FIXME: line idx is wrong + Real sum_dtau = 0.0; + Real sum_dtau_times_Scurr = 0.0; + Real sum_dtau_times_Snext = 0.0; + + // Dust/continuum contribution + // if no dust present, do not even bother computing this contribution + if (model.dust.n_dust_frequencies != 0) { + Real chi_dust1, chi_dust2, eta_dust1, eta_dust2; + model.dust.compute_dust_opacity_emissivity(currpoint, currfreq, chi_dust1, eta_dust1); + model.dust.compute_dust_opacity_emissivity(nextpoint, nextfreq, chi_dust2, eta_dust2); + Real chi_dust_interp1, chi_dust_interp2, eta_dust_interp1, eta_dust_interp2; + model.dust.compute_dust_opacity_emissivity( + currpoint_interp_idx, currfreq, chi_dust_interp1, 
eta_dust_interp1); + model.dust.compute_dust_opacity_emissivity( + nextpoint_interp_idx, nextfreq, chi_dust_interp2, eta_dust_interp2); + + Real S_dust1 = eta_dust1 / chi_dust1; + Real S_dust2 = eta_dust2 / chi_dust2; + Real S_dust_interp1 = eta_dust_interp1 / chi_dust_interp1; + Real S_dust_interp2 = eta_dust_interp2 / chi_dust_interp2; + + Real curr_dust_opacity = + interp_helper.interpolate_log(chi_dust1, chi_dust_interp1, curr_interp); + Real next_dust_opacity = + interp_helper.interpolate_log(chi_dust2, chi_dust_interp2, next_interp); + Real dust_Scurr = interp_helper.interpolate_log(S_dust1, S_dust_interp1, curr_interp); + Real dust_Snext = interp_helper.interpolate_log(S_dust2, S_dust_interp2, next_interp); + Real dust_dtau = 0.5 * (curr_dust_opacity + next_dust_opacity) * dZ; + + sum_dtau += dust_dtau; + sum_dtau_times_Scurr += dust_dtau * dust_Scurr; + sum_dtau_times_Snext += dust_dtau * dust_Snext; + } + + // Line contribution + + // FIXME: line idx can/will be wrong, if using any other spectral discretization than the + // standard NLTE one, given that lineidx will NOT be set properly dtau = compute_dtau_single_line(model, currpoint, nextpoint, currpoint_interp_idx, nextpoint_interp_idx, lineidx, currfreq, nextfreq, curr_interp, next_interp, dZ); + // const Real curr_opacity = interp_helper.interpolate_log(model.lines.opacity(currpoint, // lineidx), // model.lines.opacity(currpoint_interp_idx, lineidx), curr_interp); @@ -1905,16 +1993,24 @@ inline void Solver ::compute_S_dtau_line_integrated(Model& model, Size // Snext = next_emissivity / next_opacity; Real Scurr_p = - model.lines.emissivity(currpoint, lineidx) / model.lines.opacity(currpoint, lineidx); + (model.lines.emissivity(currpoint, lineidx)) / (model.lines.opacity(currpoint, lineidx)); Real Snext_p = - model.lines.emissivity(nextpoint, lineidx) / model.lines.opacity(nextpoint, lineidx); - Real Scurr_interp_p = model.lines.emissivity(currpoint_interp_idx, lineidx) - / 
model.lines.opacity(currpoint_interp_idx, lineidx); - Real Snext_interp_p = model.lines.emissivity(nextpoint_interp_idx, lineidx) - / model.lines.opacity(nextpoint_interp_idx, lineidx); - - Scurr = interp_helper.interpolate_log(Scurr_p, Scurr_interp_p, curr_interp); - Snext = interp_helper.interpolate_log(Snext_p, Snext_interp_p, next_interp); + (model.lines.emissivity(nextpoint, lineidx)) / (model.lines.opacity(nextpoint, lineidx)); + Real Scurr_interp_p = (model.lines.emissivity(currpoint_interp_idx, lineidx)) + / (model.lines.opacity(currpoint_interp_idx, lineidx)); + Real Snext_interp_p = (model.lines.emissivity(nextpoint_interp_idx, lineidx)) + / (model.lines.opacity(nextpoint_interp_idx, lineidx)); + + Real Scurr_line = interp_helper.interpolate_log(Scurr_p, Scurr_interp_p, curr_interp); + Real Snext_line = interp_helper.interpolate_log(Snext_p, Snext_interp_p, next_interp); + + sum_dtau += dtau; + sum_dtau_times_Scurr += dtau * Scurr_line; + sum_dtau_times_Snext += dtau * Snext_line; + + dtau = sum_dtau; + Scurr = sum_dtau_times_Scurr / sum_dtau; + Snext = sum_dtau_times_Snext / sum_dtau; } /// Computer for the optical depth and source function when @@ -1951,6 +2047,38 @@ inline void Solver ::compute_S_dtau_line_integrated(Model& model, Size cur Real sum_dtau = 0.0; Real sum_dtau_times_Scurr = 0.0; Real sum_dtau_times_Snext = 0.0; + + // Dust/continuum contribution + // if no dust present, do not even bother computing this contribution + if (model.dust.n_dust_frequencies != 0) { + Real chi_dust1, chi_dust2, eta_dust1, eta_dust2; + model.dust.compute_dust_opacity_emissivity(currpoint, currfreq, chi_dust1, eta_dust1); + model.dust.compute_dust_opacity_emissivity(nextpoint, nextfreq, chi_dust2, eta_dust2); + Real chi_dust_interp1, chi_dust_interp2, eta_dust_interp1, eta_dust_interp2; + model.dust.compute_dust_opacity_emissivity( + currpoint_interp_idx, currfreq, chi_dust_interp1, eta_dust_interp1); + model.dust.compute_dust_opacity_emissivity( + 
nextpoint_interp_idx, nextfreq, chi_dust_interp2, eta_dust_interp2); + + Real S_dust1 = eta_dust1 / chi_dust1; + Real S_dust2 = eta_dust2 / chi_dust2; + Real S_dust_interp1 = eta_dust_interp1 / chi_dust_interp1; + Real S_dust_interp2 = eta_dust_interp2 / chi_dust_interp2; + + Real curr_dust_opacity = + interp_helper.interpolate_log(chi_dust1, chi_dust_interp1, curr_interp); + Real next_dust_opacity = + interp_helper.interpolate_log(chi_dust2, chi_dust_interp2, next_interp); + Real dust_Scurr = interp_helper.interpolate_log(S_dust1, S_dust_interp1, curr_interp); + Real dust_Snext = interp_helper.interpolate_log(S_dust2, S_dust_interp2, next_interp); + Real dust_dtau = 0.5 * (curr_dust_opacity + next_dust_opacity) * dZ; + + sum_dtau += dust_dtau; + sum_dtau_times_Scurr += dust_dtau * dust_Scurr; + sum_dtau_times_Snext += dust_dtau * dust_Snext; + } + + // Line contributions for (Size l = 0; l < model.parameters->nlines(); l++) { Real line_dtau = compute_dtau_single_line(model, currpoint, nextpoint, currpoint_interp_idx, nextpoint_interp_idx, l, currfreq, nextfreq, curr_interp, next_interp, dZ); @@ -2026,18 +2154,43 @@ inline void Solver ::compute_S_dtau_line_integrated(Model& model, Si Real sum_dtau_times_Scurr = 0.0; Real sum_dtau_times_Snext = 0.0; + // Dust/continuum contribution + // if no dust present, do not even bother computing this contribution + if (model.dust.n_dust_frequencies != 0) { + Real chi_dust1, chi_dust2, eta_dust1, eta_dust2; + model.dust.compute_dust_opacity_emissivity(currpoint, currfreq, chi_dust1, eta_dust1); + model.dust.compute_dust_opacity_emissivity(nextpoint, nextfreq, chi_dust2, eta_dust2); + Real chi_dust_interp1, chi_dust_interp2, eta_dust_interp1, eta_dust_interp2; + model.dust.compute_dust_opacity_emissivity( + currpoint_interp_idx, currfreq, chi_dust_interp1, eta_dust_interp1); + model.dust.compute_dust_opacity_emissivity( + nextpoint_interp_idx, nextfreq, chi_dust_interp2, eta_dust_interp2); + + Real S_dust1 = eta_dust1 / 
chi_dust1; + Real S_dust2 = eta_dust2 / chi_dust2; + Real S_dust_interp1 = eta_dust_interp1 / chi_dust_interp1; + Real S_dust_interp2 = eta_dust_interp2 / chi_dust_interp2; + + Real curr_dust_opacity = + interp_helper.interpolate_log(chi_dust1, chi_dust_interp1, curr_interp); + Real next_dust_opacity = + interp_helper.interpolate_log(chi_dust2, chi_dust_interp2, next_interp); + Real dust_Scurr = interp_helper.interpolate_log(S_dust1, S_dust_interp1, curr_interp); + Real dust_Snext = interp_helper.interpolate_log(S_dust2, S_dust_interp2, next_interp); + Real dust_dtau = 0.5 * (curr_dust_opacity + next_dust_opacity) * dZ; + + sum_dtau += dust_dtau; + sum_dtau_times_Scurr += dust_dtau * dust_Scurr; + sum_dtau_times_Snext += dust_dtau * dust_Snext; + } + + // Line contributions + // determine frequency bounds for searching nearby Real left_freq; Real right_freq; - // err, compiler will probably figure out that I just want - // these two values ordered - if (currfreq < nextfreq) { - left_freq = currfreq; - right_freq = nextfreq; - } else { - right_freq = currfreq; - left_freq = nextfreq; - } + left_freq = std::min(currfreq, nextfreq); + right_freq = std::max(currfreq, nextfreq); // using maximum of bounds on the two points to get an // upper bound for the line width diff --git a/src/solver/solver_interp_helper.hpp b/src/solver/solver_interp_helper.hpp index 48f33909..244a46af 100644 --- a/src/solver/solver_interp_helper.hpp +++ b/src/solver/solver_interp_helper.hpp @@ -35,8 +35,18 @@ class InterpHelper { // NOTE: interpolation is done in log space, as linear space would take far too many points inline Size get_n_interp(const Model& model, const Size curr_idx, const Size next_idx) const; + + private: + // Internal functions for getting number of interpolation points for the different opacity + // contributions inline Size get_n_interp_for_line( const Model& model, const Size l, const Size curr_idx, const Size next_idx) const; + inline Size get_n_interp_for_dust(const 
Model& model, const Size curr_idx, const Size next_idx, + const Real curr_freq, const Real next_freq) const; + + public: + inline Size get_n_interp_around_freqs(const Model& model, const Size curr_idx, + const Size next_idx, const Real curr_freq, const Real next_freq) const; // Interpolation functions themselves inline Real interpolate_linear(const Real f_start, const Real f_end, const Real factor) const; diff --git a/src/solver/solver_interp_helper.tpp b/src/solver/solver_interp_helper.tpp index e4786309..11d81792 100644 --- a/src/solver/solver_interp_helper.tpp +++ b/src/solver/solver_interp_helper.tpp @@ -1,5 +1,6 @@ -/// Get the number of points to interpolate between two points +/// Get the number of points to interpolate between two points (inefficient upper bound) +/// @param[in] model : reference to the model /// @param[in] curr_idx : index of the current point /// @param[in] next_idx : index of the next point /// @returns number of points to interpolate @@ -35,17 +36,39 @@ inline Size InterpHelper::get_n_interp( interpolation_criterion.begin() + next_line_end_idx, interpolation_criterion.begin() + curr_line_start_idx, sources_diff.begin(), [](Real x, Real y) { return fabs(logf(x / y)); }); - const Size source_diff_n_interp = std::ceil( + Size n_interp_points = std::ceil( *std::max_element(sources_diff.begin(), sources_diff.end()) / logf(max_source_diff)); - if (source_diff_n_interp > 1) { - return source_diff_n_interp; + // if dust is present, also consider dust opacity difference; upper bound given by iterating + // over all dust frequencies + if (model.dust.n_dust_frequencies > 0) { + std::vector dust_opacity_diff; + dust_opacity_diff.resize(model.dust.n_dust_frequencies); + + std::transform(model.dust.dust_opacities.dat + next_idx * model.dust.n_dust_frequencies, + model.dust.dust_opacities.dat + (next_idx + 1) * model.dust.n_dust_frequencies, + model.dust.dust_opacities.dat + curr_idx * model.dust.n_dust_frequencies, + dust_opacity_diff.begin(), 
[min_op = model.parameters->min_opacity](Real x, Real y) { + return fabs(logf((x + min_op) / (y + min_op))); + }); + + Size dust_diff_n_interp = + std::ceil(*std::max_element(dust_opacity_diff.begin(), dust_opacity_diff.end()) + / logf(max_source_diff)); // TODO: check if other value might be needed + n_interp_points = std::max(dust_diff_n_interp, n_interp_points); + } + + if (n_interp_points > 1) { + return n_interp_points; } else { return 1; } } -/// Get the number of points to interpolate between two points +/// Get the number of points to interpolate between two points; internal use only, use +/// get_n_interp_around_freqs instead +/// @param[in] model : reference to the model +/// @param[in] l : line index /// @param[in] curr_idx : index of the current point /// @param[in] next_idx : index of the next point /// @returns number of points to interpolate @@ -74,6 +97,149 @@ inline Size InterpHelper::get_n_interp_for_line( } } +/// Get the number of points to interpolate between two points for dust opacity; internal use only, +/// use get_n_interp_around_freqs instead +/// @param[in] model : reference to the model +/// @param[in] curr_idx : index of the current point +/// @param[in] next_idx : index of the next point +/// @param[in] curr_freq : current frequency +/// @param[in] next_freq : next frequency +/// @returns the number of interpolation points needed +/// NOTE: use only if dust is present +inline Size InterpHelper::get_n_interp_for_dust(const Model& model, const Size curr_idx, + const Size next_idx, const Real curr_freq, const Real next_freq) const { + Size n_interpolation_points = 1; + // evaluate dust opacity, interpolating on the grid + Real curr_dust_opacity; + Real next_dust_opacity; + // If frequency is outside the range of the dust opacities, return the boundary value + if (curr_freq < model.dust.dust_frequencies[0]) { + curr_dust_opacity = model.dust.dust_opacities(curr_idx, 0); + } + if (curr_freq > 
model.dust.dust_frequencies[model.dust.n_dust_frequencies - 1]) { + curr_dust_opacity = model.dust.dust_opacities(curr_idx, model.dust.n_dust_frequencies - 1); + } + // find the right bin using binary search + Size f_low = 0; + Size f_high = model.dust.n_dust_frequencies - 1; + while (f_high - f_low > 1) { + Size f_mid = (f_high + f_low) / 2; + if (model.dust.dust_frequencies[f_mid] > curr_freq) { + f_high = f_mid; + } else { + f_low = f_mid; + } + } + + // and do linear interpolation + Real slope = + (model.dust.dust_opacities(curr_idx, f_high) - model.dust.dust_opacities(curr_idx, f_low)) + / (model.dust.dust_frequencies[f_high] - model.dust.dust_frequencies[f_low]); + curr_dust_opacity = model.dust.dust_opacities(curr_idx, f_low) + + slope * (curr_freq - model.dust.dust_frequencies[f_low]); + + // Exactly the same for the next position + if (next_freq < model.dust.dust_frequencies[0]) { + next_dust_opacity = model.dust.dust_opacities(next_idx, 0); + } + if (next_freq > model.dust.dust_frequencies[model.dust.n_dust_frequencies - 1]) { + next_dust_opacity = model.dust.dust_opacities(next_idx, model.dust.n_dust_frequencies - 1); + } + + // find the right bin using binary search + f_low = 0; + f_high = model.dust.n_dust_frequencies - 1; + while (f_high - f_low > 1) { + Size f_mid = (f_high + f_low) / 2; + if (model.dust.dust_frequencies[f_mid] > next_freq) { + f_high = f_mid; + } else { + f_low = f_mid; + } + } + + // and do linear interpolation + slope = + (model.dust.dust_opacities(next_idx, f_high) - model.dust.dust_opacities(next_idx, f_low)) + / (model.dust.dust_frequencies[f_high] - model.dust.dust_frequencies[f_low]); + next_dust_opacity = model.dust.dust_opacities(next_idx, f_low) + + slope * (next_freq - model.dust.dust_frequencies[f_low]); + + Real dust_opacity_diff = + std::abs(logf((next_dust_opacity + model.parameters->min_opacity) + / (curr_dust_opacity + model.parameters->min_opacity))); // in log space + const Size n_interp = std::ceil( + 
dust_opacity_diff / logf(max_source_diff)); // TODO: check if other value might be needed + + if (n_interp > 1) { + return n_interp; + } else { + return 1; + } +} + +/// Get the number of points to interpolate between two points around certain frequencies +/// @param[in] model : reference to the model +/// @param[in] curr_idx : index of the current point +/// @param[in] next_idx : index of the next point +/// @param[in] curr_freq : current frequency +/// @param[in] next_freq : next frequency +/// @returns the number of interpolation points needed +inline Size InterpHelper::get_n_interp_around_freqs(const Model& model, const Size curr_idx, + const Size next_idx, const Real curr_freq, const Real next_freq) const { + + Size n_interpolation_points = 1; + + // first try to find which lines are close enough to any of the two frequencies + // and use the corresponding interpolation criteria + // copied from Solver::compute_S_dtau_line_integrated in solver.tpp + // determine frequency bounds for searching nearby + Real left_freq; + Real right_freq; + + left_freq = std::min(curr_freq, next_freq); + right_freq = std::max(curr_freq, next_freq); + + // using maximum of bounds on the two points to get an + // upper bound for the line width + const Real curr_bound_line_width = model.parameters->max_distance_opacity_contribution + * model.thermodynamics.profile_width_upper_bound_with_linefreq( + curr_idx, right_freq, model.lines.max_inverse_mass); + const Real next_bound_line_width = model.parameters->max_distance_opacity_contribution + * model.thermodynamics.profile_width_upper_bound_with_linefreq( + next_idx, right_freq, model.lines.max_inverse_mass); + const Real upper_bound_line_width = std::max(curr_bound_line_width, next_bound_line_width); + + const Real left_freq_bound = left_freq - upper_bound_line_width; + const Real right_freq_bound = right_freq + upper_bound_line_width; + + // apply default search algorithms on the bounds, + // obtaining iterators + auto 
left_line_bound = std::lower_bound( + model.lines.sorted_line.begin(), model.lines.sorted_line.end(), left_freq_bound); + auto right_line_bound = std::upper_bound( + model.lines.sorted_line.begin(), model.lines.sorted_line.end(), right_freq_bound); + + for (auto freq_sort_l = left_line_bound; freq_sort_l != right_line_bound; freq_sort_l++) { + const Size sort_l = freq_sort_l - model.lines.sorted_line.begin(); + // Map sorted line index to original line index + const Size l = model.lines.sorted_line_map[sort_l]; + + // evaluate interpolation criterion for this line, take max + n_interpolation_points = + std::max(get_n_interp_for_line(model, l, curr_idx, next_idx), n_interpolation_points); + } + + // and also include the dust opacity in the interpolation if present + if (model.dust.n_dust_frequencies > 0) { + n_interpolation_points = + std::max(get_n_interp_for_dust(model, curr_idx, next_idx, curr_freq, next_freq), + n_interpolation_points); + } + + return n_interpolation_points; +} + /// Linear interpolation of f(x) in interval [0,1] inline Real InterpHelper::interpolate_linear( const Real f_start, const Real f_end, const Real factor) const { diff --git a/tests/benchmarks/analytic/continuum_implementation.py b/tests/benchmarks/analytic/continuum_implementation.py new file mode 100644 index 00000000..8882522e --- /dev/null +++ b/tests/benchmarks/analytic/continuum_implementation.py @@ -0,0 +1,218 @@ +#simple 1D test to check whether adding a continuum background to a model works +# Does not include any line contributions - just a continuum source function + +import os +import sys + +curdir = os.path.dirname(os.path.realpath(__file__)) +datdir = f'{curdir}/../../data/' +moddir = f'{curdir}/../../models/' +resdir = f'{curdir}/../../results/' + +import numpy as np +import matplotlib.pyplot as plt +import magritte.tools as tools +import magritte.setup as setup +import magritte.core as magritte +import astropy.units as units +import scipy.integrate + + +dimension = 1 
+npoints = 50 +nrays = 2 +nspecs = 3 +nlspecs = 1 +nquads = 1 + +nH2 = 1.0E+12 # [m^-3] +nTT = 1.0E+03 # [m^-3] +#yes, this dust density is quite high, but this is a benchmark, so I want to reach optical depths of order 1 +fraction_density_dust = 1.0E-2 # [.] fraction of number density in dust +temp = 4.5E+00 # [K] +dust_temp = 1.5E+02 # [K] +turb = 0.0E+00 # [m/s] +dx = 1.0E+12 # [m] +dv = 0.0E+03 / magritte.CC # [fraction of speed of light] + + +def create_model (): + """ + Create a model file for the all_constant benchmark, single ray. + """ + + modelName = f'all_constant_single_ray_continuum' + modelFile = f'{moddir}{modelName}.hdf5' + lamdaFile = f'{datdir}test.txt' + continuumFile = f'{datdir}fe50o_henning1995.txt' + continuum_ref_density = 4.9 * units.g/(units.cm)**3 # [g/cm3]; will be converted to the correct SI units below + #corresponds Fe_(0.5)Mg_(0.5)O from Henning 1995, reference density somewhere between 4.79 g/cm3 and 5.05 g/cm3, so I'll assume 4.9 g/cm3 + + + model = magritte.Model () + model.parameters.set_spherical_symmetry(False) + model.parameters.set_model_name (modelFile) + model.parameters.set_dimension (dimension) + model.parameters.set_npoints (npoints) + model.parameters.set_nrays (nrays) + model.parameters.set_nspecs (nspecs) + model.parameters.set_nlspecs (nlspecs) + model.parameters.set_nquads (nquads) + + model.geometry.points.position.set([[i*dx, 0, 0] for i in range(npoints)]) + model.geometry.points.velocity.set([[i*dv, 0, 0] for i in range(npoints)]) + + #Note: Magritte currently cannot handle creating continuum-only models, so we include a dummy species with zero abundance to avoid issues + model.chemistry.species.abundance = [[0*nTT, nH2, 0.0] for _ in range(npoints)]#disable line emission/absorption by setting density to 0 + model.chemistry.species.symbol = ['test', 'H2', 'e-'] + + model.thermodynamics.temperature.gas .set( temp * np.ones(npoints)) + model.thermodynamics.turbulence.vturb2.set((turb/magritte.CC)**2 * 
np.ones(npoints)) + + frequency_grid, complex_refractive_index = tools.read_dust_opacity_table(continuumFile, 1e-6*units.m) + frequency_grid_Hz, absorption_coeff_per_density = tools.convert_dust_opacity_table_to_SI(frequency_grid, complex_refractive_index, continuum_ref_density) + #absorption_coeff_per_density is now in m2/kg, thus needs to be divided by 1000 to get m2/g + model.dust.dust_opacities.set((absorption_coeff_per_density/1000 * fraction_density_dust * nH2 * 2.016 / 6.022e23).value[None, :]*np.ones(npoints)[:, None])#H2: 2.016 g/mol, 6.022e23 particles/mol -> particles/m3 to g/m3 + model.dust.dust_frequencies.set((frequency_grid_Hz).value) + model.dust.dust_temperature.set(dust_temp * np.ones(npoints)) + + setup.set_Delaunay_neighbor_lists (model) + setup.set_Delaunay_boundary (model) + setup.set_boundary_condition_CMB (model) + setup.set_uniform_rays (model) + setup.set_linedata_from_LAMDA_file(model, lamdaFile) + setup.set_quadrature (model) + + model.write() + + return #magritte.Model (modelFile) + + +def run_model (nosave=False): + + modelName = f'all_constant_single_ray_continuum' + modelFile = f'{moddir}{modelName}.hdf5' + timestamp = tools.timestamp() + + timer1 = tools.Timer('reading model') + timer1.start() + model = magritte.Model (modelFile) + timer1.stop() + + magritte.pcmt_set_n_threads_avail(1) + + dust_frequencies = np.array(model.dust.dust_frequencies) + chi = np.array(model.dust.dust_opacities)[0,:][None, :]#same for all points in this model + #conveniently, these frequencies correspond to the frequencies used to tabulate the continuum opacity, so no interpolation is needed + dust_temp = np.array(model.dust.dust_temperature)[0]#same for all points in this model + positions = np.array(model.geometry.points.position) + + timer2 = tools.Timer('setting model') + timer2.start() + model.set_custom_spectral_discretization(dust_frequencies) + model.compute_inverse_line_widths () + model.compute_LTE_level_populations () + timer2.stop() + + 
#compute the image, to check whether the intensity is as expected + timer3 = tools.Timer('Compute intensity image') + timer3.start() + model.compute_image_new(0,1,1) + timer3.stop() + + timer4 = tools.Timer('Compute image optical depth') + timer4.start() + model.compute_image_optical_depth_new(0,1,1) + timer4.stop() + + I_image = np.array(model.images[0].I)[0,:] + tau_image = np.array(model.images[1].I)[0,:] + + def evaluate_dust_opacity(frequency, velocity): + return np.interp(dust_frequencies[None, :]*(1-velocity[:, None]), frequency, chi[0,:]) + + + #compute optical depth + chi_shifted = evaluate_dust_opacity(dust_frequencies, np.array(model.geometry.points.velocity)[:,0]) + tau_ref = np.trapz(chi_shifted, x=np.array(model.geometry.points.position)[:,0], axis=0) + #TODO: fix the calculation of the reference intensity; currently it is wrong for any model with non-zero velocity field due to not taking into account the doppler shifts properly + #test attempt below + # def evaluate_source_function(frequency, velocity): + # return tools.planck(dust_temp[None, None], dust_frequencies[None, :]*(np.ones((1,1))-velocity[:, None])) + # S_shifted = evaluate_source_function(dust_frequencies, np.array(model.geometry.points.velocity)[:,0]) + # cumsum_tau = np.zeros((chi_shifted.shape[0]+1, chi_shifted.shape[1])) + # cumsum_tau[1:-1,:] = scipy.integrate.cumulative_trapezoid(chi_shifted, axis=0)*dx + # cumsum_tau[-1,:] = tau_ref + # intensity_contributions = S_shifted * (1 - np.exp(-dx*chi_shifted)) * np.exp(-cumsum_tau[:-1, :]) + # ref_intensity = np.sum(intensity_contributions, axis=0) + tools.I_CMB(dust_frequencies)*np.exp(-tau_ref) + + # Technically, the source function for the dust is not constant due to doppler shifts, therefore NO doppler shifts in this benchmark + ref_intensity = tools.planck(dust_temp, dust_frequencies) * (1-np.exp(-tau_ref)) + tools.I_CMB(dust_frequencies)*np.exp(-tau_ref) + + reldiff_I = 2.0*np.abs((I_image - ref_intensity)/(ref_intensity+I_image)) + 
reldiff_tau = 2.0*np.abs((tau_image - tau_ref)/(tau_ref+tau_image)) + + result = f'--- Benchmark name ----------------------------\n' + result += f'{modelName }\n' + result += f'--- Parameters --------------------------------\n' + result += f'dimension = {model.parameters.dimension() }\n' + result += f'npoints = {model.parameters.npoints () }\n' + result += f'nrays = {model.parameters.nrays () }\n' + result += f'nquads = {model.parameters.nquads () }\n' + result += f'--- Accuracy ----------------------------------\n' + result += f'max error in I = {np.max(reldiff_I[:40])} \n' + result += f'max error in tau = {np.max(reldiff_tau)} \n' + result += f'--- Timers ------------------------------------\n' + result += f'{timer1.print() }\n' + result += f'{timer2.print() }\n' + result += f'{timer3.print() }\n' + result += f'{timer4.print() }\n' + result += f'-----------------------------------------------\n' + + print(result) + + if not nosave: + with open(f'{resdir}{modelName}-{timestamp}.log' ,'w') as log: + log.write(result) + + plt.figure(dpi = 150) + plt.title('Intensity') + plt.plot(ref_intensity, label='ref') + plt.plot(I_image, label='model') + plt.axvline(x=40, color='gray', linestyle='--', label='end of checked range') + plt.yscale('log') + plt.legend() + plt.savefig(f'{resdir}{modelName}-intensity-{timestamp}.png', dpi=150) + plt.figure(dpi=150) + plt.title('Optical depth') + plt.plot(tau_ref, label='ref') + plt.plot(tau_image, label='model') + plt.yscale('log') + plt.savefig(f'{resdir}{modelName}-optical_depth-{timestamp}.png', dpi=150) + + #error bounds are chosen somewhat arbitrarily, based on previously obtained results; this should prevent serious regressions. 
+ RELDIFF_I_AS_EXPECTED = (np.max(reldiff_I[:40])<1.7e-5)#well, the higher frequencies have very low intensities/sharp declines of the source function, so the relative error can be larger there + RELDIFF_TAU_AS_EXPECTED = (np.max(reldiff_tau)<4.4e-11) + + if not RELDIFF_I_AS_EXPECTED: + print("Continuum intensity max error too large: ", np.max(np.max(reldiff_I[:40]))) + if not RELDIFF_TAU_AS_EXPECTED: + print("Continuum optical depth max error too large: ", np.max(np.max(reldiff_tau))) + + + return (RELDIFF_I_AS_EXPECTED&RELDIFF_TAU_AS_EXPECTED) + + +def run_test (nosave=False): + + create_model () + run_model (nosave) + + return + + +if __name__ == '__main__': + + nosave = (len(sys.argv) > 1) and (sys.argv[1] == 'nosave') + + run_test (nosave) diff --git a/tests/benchmarks/analytic/test_benchmarks.py b/tests/benchmarks/analytic/test_benchmarks.py index ff31f56e..d1e3b3fc 100644 --- a/tests/benchmarks/analytic/test_benchmarks.py +++ b/tests/benchmarks/analytic/test_benchmarks.py @@ -15,6 +15,8 @@ from constant_velocity_gradient_1D_image import run_model as velocity_gradient_image_run from constant_velocity_gradient_1D_new_imager import create_model as velocity_gradient_new_imager_setup from constant_velocity_gradient_1D_new_imager import run_model as velocity_gradient_new_imager_run +from continuum_implementation import create_model as continuum_implementation_setup +from continuum_implementation import run_model as continuum_implementation_run @@ -84,6 +86,14 @@ def test_density_distribution1D_image_setup(self): def test_density_distribution1D_image_run(self): assert density_dist_image_run('a', nosave=True, use_widgets=False) + @pytest.mark.incremental + class TestContinuumImplementation: + def test_continuum_implementation_setup(self): + continuum_implementation_setup() + + def test_continuum_implementation_run(self): + assert continuum_implementation_run(nosave=True) + @pytest.mark.incremental class TestVelocityGradient1D: def 
test_velocity_gradient_1D_setup(self): diff --git a/tests/data/fe50o_henning1995.txt b/tests/data/fe50o_henning1995.txt new file mode 100644 index 00000000..c59c98e5 --- /dev/null +++ b/tests/data/fe50o_henning1995.txt @@ -0,0 +1,77 @@ + 0.20000 1.275000 0.949600 + 0.25000 1.650000 0.894200 + 0.30000 1.829330 0.752567 + 0.35000 1.920570 0.633203 + 0.40000 1.961000 0.531400 + 0.45000 1.966000 0.458912 + 0.50000 1.961000 0.418200 + 0.60000 1.959670 0.363701 + 0.70000 1.939860 0.348728 + 0.80000 1.933000 0.362600 + 0.90000 1.943780 0.388354 + 1.00000 1.968000 0.415600 + 1.20000 2.048450 0.449190 + 1.40000 2.139670 0.445246 + 1.60000 2.214990 0.410576 + 1.80000 2.265190 0.362685 + 2.00000 2.294000 0.313700 + 2.50000 2.318000 0.239600 + 3.00000 2.320000 0.193335 + 3.50000 2.314000 0.162715 + 4.00000 2.305000 0.141200 + 4.50000 2.293110 0.125623 + 5.00000 2.279000 0.113900 + 6.00000 2.245010 0.098369 + 7.00000 2.202160 0.090064 + 8.00000 2.150030 0.087191 + 9.00000 2.086670 0.087114 + 10.00000 2.011270 0.094262 + 11.00000 1.922490 0.105846 + 12.00000 1.814810 0.123826 + 13.00000 1.677310 0.153443 + 14.00000 1.501200 0.202534 + 15.00000 1.241240 0.311487 + 16.00000 1.025120 0.572044 + 17.00000 0.855875 0.871219 + 18.00000 0.737061 1.239720 + 19.00000 0.706528 1.642270 + 20.00000 0.745133 2.048190 + 21.00000 0.827257 2.456225 + 22.00000 0.955626 2.906187 + 23.00000 1.177110 3.396530 + 24.00000 1.526516 3.914342 + 25.00000 2.059380 4.449403 + 26.00000 2.900673 4.906424 + 27.00000 4.125912 4.933542 + 28.00000 5.061496 4.197180 + 29.00000 5.373952 3.417817 + 30.00000 5.446354 2.850807 + 31.00000 5.454564 2.412194 + 32.00000 5.400644 2.061386 + 33.00000 5.323125 1.787600 + 34.00000 5.218550 1.562245 + 35.00000 5.118211 1.397365 + 36.00000 5.028645 1.265914 + 38.00000 4.859378 1.066886 + 40.00000 4.716105 0.958105 + 42.00000 4.605909 0.894919 + 44.00000 4.530648 0.851494 + 46.00000 4.476258 0.810078 + 48.00000 4.419734 0.779872 + 50.00000 4.368821 0.758558 + 60.00000 4.268398 
0.738961 + 70.00000 4.247900 0.757796 + 80.00000 4.261040 0.759746 + 90.00000 4.290357 0.769156 + 100.00000 4.306678 0.767868 + 120.00000 4.337099 0.782940 + 140.00000 4.367511 0.786807 + 160.00000 4.377516 0.814747 + 180.00000 4.393457 0.843530 + 200.00000 4.410885 0.864042 + 250.00000 4.461455 0.962321 + 300.00000 4.480455 1.108519 + 350.00000 4.542546 1.140882 + 400.00000 4.522373 1.299155 + 450.00000 4.539715 1.333670 + 500.00000 4.565743 1.339532