ARTS 2.5.11 (git: 6827797f)
m_optproperties.cc
15/*===========================================================================
16 === External declarations
17 ===========================================================================*/
18
19#include <cfloat>
20#include <cmath>
21#include "array.h"
22#include "arts.h"
23#include "arts_constants.h"
24#include "arts_conversions.h"
25#include "auto_md.h"
26#include "check_input.h"
27#include "exceptions.h"
28#include "interpolation.h"
29#include "interp.h"
30#include "logic.h"
31#include "math_funcs.h"
32#include "matpack_data.h"
33#include "messages.h"
34#include "montecarlo.h"
35#include "optproperties.h"
36#include "sorting.h"
37#include "xml_io.h"
38
39inline constexpr Numeric PI=Constant::pi;
40inline constexpr Numeric DEG2RAD=Conversion::deg2rad(1);
41inline constexpr Numeric RAD2DEG=Conversion::rad2deg(1);
42
43#define PART_TYPE scat_data[i_ss][i_se].ptype
44#define F_DATAGRID scat_data[i_ss][i_se].f_grid
45#define T_DATAGRID scat_data[i_ss][i_se].T_grid
46#define ZA_DATAGRID scat_data[i_ss][i_se].za_grid
47#define AA_DATAGRID scat_data[i_ss][i_se].aa_grid
48#define PHA_MAT_DATA scat_data[i_ss][i_se].pha_mat_data
49#define EXT_MAT_DATA scat_data[i_ss][i_se].ext_mat_data
50#define ABS_VEC_DATA scat_data[i_ss][i_se].abs_vec_data
51
52// If particle number density is below this value,
53// no transformations will be performed
54#define PND_LIMIT 1e-12
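// (Each per-scattering-element loop below applies this gate: if the element's
// number density at the current grid point does not exceed PND_LIMIT, the
// coordinate transformations for that element are skipped.)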
55
56/* Workspace method: Doxygen documentation will be auto-generated */
57void pha_mat_sptFromData( // Output:
58 Tensor5& pha_mat_spt,
 59 // Input:
 60 const ArrayOfArrayOfSingleScatteringData& scat_data,
61 const Vector& za_grid,
62 const Vector& aa_grid,
63 const Index& za_index, // propagation directions
64 const Index& aa_index,
65 const Index& f_index,
66 const Vector& f_grid,
67 const Numeric& rtp_temperature,
68 const Tensor4& pnd_field,
69 const Index& scat_p_index,
70 const Index& scat_lat_index,
71 const Index& scat_lon_index,
72 const Verbosity& verbosity) {
73 const Index stokes_dim = pha_mat_spt.ncols();
74 if (stokes_dim > 4 || stokes_dim < 1) {
75 throw runtime_error(
76 "The dimension of the stokes vector \n"
77 "must be 1,2,3 or 4");
78 }
79
80 // Determine total number of scattering elements
81 const Index N_se_total = TotalNumberOfElements(scat_data);
82 if (N_se_total != pnd_field.nbooks()) {
83 ostringstream os;
84 os << "Total number of scattering elements in scat_data "
85 << "inconsistent with size of pnd_field.";
86 throw runtime_error(os.str());
87 }
 88 // As pha_mat_spt is typically initialized from pnd_field, this theoretically
 89 // checks the same as the runtime_error above. Still, we keep it to be on the
 90 // safe side.
91 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
92
93 // Check that we don't have scat_data_mono here. Only checking the first
94 // scat element, assuming the other elements have been processed in the same
 95 // manner. That's safe against having mono data, but not against having
96 // individual elements produced with only a single frequency. This, however,
97 // has been checked by scat_data_raw reading routines (ScatSpecies/Element*Add/Read).
 98 // It remains unsafe, however, when ReadXML is used directly or if scat_data(_raw) is
99 // (partly) produced from scat_data_singleTmatrix.
100 if (scat_data[0][0].f_grid.nelem() < 2) {
101 ostringstream os;
102 os << "Scattering data seems to be *scat_data_mono* (1 freq point only),\n"
103 << "but frequency interpolable data (*scat_data* with >=2 freq points) "
104 << "is expected here.";
105 throw runtime_error(os.str());
106 }
107
108 const Index N_ss = scat_data.nelem();
109
 110 // Phase matrix data interpolated to a single frequency (and temperature).
 111 // Dimensions: [za_sca, aa_sca, za_inc, aa_inc, matrix_element]
112 Tensor5 pha_mat_data_int;
113
114 Index i_se_flat = 0;
115 // Loop over scattering species
116 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
117 const Index N_se = scat_data[i_ss].nelem();
118
119 // Loop over the included scattering elements
120 for (Index i_se = 0; i_se < N_se; i_se++) {
121 // If the particle number density at a specific point in the
122 // atmosphere for the i_se scattering element is zero, we don't need
 123 // to do the transformation!
124 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
125 PND_LIMIT) {
126 // First we have to transform the data from the coordinate system
127 // used in the database (depending on the kind of ptype) to the
128 // laboratory coordinate system.
129
130 // Frequency and temperature interpolation:
131
132 // Container for data at one frequency and one temperature.
133 pha_mat_data_int.resize(PHA_MAT_DATA.nshelves(),
134 PHA_MAT_DATA.nbooks(),
135 PHA_MAT_DATA.npages(),
136 PHA_MAT_DATA.nrows(),
137 PHA_MAT_DATA.ncols());
138
139 // Gridpositions:
140 GridPos freq_gp;
141 gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
142 GridPos t_gp;
143 Vector itw;
144
145 Index ti = -1;
146
147 if (PHA_MAT_DATA.nvitrines() == 1) // just 1 T_grid element
148 {
149 ti = 0;
150 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
151 // pick one temperature'
152 {
153 if (rtp_temperature > -10.) // lowest T-point
154 {
155 ti = 0;
156 } else if (rtp_temperature > -20.) // highest T-point
157 {
158 ti = T_DATAGRID.nelem() - 1;
159 } else // median T-point
160 {
161 ti = T_DATAGRID.nelem() / 2;
162 }
163 }
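// The convention implemented above (and used again in
// pha_mat_sptFromDataDOITOpt below):
//   T_grid has a single entry     -> ti = 0 (no T dependence)
//   rtp_temperature >= 0          -> interpolate in T (ti stays -1)
//   -10 < rtp_temperature < 0     -> lowest T grid point (ti = 0)
//   -20 < rtp_temperature <= -10  -> highest T grid point (ti = nT-1)
//   rtp_temperature <= -20        -> median T grid point (ti = nT/2)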
164
165 if (ti < 0) // temperature interpolation
166 {
167 ostringstream os;
168 os << "In pha_mat_sptFromData.\n"
169 << "The temperature grid of the scattering data does not\n"
170 << "cover the atmospheric temperature at cloud location.\n"
171 << "The data should include the value T = " << rtp_temperature
172 << " K.";
173 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
174
175 gridpos(t_gp, T_DATAGRID, rtp_temperature);
176
177 // Interpolation weights:
178 itw.resize(4);
179 interpweights(itw, freq_gp, t_gp);
180
181 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
182 i_za_sca++)
183 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
184 i_aa_sca++)
185 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
186 i_za_inc++)
187 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
188 i_aa_inc++)
189 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
190 // Interpolation of phase matrix:
191 pha_mat_data_int(
192 i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
193 interp(itw,
194 PHA_MAT_DATA(joker,
195 joker,
196 i_za_sca,
197 i_aa_sca,
198 i_za_inc,
199 i_aa_inc,
200 i),
201 freq_gp,
202 t_gp);
203 } else {
204 // Interpolation weights:
205 itw.resize(2);
206 interpweights(itw, freq_gp);
207 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
208 i_za_sca++)
209 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
210 i_aa_sca++)
211 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
212 i_za_inc++)
213 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
214 i_aa_inc++)
215 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
216 // Interpolation of phase matrix:
217 pha_mat_data_int(
218 i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
219 interp(itw,
220 PHA_MAT_DATA(joker,
221 ti,
222 i_za_sca,
223 i_aa_sca,
224 i_za_inc,
225 i_aa_inc,
226 i),
227 freq_gp);
228 }
229
230 // Do the transformation into the laboratory coordinate system.
231 for (Index za_inc_idx = 0; za_inc_idx < za_grid.nelem();
232 za_inc_idx++) {
233 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
 234 aa_inc_idx++) {
 235 pha_matTransform(
 236 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
 237 pha_mat_data_int,
 238 ZA_DATAGRID,
 239 AA_DATAGRID,
 240 PART_TYPE,
241 za_index,
242 aa_index,
243 za_inc_idx,
244 aa_inc_idx,
245 za_grid,
246 aa_grid,
247 verbosity);
248 }
249 }
250 }
251 i_se_flat++;
252 }
253 }
254}
255
256/* Workspace method: Doxygen documentation will be auto-generated */
 257 void pha_mat_sptFromDataDOITOpt( // Output:
 258 Tensor5& pha_mat_spt,
259 // Input:
260 const ArrayOfTensor7& pha_mat_sptDOITOpt,
261 const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
262 const Index& doit_za_grid_size,
263 const Vector& aa_grid,
264 const Index& za_index, // propagation directions
265 const Index& aa_index,
266 const Numeric& rtp_temperature,
267 const Tensor4& pnd_field,
268 const Index& scat_p_index,
269 const Index& scat_lat_index,
270 const Index& scat_lon_index,
271 const Verbosity&) {
272 const Index N_se_total = TotalNumberOfElements(scat_data_mono);
273
274 if (N_se_total != pnd_field.nbooks()) {
275 ostringstream os;
276 os << "Total number of scattering elements in scat_data_mono "
277 << "inconsistent with size of pnd_field.";
278 throw runtime_error(os.str());
279 }
 280 // As pha_mat_spt is typically initialized from pnd_field, this theoretically
 281 // checks the same as the runtime_error above. Still, we keep it to be on the
 282 // safe side.
283 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
284
285 // atmosphere_dim = 3
286 if (pnd_field.ncols() > 1) {
287 ARTS_ASSERT(pha_mat_sptDOITOpt.nelem() == N_se_total);
288 // Assuming that if the size is o.k. for one scattering element, it will
289 // also be o.k. for the other scattering elements.
290 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nlibraries() ==
291 scat_data_mono[0][0].T_grid.nelem());
292 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nvitrines() == doit_za_grid_size);
293 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nshelves() == aa_grid.nelem());
294 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nbooks() == doit_za_grid_size);
295 ARTS_ASSERT(pha_mat_sptDOITOpt[0].npages() == aa_grid.nelem());
296 }
297
298 // atmosphere_dim = 1, only zenith angle required for scattered directions.
299 else if (pnd_field.ncols() == 1) {
300 //ARTS_ASSERT(is_size(scat_theta, doit_za_grid_size, 1,
301 // doit_za_grid_size, aa_grid.nelem()));
302
303 ARTS_ASSERT(pha_mat_sptDOITOpt.nelem() == TotalNumberOfElements(scat_data_mono));
304 // Assuming that if the size is o.k. for one scattering element, it will
305 // also be o.k. for the other scattering elements.
306 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nlibraries() ==
307 scat_data_mono[0][0].T_grid.nelem());
308 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nvitrines() == doit_za_grid_size);
309 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nshelves() == 1);
310 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nbooks() == doit_za_grid_size);
311 ARTS_ASSERT(pha_mat_sptDOITOpt[0].npages() == aa_grid.nelem());
312 }
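// In both cases pha_mat_sptDOITOpt is hence expected per scattering element
// with dimensions
//   [T_grid, za_sca, aa_sca, za_inc, aa_inc, stokes_dim, stokes_dim],
// i.e. the lab-frame phase matrix precomputed for all direction combinations
// on the DOIT grids (filled that way by DoitScatteringDataPrepare below).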
313
314 ARTS_ASSERT(doit_za_grid_size > 0);
315
316 // Check that we do indeed have scat_data_mono here. Only checking the first
317 // scat element, assuming the other elements have been processed in the same
 318 // manner. That's safe against having scat_data here if that originated from
319 // scat_data_raw reading routines (ScatSpecies/Element*Add/Read), it's not safe
320 // against data read by ReadXML directly or if scat_data(_raw) has been (partly)
321 // produced from scat_data_singleTmatrix. That would be too costly here,
322 // though.
323 // Also, we can't check here whether data is at the correct frequency since we
324 // don't know f_grid and f_index here (we could pass it in, though).
325 if (scat_data_mono[0][0].f_grid.nelem() > 1) {
326 ostringstream os;
327 os << "Scattering data seems to be *scat_data* (several freq points),\n"
328 << "but *scat_data_mono* (1 freq point only) is expected here.";
329 throw runtime_error(os.str());
330 }
331
332 // Create equidistant zenith angle grid
333 Vector za_grid;
334 nlinspace(za_grid, 0, 180, doit_za_grid_size);
335
336 const Index N_ss = scat_data_mono.nelem();
337 const Index stokes_dim = pha_mat_spt.ncols();
338
339 if (stokes_dim > 4 || stokes_dim < 1) {
340 throw runtime_error(
341 "The dimension of the stokes vector \n"
342 "must be 1,2,3 or 4");
343 }
344
345 GridPos T_gp;
346 Vector itw(2);
347
348 // Initialisation
349 pha_mat_spt = 0.;
350
351 Index i_se_flat = 0;
352
353 // Do the transformation into the laboratory coordinate system.
354 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
355 const Index N_se = scat_data_mono[i_ss].nelem();
356
357 for (Index i_se = 0; i_se < N_se; i_se++) {
358 // If the particle number density at a specific point in the
359 // atmosphere for the i_se scattering element is zero, we don't need
360 // to do the transformation!
361 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
362 PND_LIMIT) //TRS
363 {
364 Index nT = scat_data_mono[i_ss][i_se].pha_mat_data.nvitrines();
365 Index ti = -1;
366
367 if (nT == 1) // just 1 T_grid element
368 {
369 ti = 0;
370 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
371 // pick one temperature'
372 {
373 if (rtp_temperature > -10.) // lowest T-point
374 {
375 ti = 0;
376 } else if (rtp_temperature > -20.) // highest T-point
377 {
378 ti = nT - 1;
379 } else // median T-point
380 {
381 ti = nT / 2;
382 }
383 } else {
384 ostringstream os;
385 os << "In pha_mat_sptFromDataDOITOpt.\n"
386 << "The temperature grid of the scattering data does not\n"
387 << "cover the atmospheric temperature at cloud location.\n"
388 << "The data should include the value T = " << rtp_temperature
389 << " K.";
 390 chk_interpolation_grids(
 391 os.str(), scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
392
393 // Gridpositions:
394 gridpos(T_gp, scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
395 // Interpolation weights:
396 interpweights(itw, T_gp);
397 }
398
399 for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
400 za_inc_idx++) {
401 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
402 aa_inc_idx++) {
403 if (ti < 0) // Temperature interpolation
404 {
405 for (Index i = 0; i < stokes_dim; i++) {
406 for (Index j = 0; j < stokes_dim; j++) {
407 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, i, j) =
408 interp(itw,
409 pha_mat_sptDOITOpt[i_se_flat](joker,
410 za_index,
411 aa_index,
412 za_inc_idx,
413 aa_inc_idx,
414 i,
415 j),
416 T_gp);
417 }
418 }
419 } else {
420 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker) =
421 pha_mat_sptDOITOpt[i_se_flat](ti,
422 za_index,
423 aa_index,
424 za_inc_idx,
425 aa_inc_idx,
426 joker,
427 joker);
428 }
429 }
430 }
431 } // TRS
432
433 i_se_flat++;
434 }
435 }
436}
437
438/* Workspace method: Doxygen documentation will be auto-generated */
439void opt_prop_sptFromData( // Output and Input:
440 ArrayOfPropagationMatrix& ext_mat_spt,
441 ArrayOfStokesVector& abs_vec_spt,
442 // Input:
443 const ArrayOfArrayOfSingleScatteringData& scat_data,
444 const Vector& za_grid,
445 const Vector& aa_grid,
446 const Index& za_index, // propagation directions
447 const Index& aa_index,
448 const Index& f_index,
449 const Vector& f_grid,
450 const Numeric& rtp_temperature,
451 const Tensor4& pnd_field,
452 const Index& scat_p_index,
453 const Index& scat_lat_index,
454 const Index& scat_lon_index,
455 const Verbosity& verbosity) {
456 const Index N_ss = scat_data.nelem();
457 const Numeric za_sca = za_grid[za_index];
458 const Numeric aa_sca = aa_grid[aa_index];
459
460 DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data);
461 if (N_ss) {
462 ARTS_ASSERT(ext_mat_spt[0].NumberOfFrequencies() == N_se_total);
463 ARTS_ASSERT(abs_vec_spt[0].NumberOfFrequencies() == N_se_total);
464 });
465
466 // Check that we don't have scat_data_mono here. Only checking the first
467 // scat element, assuming the other elements have been processed in the same
 468 // manner. That's safe against having mono data, but not against having
469 // individual elements produced with only a single frequency. This, however,
470 // has been checked by scat_data_raw reading routines (ScatSpecies/Element*Add/Read).
 471 // It remains unsafe, however, when ReadXML is used directly or if scat_data(_raw) is
472 // (partly) produced from scat_data_singleTmatrix.
473 if (scat_data[0][0].f_grid.nelem() < 2) {
474 ostringstream os;
475 os << "Scattering data seems to be *scat_data_mono* (1 freq point only),\n"
476 << "but frequency interpolable data (*scat_data* with >=2 freq points) "
477 << "is expected here.";
478 throw runtime_error(os.str());
479 }
480
 481 // Extinction matrix and absorption vector data interpolated to a single
 482 // frequency (and temperature). Dimensions: [za_inc, aa_inc, matrix_element]
483 Tensor3 ext_mat_data_int;
484 Tensor3 abs_vec_data_int;
485
486 // Initialisation
487 ext_mat_spt = 0.;
488 abs_vec_spt = 0.;
489
490 Index i_se_flat = 0;
491 // Loop over the included scattering species
492 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
493 const Index N_se = scat_data[i_ss].nelem();
494
495 // Loop over the included scattering elements
496 for (Index i_se = 0; i_se < N_se; i_se++) {
497 // If the particle number density at a specific point in the
498 // atmosphere for the i_se scattering element is zero, we don't need
499 // to do the transformation
500
501 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
502 PND_LIMIT) {
503 // First we have to transform the data from the coordinate system
504 // used in the database (depending on the kind of ptype) to the
505 // laboratory coordinate system.
506
507 // Frequency interpolation:
508
509 // The data is interpolated on one frequency.
510 //
511 // Resize the variables for the interpolated data:
512 //
513 ext_mat_data_int.resize(
514 EXT_MAT_DATA.npages(), EXT_MAT_DATA.nrows(), EXT_MAT_DATA.ncols());
515 //
516 abs_vec_data_int.resize(
517 ABS_VEC_DATA.npages(), ABS_VEC_DATA.nrows(), ABS_VEC_DATA.ncols());
518
519 // Gridpositions:
520 GridPos freq_gp;
521 gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
522 GridPos t_gp;
523 Vector itw;
524
525 if (T_DATAGRID.nelem() > 1) {
526 ostringstream os;
527 os << "In opt_prop_sptFromData.\n"
528 << "The temperature grid of the scattering data does not\n"
529 << "cover the atmospheric temperature at cloud location.\n"
530 << "The data should include the value T = " << rtp_temperature
531 << " K.";
532 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
533
534 gridpos(t_gp, T_DATAGRID, rtp_temperature);
535
536 // Interpolation weights:
537 itw.resize(4);
538 interpweights(itw, freq_gp, t_gp);
539
540 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
541 i_za_sca++) {
542 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
543 i_aa_sca++) {
544 //
545 // Interpolation of extinction matrix:
546 //
547 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
548 ext_mat_data_int(i_za_sca, i_aa_sca, i) =
549 interp(itw,
550 EXT_MAT_DATA(joker, joker, i_za_sca, i_aa_sca, i),
551 freq_gp,
552 t_gp);
553 }
554 }
555 }
556
557 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
558 i_za_sca++) {
559 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
560 i_aa_sca++) {
561 //
562 // Interpolation of absorption vector:
563 //
564 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
565 abs_vec_data_int(i_za_sca, i_aa_sca, i) =
566 interp(itw,
567 ABS_VEC_DATA(joker, joker, i_za_sca, i_aa_sca, i),
568 freq_gp,
569 t_gp);
570 }
571 }
572 }
573 } else {
574 // Interpolation weights:
575 itw.resize(2);
576 interpweights(itw, freq_gp);
577
578 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
579 i_za_sca++) {
580 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
581 i_aa_sca++) {
582 //
583 // Interpolation of extinction matrix:
584 //
585 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
586 ext_mat_data_int(i_za_sca, i_aa_sca, i) =
587 interp(itw,
588 EXT_MAT_DATA(joker, 0, i_za_sca, i_aa_sca, i),
589 freq_gp);
590 }
591 }
592 }
593
594 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
595 i_za_sca++) {
596 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
597 i_aa_sca++) {
598 //
599 // Interpolation of absorption vector:
600 //
601 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
602 abs_vec_data_int(i_za_sca, i_aa_sca, i) =
603 interp(itw,
604 ABS_VEC_DATA(joker, 0, i_za_sca, i_aa_sca, i),
605 freq_gp);
606 }
607 }
608 }
609 }
610
611 //
612 // Do the transformation into the laboratory coordinate system.
613 //
614 // Extinction matrix:
615 //
616 ext_matTransform(ext_mat_spt[i_se_flat],
617 ext_mat_data_int,
 618 ZA_DATAGRID,
 619 AA_DATAGRID,
 620 PART_TYPE,
621 za_sca,
622 aa_sca,
623 verbosity);
624 //
625 // Absorption vector:
626 //
627 abs_vecTransform(abs_vec_spt[i_se_flat],
628 abs_vec_data_int,
 629 ZA_DATAGRID,
 630 AA_DATAGRID,
 631 PART_TYPE,
632 za_sca,
633 aa_sca,
634 verbosity);
635 }
636
637 i_se_flat++;
638 }
639 }
640}
641
642/* Workspace method: Doxygen documentation will be auto-generated */
643void opt_prop_sptFromScat_data( // Output and Input:
644 ArrayOfPropagationMatrix& ext_mat_spt,
645 ArrayOfStokesVector& abs_vec_spt,
646 // Input:
647 const ArrayOfArrayOfSingleScatteringData& scat_data,
648 const Index& scat_data_checked,
649 const Vector& za_grid,
650 const Vector& aa_grid,
651 const Index& za_index, // propagation directions
652 const Index& aa_index,
653 const Index& f_index,
654 const Numeric& rtp_temperature,
655 const Tensor4& pnd_field,
656 const Index& scat_p_index,
657 const Index& scat_lat_index,
658 const Index& scat_lon_index,
659 const Verbosity& verbosity) {
660 if (scat_data_checked != 1)
661 throw runtime_error(
662 "The scattering data must be flagged to have "
663 "passed a consistency check (scat_data_checked=1).");
664
665 const Index N_ss = scat_data.nelem();
666 const Index stokes_dim = ext_mat_spt[0].StokesDimensions();
667 const Numeric za_sca = za_grid[za_index];
668 const Numeric aa_sca = aa_grid[aa_index];
669
670 if (stokes_dim > 4 || stokes_dim < 1) {
671 throw runtime_error(
672 "The dimension of the stokes vector \n"
673 "must be 1,2,3 or 4");
674 }
675
676 DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data);)
677 ARTS_ASSERT(ext_mat_spt.nelem() == N_se_total);
678 ARTS_ASSERT(abs_vec_spt.nelem() == N_se_total);
679
 680 // Extinction matrix and absorption vector data at a single frequency and
 681 // temperature. Dimensions: [za_inc, aa_inc, matrix_element]
682 Tensor3 ext_mat_data_int;
683 Tensor3 abs_vec_data_int;
684
685 // Initialisation
686 for (auto& pm : ext_mat_spt) pm.SetZero();
687 for (auto& sv : abs_vec_spt) sv.SetZero();
688
689 Index this_f_index;
690
691 Index i_se_flat = 0;
692 // Loop over the included scattering species
693 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
694 const Index N_se = scat_data[i_ss].nelem();
695
696 // Loop over the included scattering elements
697 for (Index i_se = 0; i_se < N_se; i_se++) {
698 // If the particle number density at a specific point in the
699 // atmosphere for the i_se scattering element is zero, we don't need
700 // to do the transformation
701
702 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
703 PND_LIMIT) {
704 // First we have to transform the data from the coordinate system
705 // used in the database (depending on the kind of ptype) to the
706 // laboratory coordinate system.
707
708 // Resize the variables for the interpolated data (1freq, 1T):
709 ext_mat_data_int.resize(
710 EXT_MAT_DATA.npages(), EXT_MAT_DATA.nrows(), EXT_MAT_DATA.ncols());
711 abs_vec_data_int.resize(
712 ABS_VEC_DATA.npages(), ABS_VEC_DATA.nrows(), ABS_VEC_DATA.ncols());
713
714 // Gridpositions and interpolation weights;
715 GridPos t_gp;
716 Vector itw;
717 if (EXT_MAT_DATA.nbooks() > 1 || ABS_VEC_DATA.nbooks() > 1) {
718 ostringstream os;
719 os << "In opt_prop_sptFromScat_data.\n"
720 << "The temperature grid of the scattering data does not\n"
721 << "cover the atmospheric temperature at cloud location.\n"
722 << "The data should include the value T = " << rtp_temperature
723 << " K.";
724 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
725
726 gridpos(t_gp, T_DATAGRID, rtp_temperature);
727
728 // Interpolation weights:
729 itw.resize(2);
730 interpweights(itw, t_gp);
731 }
732
733 // Frequency extraction and temperature interpolation
734
735 if (EXT_MAT_DATA.nshelves() == 1)
736 this_f_index = 0;
737 else
738 this_f_index = f_index;
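// (A frequency dimension of size 1 means the data have already been reduced
// to a single frequency, e.g. with scat_data_monoExtract; then index 0 is
// used. Otherwise the data are assumed to be given on f_grid and f_index
// extracts the frequency directly, without interpolation.)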
739
740 if (EXT_MAT_DATA.nbooks() > 1) {
741 // Interpolation of extinction matrix:
742 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
743 i_za_sca++) {
744 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
745 i_aa_sca++) {
746 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
747 ext_mat_data_int(i_za_sca, i_aa_sca, i) = interp(
748 itw,
749 EXT_MAT_DATA(this_f_index, joker, i_za_sca, i_aa_sca, i),
750 t_gp);
751 }
752 }
753 }
754 } else {
755 ext_mat_data_int = EXT_MAT_DATA(this_f_index, 0, joker, joker, joker);
756 /*
757 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
758 i_za_sca++)
759 {
760 for(Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
761 i_aa_sca++)
762 {
763 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++)
764 {
765 ext_mat_data_int(i_za_sca, i_aa_sca, i) =
766 EXT_MAT_DATA(this_f_index, 0,
767 i_za_sca, i_aa_sca, i);
768 }
769 }
770 } */
771 }
772
773 if (ABS_VEC_DATA.nshelves() == 1)
774 this_f_index = 0;
775 else
776 this_f_index = f_index;
777
778 if (ABS_VEC_DATA.nbooks() > 1) {
779 // Interpolation of absorption vector:
780 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
781 i_za_sca++) {
782 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
783 i_aa_sca++) {
784 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
785 abs_vec_data_int(i_za_sca, i_aa_sca, i) = interp(
786 itw,
787 ABS_VEC_DATA(this_f_index, joker, i_za_sca, i_aa_sca, i),
788 t_gp);
789 }
790 }
791 }
792 } else {
793 abs_vec_data_int = ABS_VEC_DATA(this_f_index, 0, joker, joker, joker);
794 /*
795 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
796 i_za_sca++)
797 {
798 for(Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
799 i_aa_sca++)
800 {
801 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++)
802 {
803 abs_vec_data_int(i_za_sca, i_aa_sca, i) =
804 ABS_VEC_DATA(this_f_index, 0,
805 i_za_sca, i_aa_sca, i);
806 }
807 }
808 } */
809 }
810
811 //
812 // Do the transformation into the laboratory coordinate system.
813 //
814 // Extinction matrix:
815 ext_matTransform(ext_mat_spt[i_se_flat],
816 ext_mat_data_int,
 817 ZA_DATAGRID,
 818 AA_DATAGRID,
 819 PART_TYPE,
820 za_sca,
821 aa_sca,
822 verbosity);
823 // Absorption vector:
824 abs_vecTransform(abs_vec_spt[i_se_flat],
825 abs_vec_data_int,
 826 ZA_DATAGRID,
 827 AA_DATAGRID,
 828 PART_TYPE,
829 za_sca,
830 aa_sca,
831 verbosity);
832 }
833 i_se_flat++;
834 }
835 }
836}
837
838/* Workspace method: Doxygen documentation will be auto-generated */
839void opt_prop_bulkCalc( // Output and Input:
840 PropagationMatrix& ext_mat,
841 StokesVector& abs_vec,
842 // Input:
843 const ArrayOfPropagationMatrix& ext_mat_spt,
844 const ArrayOfStokesVector& abs_vec_spt,
845 const Tensor4& pnd_field,
846 const Index& scat_p_index,
847 const Index& scat_lat_index,
848 const Index& scat_lon_index,
849 const Verbosity&) {
850 Index N_se = abs_vec_spt.nelem();
851 //ARTS_ASSERT( ext_mat_spt.npages()==N_se )
852 if (ext_mat_spt.nelem() not_eq N_se) {
853 ostringstream os;
854 os << "Number of scattering elements in *abs_vec_spt* and *ext_mat_spt*\n"
855 << "does not agree.";
856 throw runtime_error(os.str());
857 }
858
859 Index stokes_dim = abs_vec_spt[0].StokesDimensions();
860 //ARTS_ASSERT( ext_mat_spt.ncols()==stokes_dim && ext_mat_spt.nrows()==stokes_dim )
861 if (ext_mat_spt[0].StokesDimensions() not_eq stokes_dim) {
862 ostringstream os;
863 os << "*stokes_dim* of *abs_vec_spt* and *ext_mat_spt* does not agree.";
864 throw runtime_error(os.str());
865 }
866 if (stokes_dim > 4 || stokes_dim < 1) {
867 ostringstream os;
868 os << "The dimension of stokes vector can only be 1, 2, 3, or 4.";
869 throw runtime_error(os.str());
870 }
871
872 ext_mat = PropagationMatrix(1, stokes_dim);
873 ext_mat.SetZero(); // Initialize to zero!
874 abs_vec = StokesVector(1, stokes_dim);
875 abs_vec.SetZero(); // Initialize to zero!
876
877 PropagationMatrix ext_mat_part(1, stokes_dim);
878 ext_mat_part.SetZero();
879 StokesVector abs_vec_part(1, stokes_dim);
880 abs_vec_part.SetZero();
881
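// The bulk quantities are the pnd-weighted sums over all scattering elements,
//   ext_mat = sum_l pnd(l) * ext_mat_spt[l]
//   abs_vec = sum_l pnd(l) * abs_vec_spt[l],
// accumulated by the MultiplyAndAdd calls in the loop below.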
882 // this is the loop over the different scattering elements
883 for (Index l = 0; l < N_se; l++) {
884 abs_vec_part.MultiplyAndAdd(
885 pnd_field(l, scat_p_index, scat_lat_index, scat_lon_index),
886 abs_vec_spt[l]);
887 ext_mat_part.MultiplyAndAdd(
888 pnd_field(l, scat_p_index, scat_lat_index, scat_lon_index),
889 ext_mat_spt[l]);
890 }
891
 892 // Add the absorption due to the scattering elements.
 893 abs_vec += abs_vec_part;
 894 // Add the extinction matrix due to the scattering elements to *ext_mat*.
895 ext_mat += ext_mat_part;
896}
897
898/* Workspace method: Doxygen documentation will be auto-generated */
899void ext_matAddGas(PropagationMatrix& ext_mat,
900 const PropagationMatrix& propmat_clearsky,
901 const Verbosity&) {
902 // Number of Stokes parameters:
903 const Index stokes_dim = ext_mat.StokesDimensions();
904
905 // The second dimension of ext_mat must also match the number of
906 // Stokes parameters:
907 ARTS_USER_ERROR_IF (stokes_dim != propmat_clearsky.StokesDimensions(),
908 "Col dimension of propmat_clearsky "
909 "inconsistent with col dimension in ext_mat.");
910
911 // Number of frequencies:
912 const Index f_dim = ext_mat.NumberOfFrequencies();
913
914 // This must be consistent with the second dimension of
915 // propmat_clearsky. Check this:
916 ARTS_USER_ERROR_IF (f_dim != propmat_clearsky.NumberOfFrequencies(),
917 "Frequency dimension of ext_mat and propmat_clearsky\n"
918 "are inconsistent in ext_matAddGas.");
919
920 ext_mat += propmat_clearsky;
921}
922
923/* Workspace method: Doxygen documentation will be auto-generated */
924void abs_vecAddGas(StokesVector& abs_vec,
925 const PropagationMatrix& propmat_clearsky,
926 const Verbosity&) {
927 // Number of frequencies:
928 const Index f_dim = abs_vec.NumberOfFrequencies();
929 const Index stokes_dim = abs_vec.StokesDimensions();
930
931 // This must be consistent with the second dimension of
932 // propmat_clearsky. Check this:
933 ARTS_USER_ERROR_IF (f_dim != propmat_clearsky.NumberOfFrequencies(),
934 "Frequency dimension of abs_vec and propmat_clearsky\n"
935 "are inconsistent in abs_vecAddGas.");
936 ARTS_USER_ERROR_IF (stokes_dim != propmat_clearsky.StokesDimensions(),
937 "Stokes dimension of abs_vec and propmat_clearsky\n"
938 "are inconsistent in abs_vecAddGas.");
939
940 // Loop all frequencies. Of course this includes the special case
941 // that there is only one frequency.
942 abs_vec += propmat_clearsky; // Defined to only add to the
943
944 // We don't have to do anything about higher elements of abs_vec,
945 // since scalar gas absorption only influences the first element.
946}
947
948/* Workspace method: Doxygen documentation will be auto-generated */
949/*
950void ext_matAddGasZeeman( Tensor3& ext_mat,
951 const Tensor3& ext_mat_zee,
952 const Verbosity&)
953{
954 // Number of Stokes parameters:
955 const Index stokes_dim = ext_mat.ncols();
956
957 // The second dimension of ext_mat must also match the number of
958 // Stokes parameters:
959 if ( stokes_dim != ext_mat.nrows() )
960 throw runtime_error("Row dimension of ext_mat inconsistent with "
961 "column dimension.");
962
963 for ( Index i=0; i<stokes_dim; ++i )
964 {
965 for ( Index j=0; j<stokes_dim; ++j )
966 {
967 // Add the zeeman extinction to extinction matrix.
968 ext_mat(joker,i,j) += ext_mat_zee(joker, i, j);
969 }
970
971 }
972}
973*/
974
975/* Workspace method: Doxygen documentation will be auto-generated */
976/*
977void abs_vecAddGasZeeman( Matrix& abs_vec,
978 const Matrix& abs_vec_zee,
979 const Verbosity&)
980{
981 // Number of Stokes parameters:
982 const Index stokes_dim = abs_vec_zee.ncols();
983 // that there is only one frequency.
984 for ( Index j=0; j<stokes_dim; ++j )
985 {
986 abs_vec(joker,j) += abs_vec_zee(joker,j);
987 }
988}
989*/
990
991/* Workspace method: Doxygen documentation will be auto-generated */
992void pha_matCalc(Tensor4& pha_mat,
993 const Tensor5& pha_mat_spt,
994 const Tensor4& pnd_field,
995 const Index& atmosphere_dim,
996 const Index& scat_p_index,
997 const Index& scat_lat_index,
998 const Index& scat_lon_index,
999 const Verbosity&) {
1000 Index N_se = pha_mat_spt.nshelves();
1001 Index Nza = pha_mat_spt.nbooks();
1002 Index Naa = pha_mat_spt.npages();
1003 Index stokes_dim = pha_mat_spt.nrows();
1004
1005 pha_mat.resize(Nza, Naa, stokes_dim, stokes_dim);
1006
1007 // Initialisation
1008 pha_mat = 0.0;
1009
1010 Index ilat = 0;
1011 Index ilon = 0;
1012 if (atmosphere_dim > 1) ilat = scat_lat_index;
1013 if (atmosphere_dim > 2) ilon = scat_lon_index;
1014
1015 if (atmosphere_dim == 1) {
 1016 // For 1D atmospheres, we additionally integrate the phase matrix over the
1017 // azimuth, because there is no azimuth dependency of the incoming
1018 // field.
1019
1020 Numeric grid_step_size_azimuth = 360. / (Numeric)(Naa - 1) * DEG2RAD;
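// The azimuth integration uses the trapezoidal rule on the equidistant
// azimuth grid (step 360/(Naa-1) degrees, converted to radians above):
//   pha_mat(za, 0) = sum_aa 0.5*(Z(za,aa) + Z(za,aa+1)) * daa * pnd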
1021
1022 // this is a loop over the different scattering elements
1023 for (Index pt_index = 0; pt_index < N_se; ++pt_index)
1024 // these are loops over zenith angle and azimuth angle
1025 for (Index za_index = 0; za_index < Nza; ++za_index)
1026 for (Index aa_index = 0; aa_index < Naa - 1; ++aa_index)
1027 // now the last two loops over the stokes dimension.
1028 for (Index stokes_index_1 = 0; stokes_index_1 < stokes_dim;
1029 ++stokes_index_1)
1030 for (Index stokes_index_2 = 0; stokes_index_2 < stokes_dim;
1031 ++stokes_index_2)
1032 //summation of the product of pnd_field and
1033 //pha_mat_spt.
1034 pha_mat(za_index, 0, stokes_index_1, stokes_index_2) +=
1035 ((pha_mat_spt(pt_index,
1036 za_index,
1037 aa_index,
1038 stokes_index_1,
1039 stokes_index_2) +
1040 pha_mat_spt(pt_index,
1041 za_index,
1042 aa_index + 1,
1043 stokes_index_1,
1044 stokes_index_2)) /
1045 2 * grid_step_size_azimuth *
1046 pnd_field(pt_index, scat_p_index, ilat, ilon));
1047 } else {
1048 // this is a loop over the different scattering elements
1049 for (Index pt_index = 0; pt_index < N_se; ++pt_index)
1050 // these are loops over zenith angle and azimuth angle
1051 for (Index za_index = 0; za_index < Nza; ++za_index)
1052 for (Index aa_index = 0; aa_index < Naa; ++aa_index)
1053 // now the last two loops over the stokes dimension.
1054 for (Index stokes_index_1 = 0; stokes_index_1 < stokes_dim;
1055 ++stokes_index_1)
1056 for (Index stokes_index_2 = 0; stokes_index_2 < stokes_dim;
1057 ++stokes_index_2)
1058 //summation of the product of pnd_field and
1059 //pha_mat_spt.
1060 pha_mat(za_index, aa_index, stokes_index_1, stokes_index_2) +=
1061 (pha_mat_spt(pt_index,
1062 za_index,
1063 aa_index,
1064 stokes_index_1,
1065 stokes_index_2) *
1066 pnd_field(pt_index, scat_p_index, ilat, ilon));
1067 }
1068}
1069
1070/* Workspace method: Doxygen documentation will be auto-generated */
1071void scat_dataCheck( //Input:
1072 const ArrayOfArrayOfSingleScatteringData& scat_data,
1073 const String& check_type,
1074 const Numeric& threshold,
1075 const Verbosity& verbosity) {
 1076 CREATE_OUT0;
 1077 CREATE_OUT1;
 1078 CREATE_OUT2;
 1079 //CREATE_OUT3;
1080
1081 // FIXME:
1082 // so far, this works for both scat_data and scat_data_raw. Needs to be
1083 // adapted, though, once we have WSM that can create Z/K/a with different
1084 // f/T dimensions than scat_data_single.f/T_grid.
1085
1086 const Index N_ss = scat_data.nelem();
1087
1088 // 1) any negative values in Z11, K11, or a1? K11>=a1?
1089 // 2) scat_data containing any NaN?
1090 // 3) sca_mat norm sufficiently good (int(Z11)~=K11-a1?)
1091
1092 // Loop over the included scattering species
1093 out2 << " checking for negative values in Z11, K11, and a1, and for K11<a1\n";
1094 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1095 const Index N_se = scat_data[i_ss].nelem();
1096
1097 // Loop over the included scattering elements
1098 for (Index i_se = 0; i_se < N_se; i_se++) {
1099 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1100 for (Index zai = 0; zai < ABS_VEC_DATA.npages(); zai++)
1101 for (Index aai = 0; aai < ABS_VEC_DATA.nrows(); aai++) {
1102 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1103 if (EXT_MAT_DATA(f, t, zai, aai, 0) < 0 ||
1104 ABS_VEC_DATA(f, t, zai, aai, 0) < 0) {
1105 ostringstream os;
1106 os << "Scatt. species #" << i_ss << " element #" << i_se
1107 << " contains negative K11 or a1 at f#" << f << ", T#" << t
1108 << ", za#" << zai << ", aa#" << aai << "\n";
1109 throw runtime_error(os.str());
1110 }
1111 if (EXT_MAT_DATA(f, t, zai, aai, 0) <
1112 ABS_VEC_DATA(f, t, zai, aai, 0)) {
1113 ostringstream os;
1114 os << "Scatt. species #" << i_ss << " element #" << i_se
1115 << " has K11<a1 at f#" << f << ", T#" << t << ", za#" << zai
1116 << ", aa#" << aai << "\n";
1117 throw runtime_error(os.str());
1118 }
1119 }
1120 // Since allowing pha_mat to have a single T entry only (while
1121 // T_grid, ext_mat, abs_vec have more), we need a separate T loop
1122 // for pha_mat
1123 Index nTpha = PHA_MAT_DATA.nvitrines();
1124 for (Index t = 0; t < nTpha; t++) {
1125 for (Index zas = 0; zas < PHA_MAT_DATA.nshelves(); zas++)
1126 for (Index aas = 0; aas < PHA_MAT_DATA.nbooks(); aas++)
1127 if (PHA_MAT_DATA(f, t, zas, aas, zai, aai, 0) < 0) {
1128 ostringstream os;
1129 os << "Scatt. species #" << i_ss << " element #" << i_se
1130 << " contains negative Z11 at f#" << f << ", T#" << t
1131 << " (of " << nTpha << "), za_sca#" << zas << ", aa_sca#"
1132 << aas << ", za_inc#" << zai << ", aa_inc#" << aai
1133 << "\n";
1134 throw runtime_error(os.str());
1135 }
1136 }
1137 }
1138 }
1139 }
1140 }
1141
1142 // Loop over the included scattering species
1143 out2 << " checking for NaN anywhere in Z, K, and a\n";
1144 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1145 const Index N_se = scat_data[i_ss].nelem();
1146
1147 // Loop over the included scattering elements
1148 for (Index i_se = 0; i_se < N_se; i_se++) {
1149 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1150 for (Index zai = 0; zai < ABS_VEC_DATA.npages(); zai++)
1151 for (Index aai = 0; aai < ABS_VEC_DATA.nrows(); aai++) {
1152 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1153 for (Index st = 0; st < ABS_VEC_DATA.ncols(); st++)
1154 if (std::isnan(ABS_VEC_DATA(f, t, zai, aai, st))) {
1155 ostringstream os;
1156 os << "Scatt. species #" << i_ss << " element #" << i_se
1157 << " contains NaN in abs_vec at f#" << f << ", T#" << t
1158 << ", za#" << zai << ", aa#" << aai << ", stokes #" << st
1159 << "\n";
1160 throw runtime_error(os.str());
1161 }
1162 for (Index st = 0; st < EXT_MAT_DATA.ncols(); st++)
1163 if (std::isnan(EXT_MAT_DATA(f, t, zai, aai, st))) {
1164 ostringstream os;
1165 os << "Scatt. species #" << i_ss << " element #" << i_se
1166 << " contains NaN in ext_mat at f#" << f << ", T#" << t
1167 << ", za#" << zai << ", aa#" << aai << ", stokes #" << st
1168 << "\n";
1169 throw runtime_error(os.str());
1170 }
1171 }
1172 Index nTpha = PHA_MAT_DATA.nvitrines();
1173 for (Index t = 0; t < nTpha; t++) {
1174 for (Index zas = 0; zas < PHA_MAT_DATA.nshelves(); zas++)
1175 for (Index aas = 0; aas < PHA_MAT_DATA.nbooks(); aas++)
1176 for (Index st = 0; st < PHA_MAT_DATA.ncols(); st++)
1177 if (std::isnan(
1178 PHA_MAT_DATA(f, t, zas, aas, zai, aai, st))) {
1179 ostringstream os;
1180 os << "Scatt. species #" << i_ss << " element #" << i_se
1181 << " contains NaN in pha_mat at f#" << f << ", T#" << t
1182 << " (of " << nTpha << "), za_sca#" << zas
1183 << ", aa_sca#" << aas << ", za_inc#" << zai
 1184 << ", aa_inc#" << aai << ", stokes #" << st
 1185 << "\n";
1186 throw runtime_error(os.str());
1187 }
1188 }
1189 }
1190 }
1191 }
1192 }
1193
1194 if (check_type.toupper() == "ALL") {
1195 // Loop over the included scattering species
1196 out2 << " checking normalization of scattering matrix\n";
1197 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1198 const Index N_se = scat_data[i_ss].nelem();
1199
1200 // Loop over the included scattering elements
1201 for (Index i_se = 0; i_se < N_se; i_se++)
1202 if (T_DATAGRID.nelem() == PHA_MAT_DATA.nvitrines()) switch (PART_TYPE) {
1203 case PTYPE_TOTAL_RND: {
1204 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1205 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1206 Numeric Csca = AngIntegrate_trapezoid(
1207 PHA_MAT_DATA(f, t, joker, 0, 0, 0, 0), ZA_DATAGRID);
1208 Numeric Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
1209 //Numeric Cabs = Cext_data - Csca;
1210 Numeric Cabs_data = ABS_VEC_DATA(f, t, 0, 0, 0);
1211 Numeric Csca_data = Cext_data - Cabs_data;
1212
1213 /*
1214 out3 << " Coefficients in database: "
1215 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1216 << " Csca: " << Csca_data << "\n"
1217 << " Calculated coefficients: "
1218 << "Cabs calc: " << Cabs
1219 << " Csca calc: " << Csca << "\n"
1220 << " Deviations "
1221 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1222 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1223 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1224 */
1225
1226 //if (abs(Csca/Csca_data-1.)*Csca_data/Cext_data > threshold)
1227 // below equivalent to the above
1228 // (it's actually the (absolute) albedo deviation!)
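// |Csca/Csca_data - 1| * Csca_data/Cext_data = |Csca - Csca_data| / Cext_data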
1229 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1230 ostringstream os;
1231 os << " Deviations in scat_data too large:\n"
1232 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1233 << " at nominal (actual) albedo of "
1234 << Csca_data / Cext_data << " (" << Csca / Cext_data
1235 << ").\n"
1236 << " Check entry for scattering element " << i_se
1237 << " of scattering species " << i_ss << " at " << f
1238 << ".frequency and " << t << ".temperature!\n";
1239 throw runtime_error(os.str());
1240 }
1241 }
1242 }
1243 break;
1244 }
1245
1246 case PTYPE_AZIMUTH_RND: {
1247 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1248 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1249 for (Index iza = 0; iza < ABS_VEC_DATA.npages(); iza++) {
 1250 Numeric Csca =
 1251 AngIntegrate_trapezoid(
 1252 PHA_MAT_DATA(f, t, joker, joker, iza, 0, 0),
 1253 ZA_DATAGRID,
 1254 AA_DATAGRID);
1255 Numeric Cext_data = EXT_MAT_DATA(f, t, iza, 0, 0);
1256 //Numeric Cabs = Cext_data - Csca;
1257 Numeric Cabs_data = ABS_VEC_DATA(f, t, iza, 0, 0);
1258 Numeric Csca_data = Cext_data - Cabs_data;
1259
1260 /*
1261 out3 << " Coefficients in database: "
1262 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1263 << " Csca: " << Csca_data << "\n"
1264 << " Calculated coefficients: "
1265 << "Cabs calc: " << Cabs
1266 << " Csca calc: " << Csca << "\n"
1267 << " Deviations "
1268 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1269 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1270 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1271 */
1272
1273 //if (abs(Csca/Csca_data-1.)*Csca_data/Cext_data > threshold)
1274 // below equivalent to the above
1275 // (it's actually the (absolute) albedo deviation!)
1276 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1277 ostringstream os;
1278 os << " Deviations in scat_data too large:\n"
1279 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1280 << " at nominal (actual) albedo of "
1281 << Csca_data / Cext_data << " (" << Csca / Cext_data
1282 << ").\n"
1283 << " Check entry for scattering element " << i_se
1284 << " of scattering species " << i_ss << " at " << f
1285 << ". frequency, " << t << ". temperature, and " << iza
1286 << ". incident polar angle!\n";
1287 throw runtime_error(os.str());
1288 }
1289 }
1290 }
1291 }
1292 break;
1293 }
1294
1295 default: {
1296 out0
1297 << " WARNING:\n"
1298 << " scat_data consistency check not implemented (yet?!) for\n"
1299 << " ptype " << PART_TYPE << "!\n";
1300 }
1301 }
1302 else
1303 out2 << " WARNING:\n"
1304 << " scat_data norm check can not be performed for pha_mat-only"
1305 << " T-reduced scattering elements\n"
1306 << " as found in scatt element #" << i_se
1307 << " of scatt species #" << i_ss << "!\n";
1308 }
1309 } else if (check_type.toupper() == "SANE") {
1310 out1 << " WARNING:\n"
1311 << " Normalization check on pha_mat switched off.\n"
1312 << " Scattering solution might be wrong.\n";
1313 } else {
1314 ostringstream os;
1315 os << "Invalid value for argument *check_type*: '" << check_type << "'.\n";
 1316 os << "Valid values are 'all' or 'sane'.";
1317 throw runtime_error(os.str());
1318 }
1319}
1320
1321/* Workspace method: Doxygen documentation will be auto-generated */
 1322 void DoitScatteringDataPrepare(
 1323 Workspace& ws, //Output:
1324 ArrayOfTensor7& pha_mat_sptDOITOpt,
1325 ArrayOfArrayOfSingleScatteringData& scat_data_mono,
1326 Tensor7& pha_mat_doit,
1327 //Output and Input:
1328 Vector& aa_grid,
1329 //Input:
1330 const Index& doit_za_grid_size,
1331 const ArrayOfArrayOfSingleScatteringData& scat_data,
1332 const Index& scat_data_checked,
1333 const Index& f_index,
1334 const Index& atmosphere_dim,
1335 const Index& stokes_dim,
1336 const Tensor3& t_field,
1337 const ArrayOfIndex& cloudbox_limits,
1338 const Tensor4& pnd_field,
1339 const Agenda& pha_mat_spt_agenda,
1340 const Verbosity& verbosity) {
1341 if (scat_data_checked != 1)
1342 throw runtime_error(
1343 "The scattering data must be flagged to have "
1344 "passed a consistency check (scat_data_checked=1).");
1345
1346 // Number of azimuth angles.
1347 const Index Naa = aa_grid.nelem();
1348 Vector grid_stepsize(2);
1349 grid_stepsize[0] = 180. / (Numeric)(doit_za_grid_size - 1);
1350 //grid_stepsize[1] = 360./(Numeric)(Naa - 1);
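// The loops below precompute, for each scattering element and each entry of
// its T_grid, the lab-frame phase matrix for all combinations of scattered
// and incident directions on the equidistant DOIT zenith grid. The result,
// stored in pha_mat_sptDOITOpt, leaves only a temperature interpolation to
// be done at runtime by pha_mat_sptFromDataDOITOpt.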
1351
1352 // Initialize variable *pha_mat_spt*
1353 Tensor5 pha_mat_spt_local(pnd_field.nbooks(),
1354 doit_za_grid_size,
1355 aa_grid.nelem(),
1356 stokes_dim,
1357 stokes_dim,
1358 0.);
1359 Tensor4 pha_mat_local(doit_za_grid_size, Naa, stokes_dim, stokes_dim, 0.);
1360 Tensor6 pha_mat_local_out(cloudbox_limits[1] - cloudbox_limits[0] + 1,
1361 doit_za_grid_size,
1362 doit_za_grid_size,
1363 Naa,
1364 stokes_dim,
1365 stokes_dim,
1366 0.);
1367
1368 // Interpolate all the data in frequency
1369 scat_data_monoExtract(scat_data_mono, scat_data, f_index, verbosity);
1370
1371 // For 1D calculation the scat_aa dimension is not required:
1372 Index N_aa_sca;
1373 if (atmosphere_dim == 1)
1374 N_aa_sca = 1;
1375 else
1376 N_aa_sca = aa_grid.nelem();
1377
1378 Vector za_grid;
1379 nlinspace(za_grid, 0, 180, doit_za_grid_size);
1380
1381 ARTS_ASSERT(scat_data.nelem() == scat_data_mono.nelem());
1382
1383 const Index N_ss = scat_data.nelem();
1384 // FIXME: We need this still as a workspace variable because pha_mat_spt_agenda
1385 // contains a WS method that requires it as input
1386 pha_mat_sptDOITOpt.resize(TotalNumberOfElements(scat_data));
1387
1388 Index i_se_flat = 0;
1389 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1390 const Index N_se = scat_data[i_ss].nelem();
1391
1392 for (Index i_se = 0; i_se < N_se; i_se++) {
1393 Index N_T = scat_data_mono[i_ss][i_se].T_grid.nelem();
1394 pha_mat_sptDOITOpt[i_se_flat].resize(N_T,
1395 doit_za_grid_size,
1396 N_aa_sca,
1397 doit_za_grid_size,
1398 aa_grid.nelem(),
1399 stokes_dim,
1400 stokes_dim);
1401
1402 // Initialize:
1403 pha_mat_sptDOITOpt[i_se_flat] = 0.;
1404
1405 // Calculate all scattering angles for all combinations of incoming
1406 // and scattered directions and interpolation.
1407 for (Index t_idx = 0; t_idx < N_T; t_idx++) {
1408 // These are the scattered directions as called in *scat_field_calc*
1409 for (Index za_sca_idx = 0; za_sca_idx < doit_za_grid_size;
1410 za_sca_idx++) {
1411 for (Index aa_sca_idx = 0; aa_sca_idx < N_aa_sca; aa_sca_idx++) {
1412 // Integration is performed over all incoming directions
1413 for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
1414 za_inc_idx++) {
1415 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
1416 aa_inc_idx++) {
 1417 pha_matTransform(
 1418 pha_mat_sptDOITOpt[i_se_flat](t_idx,
1419 za_sca_idx,
1420 aa_sca_idx,
1421 za_inc_idx,
1422 aa_inc_idx,
1423 joker,
1424 joker),
1425 scat_data_mono[i_ss][i_se].pha_mat_data(
1426 0, t_idx, joker, joker, joker, joker, joker),
1427 scat_data_mono[i_ss][i_se].za_grid,
1428 scat_data_mono[i_ss][i_se].aa_grid,
1429 scat_data_mono[i_ss][i_se].ptype,
1430 za_sca_idx,
1431 aa_sca_idx,
1432 za_inc_idx,
1433 aa_inc_idx,
1434 za_grid,
1435 aa_grid,
1436 verbosity);
1437 }
1438 }
1439 }
1440 }
1441 }
1442
1443 i_se_flat++;
1444 }
1445 }
1446 // Interpolate phase matrix to current grid
1447 pha_mat_doit.resize(cloudbox_limits[1] - cloudbox_limits[0] + 1,
1448 doit_za_grid_size,
1449 N_aa_sca,
1450 doit_za_grid_size,
1451 Naa,
1452 stokes_dim,
1453 stokes_dim);
1454 pha_mat_doit = 0;
1455
1456 if (atmosphere_dim == 1) {
1457 Index aa_index_local = 0;
1458
1459 // Get pha_mat at the grid positions
1460 // Since atmosphere_dim = 1, there is no loop over lat and lon grids
1461 for (Index p_index = 0; p_index <= cloudbox_limits[1] - cloudbox_limits[0];
1462 p_index++) {
1463 Numeric rtp_temperature_local =
1464 t_field(p_index + cloudbox_limits[0], 0, 0);
1465 //There is only loop over zenith angle grid ; no azimuth angle grid.
1466 for (Index za_index_local = 0;
1467 za_index_local < doit_za_grid_size;
1468 za_index_local++) {
1469 // Dummy index
1470 Index index_zero = 0;
1471
1472 // Calculate the phase matrix of individual scattering elements
 1473 pha_mat_spt_agendaExecute(ws,
 1474 pha_mat_spt_local,
1475 za_index_local,
1476 index_zero,
1477 index_zero,
1478 p_index,
1479 aa_index_local,
1480 rtp_temperature_local,
1481 pha_mat_spt_agenda);
1482
1483 // Sum over all scattering elements
1484 pha_matCalc(pha_mat_local,
1485 pha_mat_spt_local,
1486 pnd_field,
1487 atmosphere_dim,
1488 p_index,
1489 0,
1490 0,
1491 verbosity);
1492 pha_mat_doit(
1493 p_index, za_index_local, 0, joker, joker, joker, joker) =
1494 pha_mat_local;
1495 }
1496 }
1497
 1498 // Set the incoming azimuth angle grid to a single zero entry, because
 1499 // there is no azimuth dependency for 1D atmospheres.
1500 aa_grid.resize(1);
1501 aa_grid = 0;
1502 }
1503}
1504
1505/* Workspace method: Doxygen documentation will be auto-generated */
 1506 void scat_dataCalc(ArrayOfArrayOfSingleScatteringData& scat_data,
 1507 const ArrayOfArrayOfSingleScatteringData& scat_data_raw,
1508 const Vector& f_grid,
1509 const Index& interp_order,
1510 const Verbosity&)
1511// FIXME: when we allow K, a, Z to be on different f and T grids, their use in
 1512// the scatt solvers needs to be reviewed again and adapted to this!
1513{
1514 //Extrapolation factor:
1515 //const Numeric extpolfac = 0.5;
1516
1517 Index nf = f_grid.nelem();
1518
1519 // Check, whether single scattering data contains the right frequencies:
1520 // The check was changed to allow extrapolation at the boundaries of the
1521 // frequency grid.
1522 const String which_interpolation = "scat_data_raw.f_grid to f_grid";
1523 for (Index i_ss = 0; i_ss < scat_data_raw.nelem(); i_ss++) {
1524 for (Index i_se = 0; i_se < scat_data_raw[i_ss].nelem(); i_se++) {
 1525 // Check for the special case that ssd.f_grid and f_grid each have only one
 1526 // element. If identical, that's fine. If not, throw an error.
1527 if (scat_data_raw[i_ss][i_se].f_grid.nelem() == 1 && nf == 1)
1528 if (!is_same_within_epsilon(scat_data_raw[i_ss][i_se].f_grid[0],
1529 f_grid[0],
1530 2 * DBL_EPSILON)) {
1531 ostringstream os;
1532 os << "There is a problem with the grids for the following "
1533 << "interpolation:\n"
1534 << which_interpolation << "\n"
1535 << "If original grid has only 1 element, the new grid must also have\n"
1536 << "only a single element and hold the same value as the original grid.";
1537 throw runtime_error(os.str());
1538 }
1539
1540 // check with extrapolation
1541 chk_interpolation_grids(which_interpolation,
1542 scat_data_raw[i_ss][i_se].f_grid,
1543 f_grid,
1544 interp_order);
1545 }
1546 }
1547
1548 //Initialise scat_data
1549 scat_data.resize(scat_data_raw.nelem());
1550
1551 // Loop over the included scattering species
1552 for (Index i_ss = 0; i_ss < scat_data_raw.nelem(); i_ss++) {
1553 const Index N_se = scat_data_raw[i_ss].nelem();
1554
1555 //Initialise scat_data
1556 scat_data[i_ss].resize(N_se);
1557
1558 // Loop over the included scattering elements
1559 for (Index i_se = 0; i_se < N_se; i_se++) {
1560 //Stuff that doesn't need interpolating
1561 PART_TYPE = scat_data_raw[i_ss][i_se].ptype;
1562 F_DATAGRID = f_grid;
1563 T_DATAGRID = scat_data_raw[i_ss][i_se].T_grid;
1564 ZA_DATAGRID = scat_data_raw[i_ss][i_se].za_grid;
1565 AA_DATAGRID = scat_data_raw[i_ss][i_se].aa_grid;
1566
1567 //Sizing SSD data containers
1568 PHA_MAT_DATA.resize(nf,
1569 scat_data_raw[i_ss][i_se].pha_mat_data.nvitrines(),
1570 scat_data_raw[i_ss][i_se].pha_mat_data.nshelves(),
1571 scat_data_raw[i_ss][i_se].pha_mat_data.nbooks(),
1572 scat_data_raw[i_ss][i_se].pha_mat_data.npages(),
1573 scat_data_raw[i_ss][i_se].pha_mat_data.nrows(),
1574 scat_data_raw[i_ss][i_se].pha_mat_data.ncols());
1575 EXT_MAT_DATA.resize(nf,
1576 scat_data_raw[i_ss][i_se].ext_mat_data.nbooks(),
1577 scat_data_raw[i_ss][i_se].ext_mat_data.npages(),
1578 scat_data_raw[i_ss][i_se].ext_mat_data.nrows(),
1579 scat_data_raw[i_ss][i_se].ext_mat_data.ncols());
1580 ABS_VEC_DATA.resize(nf,
1581 scat_data_raw[i_ss][i_se].abs_vec_data.nbooks(),
1582 scat_data_raw[i_ss][i_se].abs_vec_data.npages(),
1583 scat_data_raw[i_ss][i_se].abs_vec_data.nrows(),
1584 scat_data_raw[i_ss][i_se].abs_vec_data.ncols());
1585
1586 const bool single_se_fgrid =
1587 (scat_data_raw[i_ss][i_se].f_grid.nelem() == 1);
1588 if (!single_se_fgrid) {
1589 // Gridpositions:
1590 const auto lag_freq=my_interp::lagrange_interpolation_list<LagrangeInterpolation>(f_grid, scat_data_raw[i_ss][i_se].f_grid, interp_order);
1591 const auto itw = interpweights(lag_freq);
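// The frequency interpolation below is a plain Lagrange reinterpolation of
// every tensor element from scat_data_raw.f_grid onto f_grid; the angular,
// temperature and matrix-element dimensions are looped over unchanged.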
1592
1593 //Phase matrix data
1594 for (Index t_index = 0;
1595 t_index < scat_data_raw[i_ss][i_se].pha_mat_data.nvitrines();
1596 t_index++) {
1597 for (Index i_za_sca = 0;
1598 i_za_sca < scat_data_raw[i_ss][i_se].pha_mat_data.nshelves();
1599 i_za_sca++) {
1600 for (Index i_aa_sca = 0;
1601 i_aa_sca < scat_data_raw[i_ss][i_se].pha_mat_data.nbooks();
1602 i_aa_sca++) {
1603 for (Index i_za_inc = 0;
1604 i_za_inc < scat_data_raw[i_ss][i_se].pha_mat_data.npages();
1605 i_za_inc++) {
1606 for (Index i_aa_inc = 0;
1607 i_aa_inc < scat_data_raw[i_ss][i_se].pha_mat_data.nrows();
1608 i_aa_inc++) {
1609 for (Index i = 0;
1610 i < scat_data_raw[i_ss][i_se].pha_mat_data.ncols();
1611 i++) {
1612 reinterp(scat_data[i_ss][i_se].pha_mat_data(joker,
1613 t_index,
1614 i_za_sca,
1615 i_aa_sca,
1616 i_za_inc,
1617 i_aa_inc,
1618 i),
1619 scat_data_raw[i_ss][i_se].pha_mat_data(joker,
1620 t_index,
1621 i_za_sca,
1622 i_aa_sca,
1623 i_za_inc,
1624 i_aa_inc,
1625 i),
1626 itw,
1627 lag_freq);
1628 }
1629 }
1630 }
1631 }
1632 }
1633 }
1634
1635 //Extinction matrix data
1636 for (Index t_index = 0;
1637 t_index < scat_data_raw[i_ss][i_se].ext_mat_data.nbooks();
1638 t_index++) {
1639 for (Index i_za_sca = 0;
1640 i_za_sca < scat_data_raw[i_ss][i_se].ext_mat_data.npages();
1641 i_za_sca++) {
1642 for (Index i_aa_sca = 0;
1643 i_aa_sca < scat_data_raw[i_ss][i_se].ext_mat_data.nrows();
1644 i_aa_sca++) {
1645 for (Index i = 0;
1646 i < scat_data_raw[i_ss][i_se].ext_mat_data.ncols();
1647 i++) {
1648 reinterp(scat_data[i_ss][i_se].ext_mat_data(
1649 joker, t_index, i_za_sca, i_aa_sca, i),
1650 scat_data_raw[i_ss][i_se].ext_mat_data(
1651 joker, t_index, i_za_sca, i_aa_sca, i),
1652 itw,
1653 lag_freq);
1654 }
1655 }
1656 }
1657 }
1658
1659 //Absorption vector data
1660 for (Index t_index = 0;
1661 t_index < scat_data_raw[i_ss][i_se].abs_vec_data.nbooks();
1662 t_index++) {
1663 for (Index i_za_sca = 0;
1664 i_za_sca < scat_data_raw[i_ss][i_se].abs_vec_data.npages();
1665 i_za_sca++) {
1666 for (Index i_aa_sca = 0;
1667 i_aa_sca < scat_data_raw[i_ss][i_se].abs_vec_data.nrows();
1668 i_aa_sca++) {
1669 for (Index i = 0;
1670 i < scat_data_raw[i_ss][i_se].abs_vec_data.ncols();
1671 i++) {
1672 reinterp(scat_data[i_ss][i_se].abs_vec_data(
1673 joker, t_index, i_za_sca, i_aa_sca, i),
1674 scat_data_raw[i_ss][i_se].abs_vec_data(
1675 joker, t_index, i_za_sca, i_aa_sca, i),
1676 itw,
1677 lag_freq);
1678 }
1679 }
1680 }
1681 }
1682 } else {
1683 ARTS_ASSERT(nf == 1);
1684 // we do only have one f_grid value in old and new data (and they have
1685 // been confirmed to be the same), hence only need to copy over/reassign
1686 // the data.
1687 scat_data[i_ss][i_se].pha_mat_data =
1688 scat_data_raw[i_ss][i_se].pha_mat_data;
1689 scat_data[i_ss][i_se].ext_mat_data =
1690 scat_data_raw[i_ss][i_se].ext_mat_data;
1691 scat_data[i_ss][i_se].abs_vec_data =
1692 scat_data_raw[i_ss][i_se].abs_vec_data;
1693 }
1694 }
1695 }
1696}
1697
1698/* Workspace method: Doxygen documentation will be auto-generated */
 1699 void scat_dataReduceT(ArrayOfArrayOfSingleScatteringData& scat_data,
 1700 const Index& i_ss,
1701 const Numeric& T,
1702 const Index& interp_order,
1703 const Index& phamat_only,
1704 const Numeric& threshold,
1705 const Verbosity&) {
1706 // We are directly acting on the scat_data entries, modifying them
1707 // individually. That is, we don't need to resize these arrays. Only the
1708 // pha_mat and probably ext_mat and abs_vec Tensors (in the latter case also
1709 // T_grid!).
1710
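// In short: for each scattering element of species i_ss the optical
// properties are interpolated in temperature to the single value T, using
// Lagrange interpolation of order interp_order, so that the temperature
// dimension collapses to one entry. The T-interpolated ext_mat and abs_vec
// are computed below in any case, since they are also needed for the
// normalization check.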
1711 // Check that species i_ss exists at all in scat_data
1712 const Index nss = scat_data.nelem();
1713 if (nss <= i_ss) {
1714 ostringstream os;
1715 os << "Can not T-reduce scattering species #" << i_ss << ".\n"
1716 << "*scat_data* contains only " << nss << " scattering species.";
1717 throw runtime_error(os.str());
1718 }
1719
1720 // Loop over the included scattering elements
1721 for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
1722 // At very first check validity of the scatt elements ptype (so far we only
1723 // handle PTYPE_TOTAL_RND and PTYPE_AZIMUTH_RND).
 1724 if (PART_TYPE != PTYPE_TOTAL_RND && PART_TYPE != PTYPE_AZIMUTH_RND) {
 1725 ostringstream os;
1726 os << "Only ptypes " << PTYPE_TOTAL_RND << " and " << PTYPE_AZIMUTH_RND
1727 << " can be handled.\n"
1728 << "Scattering element #" << i_se << " has ptype " << PART_TYPE << ".";
1729 throw runtime_error(os.str());
1730 }
1731
1732 // If ssd.T_grid already has only a single point, we do nothing.
1733 // This is not necessarily expected behaviour. BUT, it is in line with
1734 // previous use (that if nT==1, then assume ssd constant in T).
1735 Index nT = T_DATAGRID.nelem();
1736 if (nT > 1) {
1737 // Check that we do not have data that has already been T-reduced (in
1738 // pha_mat only; a completely T-reduced ssd would have been sorted away
1739 // by the nT==1 case above).
1740 if (PHA_MAT_DATA.nvitrines() != nT) {
1741 ostringstream os;
1742 os << "Single scattering data of scat element #" << i_se
1743 << " of scat species #" << i_ss << "\n"
1744 << "seems to have undergone some temperature grid manipulation in\n"
1745 << "*pha_mat_data* already. That can not be done twice!";
1746 throw runtime_error(os.str());
1747 }
1748
1749 // Check that ext_mat and abs_vec have the same temp dimensions as T_grid.
1750 // This should always be true; if not, it's a bug rather than a user mistake, hence
1751 // use ARTS_ASSERT.
1752 ARTS_ASSERT(EXT_MAT_DATA.nbooks() == nT and ABS_VEC_DATA.nbooks() == nT);
1753
1754 // Check that T_grid is consistent with requested interpolation order
1755 ostringstream ost;
1756 ost << "Scattering data temperature interpolation for\n"
1757 << "scat element #" << i_se << " of scat species #" << i_ss << ".";
1758 chk_interpolation_grids(ost.str(), T_DATAGRID, T, interp_order);
1759
1760 // Gridpositions:
1761 const LagrangeInterpolation lag_T(0, T, T_DATAGRID, interp_order);
1762 const auto itw = interpweights(lag_T);
1763
1764 //Sizing of temporary SSD data containers
1765 Tensor7 phamat_tmp(PHA_MAT_DATA.nlibraries(),
1766 1,
1767 PHA_MAT_DATA.nshelves(),
1768 PHA_MAT_DATA.nbooks(),
1769 PHA_MAT_DATA.npages(),
1770 PHA_MAT_DATA.nrows(),
1771 PHA_MAT_DATA.ncols(),
1772 0.);
1773 Tensor5 extmat_tmp(EXT_MAT_DATA.nshelves(),
1774 1,
1775 EXT_MAT_DATA.npages(),
1776 EXT_MAT_DATA.nrows(),
1777 EXT_MAT_DATA.ncols(),
1778 0.);
1779 Tensor5 absvec_tmp(ABS_VEC_DATA.nshelves(),
1780 1,
1781 ABS_VEC_DATA.npages(),
1782 ABS_VEC_DATA.nrows(),
1783 ABS_VEC_DATA.ncols(),
1784 0.);
1785
1786 // a1) temp interpol of pha mat
1787 //We have to apply the interpolation separately for each of the pha_mat
1788 //entries, i.e. loop over all remaining size dimensions
1789 //We don't apply any transformation here, but interpolate the actual
1790 //stored ssd (i.e. not the 4x4 matrices, but the 7-16 stored elements separately).
1791 for (Index i_f = 0; i_f < PHA_MAT_DATA.nlibraries(); i_f++)
1792 for (Index i_za1 = 0; i_za1 < PHA_MAT_DATA.nshelves(); i_za1++)
1793 for (Index i_aa1 = 0; i_aa1 < PHA_MAT_DATA.nbooks(); i_aa1++)
1794 for (Index i_za2 = 0; i_za2 < PHA_MAT_DATA.npages(); i_za2++)
1795 for (Index i_aa2 = 0; i_aa2 < PHA_MAT_DATA.nrows(); i_aa2++)
1796 for (Index i_st = 0; i_st < PHA_MAT_DATA.ncols(); i_st++)
1797 phamat_tmp(i_f, 0, i_za1, i_aa1, i_za2, i_aa2, i_st) =
1798 interp(PHA_MAT_DATA(
1799 i_f, joker, i_za1, i_aa1, i_za2, i_aa2, i_st),
1800 itw,
1801 lag_T);
1802
1803 // a2) temp interpol of ext and abs.
1804 //We do that regardless of whether they should be reduced or not, because
1805 //we need them also for norm checking / renorming.
1806 for (Index i_f = 0; i_f < EXT_MAT_DATA.nshelves(); i_f++)
1807 for (Index i_za = 0; i_za < EXT_MAT_DATA.npages(); i_za++)
1808 for (Index i_aa = 0; i_aa < EXT_MAT_DATA.nrows(); i_aa++) {
1809 for (Index i_st = 0; i_st < EXT_MAT_DATA.ncols(); i_st++)
1810 extmat_tmp(i_f, 0, i_za, i_aa, i_st) =
1811 interp(EXT_MAT_DATA(i_f, joker, i_za, i_aa, i_st), itw, lag_T);
1812 for (Index i_st = 0; i_st < ABS_VEC_DATA.ncols(); i_st++)
1813 absvec_tmp(i_f, 0, i_za, i_aa, i_st) =
1814 interp(ABS_VEC_DATA(i_f, joker, i_za, i_aa, i_st), itw, lag_T);
1815 }
1816
1817 // Norm & other consistency checks.
1818 // All done separately for PTYPE_TOTAL_RND and PTYPE_AZIMUTH_RND (the
1819 // latter needs to loop over scat_za_inc).
1820 //
1821 // b) calculate norm of T-reduced pha mat
1822 // c) check pha mat norm vs. sca xs from ext-abs at T_interpol
1823 // d) Ensure that T-reduced data is consistent/representative of all data.
1824 // and throw error/disallow reduction if sca xs varying too much.
1825 // d1) In case of pha_mat-only reduction, the sca xs (pha_mat norm) needs
1826 // to be consistent with the sca xs derived from ext/abs over the T_grid.
1827 // This is essentially an energy conservation issue. That is, we should be
1828 // as strict here as with pha_mat norm deviations in general (however,
1829 // should we allow the norm at T_grid to deviate by a threshold from the
1830 // T_interpol norm (which should be a little looser) or from the ext-abs
1831 // derived expected norm?).
1832 // d2) In case of all-ssp reduction, the data should still be
1833 // representative. b)&c) ensure data consistency in themselves, making this
1834 // rather an error in the SSP as such. Hence, we might be a little looser
1835 // here.
1836 // d) the resulting check for d1) and d2) is the same (ext-abs sca xs at
1837 // T_interpol vs ext-abs sca xs at T_grid), but we use different
1838 // thresholds.
1839 //
1840 // FIXME?
1841 // Regarding b)&c), should we also calc the norm of the original-T pha mats,
1842 // to get a measure of how strongly they already deviate from the expected
1843 // norm (as we cannot be more exact here than what is already in the
1844 // original data...)? On the other hand, a certain accuracy should already
1845 // be guaranteed by scat_dataCheck.
1846 // Hence, for now we skip that (but it may be added later if it proves
1847 // necessary).
1848 //
1849 // FIXME?
1850 // Regarding d1), we could alternatively make sure here that the norm at
1851 // T_interpol is good, and later on ignore any deviations between norm and
1852 // ext-abs sca xs and instead blindly renorm to the expected norm. Would
1853 // that be ok? A correct norm here doesn't imply a correct norm at whatever
1854 // scat angle grids the user is applying. For that, we could in place also
1855 // calc the original-data norm, but that might be expensive (we can't do it
1856 // from the ext-abs sca xs, because we don't know which T those refer to;
1857 // that would go away if we actually stored pha_mat normed to 1 or 4Pi, but
1858 // that's probably not going to happen). Another option would be to
1859 // introduce an additional T_grid, e.g. T_grid_phamat, which we actually
1860 // want to avoid :-/
1861
1862 Numeric this_threshold;
1863 String errmsg;
1864 if (phamat_only) {
1865 this_threshold = threshold;
1866 errmsg =
1867 "T-reduced *pha_mat_data* norm (=sca xs) deviates too "
1868 "much from non-reduced *ext_mat_data* and *abs_vec_data*:";
1869 } else {
1870 this_threshold = 2 * threshold;
1871 errmsg =
1872 "T-reduced *scat_data* deviates too much from original "
1873 "*scat_data*:";
1874 }
1875
1876 // The norm-check code is copied and slightly adapted from scat_dataCheck.
1877 // It might be better to make a function out of this and use it in both
1878 // places for consistency.
1879 //
1880 // FIXME: no checks on higher Stokes elements are done. Should there be?
1881 // Which ones?
1882 switch (PART_TYPE) {
1883 case PTYPE_TOTAL_RND: {
1884 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1885 // b) calculate norm of T-reduced pha mat
1886 Numeric Csca = AngIntegrate_trapezoid(
1887 phamat_tmp(f, 0, joker, 0, 0, 0, 0), ZA_DATAGRID);
1888 Numeric Cext_data = extmat_tmp(f, 0, 0, 0, 0);
1889 //Numeric Cabs = Cext_data - Csca;
1890 Numeric Cabs_data = absvec_tmp(f, 0, 0, 0, 0);
1891 Numeric Csca_data = Cext_data - Cabs_data;
1892
1893 /*
1894 cout << " Coefficients in data: "
1895 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1896 << " Csca: " << Csca_data << "\n"
1897 << " Calculated coefficients: "
1898 << "Cabs calc: " << Cabs
1899 << " Csca calc: " << Csca << "\n"
1900 << " Deviations "
1901 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1902 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1903 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1904 */
1905
1906 // c) check pha mat norm vs. sca xs from ext-abs at T_interpol (as
1907 // albedo dev check)
1908 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1909 ostringstream os;
1910 os << " Deviations in T-reduced scat_data too large:\n"
1911 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1912 << " at nominal (actual) albedo of " << Csca_data / Cext_data
1913 << " (" << Csca / Cext_data << ").\n"
1914 << " Problem occurs for scattering element #" << i_se
1915 << " at " << f << ".frequency!\n";
1916 throw runtime_error(os.str());
1917 }
1918 Numeric norm_dev = (Csca - Csca_data) / Cext_data;
1919
1920 // d) Ensure that T-reduced data is consistent/representative of all data.
1921 // below use theoretical (ext-abs derived) sca xs as reference.
1922 Csca = Csca_data;
1923 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1924 Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
1925 Csca_data = Cext_data - ABS_VEC_DATA(f, t, 0, 0, 0);
1926 Numeric xs_dev = (Csca - Csca_data) / Cext_data;
1927 if (abs(norm_dev + (Csca - Csca_data) / Cext_data) >
1928 this_threshold)
1929 cout << "Accumulated deviation (abs(" << norm_dev << "+"
1930 << xs_dev << ")=" << abs(norm_dev + xs_dev)
1931 << " exceeding threshold (" << this_threshold << ").\n";
1932 if (abs(Csca - Csca_data) / Cext_data > this_threshold) {
1933 ostringstream os;
1934 os << " " << errmsg << "\n"
1935 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1936 << " at nominal (actual) albedo of " << Csca_data / Cext_data
1937 << " (" << Csca / Cext_data << ").\n"
1938 << " Problem occurs for scattering element #" << i_se
1939 << " at " << f << ".frequency and " << t
1940 << ".temperature!\n";
1941 throw runtime_error(os.str());
1942 }
1943 }
1944 }
1945 break;
1946 }
1947
1948 case PTYPE_AZIMUTH_RND: {
1949 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1950 for (Index iza = 0; iza < ABS_VEC_DATA.npages(); iza++) {
1951 // b) calculate norm of T-reduced pha mat
1952 Numeric Csca = 2 * AngIntegrate_trapezoid(
1953 phamat_tmp(f, 0, joker, joker, iza, 0, 0),
1954 ZA_DATAGRID,
1955 AA_DATAGRID);
1956 Numeric Cext_data = extmat_tmp(f, 0, iza, 0, 0);
1957 //Numeric Cabs = Cext_data - Csca;
1958 Numeric Cabs_data = absvec_tmp(f, 0, iza, 0, 0);
1959 Numeric Csca_data = Cext_data - Cabs_data;
1960
1961 /*
1962 cout << " Coefficients in data: "
1963 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1964 << " Csca: " << Csca_data << "\n"
1965 << " Calculated coefficients: "
1966 << "Cabs calc: " << Cabs
1967 << " Csca calc: " << Csca << "\n"
1968 << " Deviations "
1969 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1970 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1971 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1972 */
1973
1974 // c) check pha mat norm vs. sca xs from ext-abs at T_interpol (as
1975 // albedo dev check)
1976 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1977 ostringstream os;
1978 os << " Deviations in T-reduced scat_data too large:\n"
1979 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1980 << " at nominal (actual) albedo of " << Csca_data / Cext_data
1981 << " (" << Csca / Cext_data << ").\n"
1982 << " Problem occurs for scattering element #" << i_se
1983 << " at " << f << ".frequency, and " << iza
1984 << ". incident polar angle!\n";
1985 throw runtime_error(os.str());
1986 }
1987
1988 // d) Ensure that T-reduced data is consistent/representative of all data.
1989 // below use theoretical (ext-abs derived) sca xs as reference.
1990 Csca = Csca_data;
1991 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1992 Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
1993 Csca_data = Cext_data - ABS_VEC_DATA(f, t, 0, 0, 0);
1994 if (abs(Csca - Csca_data) / Cext_data > this_threshold) {
1995 ostringstream os;
1996 os << " " << errmsg << "\n"
1997 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1998 << " at nominal (actual) albedo of "
1999 << Csca_data / Cext_data << " (" << Csca / Cext_data
2000 << ").\n"
2001 << " Problem occurs for scattering element #" << i_se
2002 << " at " << f << ".frequency and " << t
2003 << ".temperature, and " << iza
2004 << ". incident polar angle!\n";
2005 throw runtime_error(os.str());
2006 }
2007 }
2008 }
2009 }
2010 break;
2011 }
2012
2013 default: {
2014 // other ptype cases already excluded above. i.e. we shouldn't end up
2015 // here. If we do, that's a bug.
2016 ARTS_ASSERT(0);
2017 }
2018 }
2019
2020 PHA_MAT_DATA = phamat_tmp;
2021 //We don't need to reset the scat element's grids!
2022 //Except for T_grid in the case that we reduce ALL three ssd variables.
2023 if (!phamat_only) {
2024 T_DATAGRID.resize(1);
2025 T_DATAGRID = T;
2026 EXT_MAT_DATA = extmat_tmp;
2027 ABS_VEC_DATA = absvec_tmp;
2028 }
2029 }
2030 }
2031}
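
// ---------------------------------------------------------------------------
// [Editor's sketch, not part of m_optproperties.cc] Worked example of the
// PTYPE_TOTAL_RND norm check performed in scat_dataReduceT above: the
// scattering cross section implied by the (T-reduced) phase matrix,
//   Csca(pha) = 2*pi * Integral_0..pi Z11(theta) sin(theta) dtheta,
// evaluated with the trapezoidal rule (as AngIntegrate_trapezoid does), is
// compared to Csca(data) = Cext - Cabs from ext_mat/abs_vec, and the albedo
// deviation |Csca(pha) - Csca(data)| / Cext is tested against the threshold.
// All names below are illustrative; za_deg holds the zenith angle grid in
// degrees and Z11 the corresponding phase matrix element.
#include <cmath>
#include <cstddef>
#include <vector>

static bool norm_check_sketch(const std::vector<double>& za_deg,
                              const std::vector<double>& Z11,
                              double Cext, double Cabs, double threshold) {
  constexpr double pi = 3.14159265358979323846;
  double csca_pha = 0.0;
  for (std::size_t i = 0; i + 1 < za_deg.size(); ++i) {
    const double t0 = za_deg[i] * pi / 180.0;
    const double t1 = za_deg[i + 1] * pi / 180.0;
    // trapezoidal rule on Z11(theta)*sin(theta)
    csca_pha += 0.5 * (Z11[i] * std::sin(t0) + Z11[i + 1] * std::sin(t1)) * (t1 - t0);
  }
  csca_pha *= 2.0 * pi;                  // azimuthal integration (random orientation)
  const double csca_data = Cext - Cabs;  // sca xs from ext/abs data
  return std::abs(csca_pha - csca_data) / Cext <= threshold;
}
// ---------------------------------------------------------------------------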
2032
2033/* Workspace method: Doxygen documentation will be auto-generated */
2034void scat_data_monoCalc(ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2035 const ArrayOfArrayOfSingleScatteringData& scat_data,
2036 const Vector& f_grid,
2037 const Index& f_index,
2038 const Verbosity&) {
2039 //Extrapolation factor:
2040 //const Numeric extpolfac = 0.5;
2041
2042 // Check whether the single scattering data contains the right frequencies:
2043 // The check was changed to allow extrapolation at the boundaries of the
2044 // frequency grid.
2045 for (Index h = 0; h < scat_data.nelem(); h++) {
2046 for (Index i = 0; i < scat_data[h].nelem(); i++) {
2047 // check with extrapolation
2048 chk_interpolation_grids("scat_data.f_grid to f_grid",
2049 scat_data[h][i].f_grid,
2050 f_grid[f_index]);
2051 }
2052 }
2053
2054 //Initialise scat_data_mono
2055 scat_data_mono.resize(scat_data.nelem());
2056
2057 // Loop over the included scattering species
2058 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2059 const Index N_se = scat_data[i_ss].nelem();
2060
2061 //Initialise scat_data_mono
2062 scat_data_mono[i_ss].resize(N_se);
2063
2064 // Loop over the included scattering elements
2065 for (Index i_se = 0; i_se < N_se; i_se++) {
2066 // Gridpositions:
2067 GridPos freq_gp;
2068 gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
2069
2070 // Interpolation weights:
2071 Vector itw(2);
2072 interpweights(itw, freq_gp);
2073
2074 //Stuff that doesn't need interpolating
2075 scat_data_mono[i_ss][i_se].ptype = PART_TYPE;
2076 scat_data_mono[i_ss][i_se].f_grid.resize(1);
2077 scat_data_mono[i_ss][i_se].f_grid = f_grid[f_index];
2078 scat_data_mono[i_ss][i_se].T_grid = scat_data[i_ss][i_se].T_grid;
2079 scat_data_mono[i_ss][i_se].za_grid = ZA_DATAGRID;
2080 scat_data_mono[i_ss][i_se].aa_grid = AA_DATAGRID;
2081
2082 //Phase matrix data
2083 scat_data_mono[i_ss][i_se].pha_mat_data.resize(1,
2084 PHA_MAT_DATA.nvitrines(),
2085 PHA_MAT_DATA.nshelves(),
2086 PHA_MAT_DATA.nbooks(),
2087 PHA_MAT_DATA.npages(),
2088 PHA_MAT_DATA.nrows(),
2089 PHA_MAT_DATA.ncols());
2090
2091 for (Index t_index = 0; t_index < PHA_MAT_DATA.nvitrines(); t_index++) {
2092 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
2093 i_za_sca++) {
2094 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
2095 i_aa_sca++) {
2096 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
2097 i_za_inc++) {
2098 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
2099 i_aa_inc++) {
2100 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++) {
2101 scat_data_mono[i_ss][i_se].pha_mat_data(
2102 0, t_index, i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
2103 interp(itw,
2104 PHA_MAT_DATA(joker,
2105 t_index,
2106 i_za_sca,
2107 i_aa_sca,
2108 i_za_inc,
2109 i_aa_inc,
2110 i),
2111 freq_gp);
2112 }
2113 }
2114 }
2115 }
2116 }
2117 //Extinction matrix data
2118 scat_data_mono[i_ss][i_se].ext_mat_data.resize(1,
2119 T_DATAGRID.nelem(),
2120 EXT_MAT_DATA.npages(),
2121 EXT_MAT_DATA.nrows(),
2122 EXT_MAT_DATA.ncols());
2123 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages(); i_za_sca++) {
2124 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
2125 i_aa_sca++) {
2126 //
2127 // Interpolation of extinction matrix:
2128 //
2129 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
2130 scat_data_mono[i_ss][i_se].ext_mat_data(
2131 0, t_index, i_za_sca, i_aa_sca, i) =
2132 interp(itw,
2133 EXT_MAT_DATA(joker, t_index, i_za_sca, i_aa_sca, i),
2134 freq_gp);
2135 }
2136 }
2137 }
2138 //Absorption vector data
2139 scat_data_mono[i_ss][i_se].abs_vec_data.resize(1,
2140 T_DATAGRID.nelem(),
2141 ABS_VEC_DATA.npages(),
2142 ABS_VEC_DATA.nrows(),
2143 ABS_VEC_DATA.ncols());
2144 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages(); i_za_sca++) {
2145 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
2146 i_aa_sca++) {
2147 //
2148 // Interpolation of absorption vector:
2149 //
2150 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
2151 scat_data_mono[i_ss][i_se].abs_vec_data(
2152 0, t_index, i_za_sca, i_aa_sca, i) =
2153 interp(itw,
2154 ABS_VEC_DATA(joker, t_index, i_za_sca, i_aa_sca, i),
2155 freq_gp);
2156 }
2157 }
2158 }
2159 }
2160 }
2161 }
2162}
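
// ---------------------------------------------------------------------------
// [Editor's sketch, not part of m_optproperties.cc] scat_data_monoCalc above
// reduces each data entry to one frequency via a grid position and the two
// linear weights itw = {1-fd, fd}, i.e. y(f) = itw[0]*y[i] + itw[1]*y[i+1].
// The plain-C++ helper below mirrors that gridpos/interpweights/interp
// pattern for a single 1-D entry; names are illustrative only and no
// extrapolation or range checking is done.
#include <cstddef>
#include <vector>

static double mono_extract_sketch(const std::vector<double>& f_grid_data,
                                  const std::vector<double>& y_on_f_grid,
                                  double f) {
  std::size_t i = 0;  // lower neighbour of f on the data frequency grid
  while (i + 2 < f_grid_data.size() && f_grid_data[i + 1] < f) ++i;
  const double fd = (f - f_grid_data[i]) / (f_grid_data[i + 1] - f_grid_data[i]);
  const double itw0 = 1.0 - fd, itw1 = fd;  // interpolation weights
  return itw0 * y_on_f_grid[i] + itw1 * y_on_f_grid[i + 1];
}
// ---------------------------------------------------------------------------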
2163
2164/* Workspace method: Doxygen documentation will be auto-generated */
2165void scat_data_monoExtract(ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2166 const ArrayOfArrayOfSingleScatteringData& scat_data,
2167 const Index& f_index,
2168 const Verbosity&) {
2169 //Initialise scat_data_mono
2170 scat_data_mono.resize(scat_data.nelem());
2171
2172 // Loop over the included scattering species
2173 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2174 const Index N_se = scat_data[i_ss].nelem();
2175
2176 //Initialise scat_data_mono
2177 scat_data_mono[i_ss].resize(N_se);
2178
2179 // Loop over the included scattering elements
2180 for (Index i_se = 0; i_se < N_se; i_se++) {
2181 Index nf = F_DATAGRID.nelem();
2182 if (nf == 1) {
2183 scat_data_mono[i_ss][i_se] = scat_data[i_ss][i_se];
2184 } else {
2185 //Stuff that doesn't need interpolating
2186 scat_data_mono[i_ss][i_se].ptype = PART_TYPE;
2187 scat_data_mono[i_ss][i_se].T_grid = T_DATAGRID;
2188 scat_data_mono[i_ss][i_se].za_grid = ZA_DATAGRID;
2189 scat_data_mono[i_ss][i_se].aa_grid = AA_DATAGRID;
2190
2191 scat_data_mono[i_ss][i_se].f_grid.resize(1);
2192 scat_data_mono[i_ss][i_se].f_grid = F_DATAGRID[f_index];
2193
2194 Index this_f_index;
2195
2196 //Phase matrix data
2197 /*scat_data_mono[i_ss][i_se].pha_mat_data.resize(1,
2198 PHA_MAT_DATA.nvitrines(),
2199 PHA_MAT_DATA.nshelves(),
2200 PHA_MAT_DATA.nbooks(),
2201 PHA_MAT_DATA.npages(),
2202 PHA_MAT_DATA.nrows(),
2203 PHA_MAT_DATA.ncols());*/
2204 this_f_index = (PHA_MAT_DATA.nlibraries() == 1) ? 0 : f_index;
2205 scat_data_mono[i_ss][i_se].pha_mat_data = PHA_MAT_DATA(
2206 Range(this_f_index, 1), joker, joker, joker, joker, joker, joker);
2207
2208 //Extinction matrix data
2209 /*scat_data_mono[i_ss][i_se].ext_mat_data.resize(1, T_DATAGRID.nelem(),
2210 EXT_MAT_DATA.npages(),
2211 EXT_MAT_DATA.nrows(),
2212 EXT_MAT_DATA.ncols());*/
2213 this_f_index = (EXT_MAT_DATA.nshelves() == 1) ? 0 : f_index;
2214 scat_data_mono[i_ss][i_se].ext_mat_data =
2215 EXT_MAT_DATA(Range(this_f_index, 1), joker, joker, joker, joker);
2216
2217 //Absorption vector data
2218 /*scat_data_mono[i_ss][i_se].abs_vec_data.resize(1, T_DATAGRID.nelem(),
2219 ABS_VEC_DATA.npages(),
2220 ABS_VEC_DATA.nrows(),
2221 ABS_VEC_DATA.ncols());*/
2222 this_f_index = (ABS_VEC_DATA.nshelves() == 1) ? 0 : f_index;
2223 scat_data_mono[i_ss][i_se].abs_vec_data =
2224 ABS_VEC_DATA(Range(this_f_index, 1), joker, joker, joker, joker);
2225 }
2226 }
2227 }
2228}
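
// ---------------------------------------------------------------------------
// [Editor's sketch, not part of m_optproperties.cc] scat_data_monoExtract
// above does not interpolate: it copies the f_index slice, falling back to
// index 0 for data that already holds a single frequency. The small helper
// below shows that selection rule on a generic per-frequency container;
// names are illustrative only.
#include <cstddef>
#include <vector>

static std::vector<double> extract_f_slice_sketch(
    const std::vector<std::vector<double>>& data_per_f, std::size_t f_index) {
  const std::size_t this_f_index = (data_per_f.size() == 1) ? 0 : f_index;
  return data_per_f[this_f_index];  // plain copy of the selected frequency slice
}
// ---------------------------------------------------------------------------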
2229
2230/* Workspace method: Doxygen documentation will be auto-generated */
2231void opt_prop_sptFromMonoData( // Output and Input:
2232 ArrayOfPropagationMatrix& ext_mat_spt,
2233 ArrayOfStokesVector& abs_vec_spt,
2234 // Input:
2235 const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2236 const Vector& za_grid,
2237 const Vector& aa_grid,
2238 const Index& za_index, // propagation directions
2239 const Index& aa_index,
2240 const Numeric& rtp_temperature,
2241 const Tensor4& pnd_field,
2242 const Index& scat_p_index,
2243 const Index& scat_lat_index,
2244 const Index& scat_lon_index,
2245 const Verbosity& verbosity) {
2246 DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data_mono);)
2247 const Index stokes_dim = ext_mat_spt[0].StokesDimensions();
2248 const Numeric za_sca = za_grid[za_index];
2249 const Numeric aa_sca = aa_grid[aa_index];
2250
2251 if (stokes_dim > 4 or stokes_dim < 1) {
2252 throw runtime_error(
2253 "The dimension of the stokes vector \n"
2254 "must be 1,2,3 or 4");
2255 }
2256
2257 ARTS_ASSERT(ext_mat_spt.nelem() == N_se_total);
2258 ARTS_ASSERT(abs_vec_spt.nelem() == N_se_total);
2259
2260 // Check that we do indeed have scat_data_mono here. Only checking the first
2261 // scat element, assuming the other elements have been processed in the same
2262 // manner. That's safe against having scat_data here if it originated from the
2263 // scat_data_raw reading routines (ScatSpecies/Element*Add/Read), but it is not
2264 // safe against data read by ReadXML directly or if scat_data(_raw) has been
2265 // (partly) produced from scat_data_singleTmatrix. Checking all elements would
2266 // be too costly here, though.
2267 // Also, we can't check here whether data is at the correct frequency since we
2268 // don't know f_grid and f_index here (we could pass them in, though).
2269 if (scat_data_mono[0][0].f_grid.nelem() > 1) {
2270 ostringstream os;
2271 os << "Scattering data seems to be *scat_data* (several freq points),\n"
2272 << "but *scat_data_mono* (1 freq point only) is expected here.";
2273 throw runtime_error(os.str());
2274 }
2275
2276 // Initialisation
2277 for (auto& pm : ext_mat_spt) pm.SetZero();
2278 for (auto& av : abs_vec_spt) av.SetZero();
2279
2280 GridPos t_gp;
2281
2282 Vector itw(2);
2283
2284 Index i_se_flat = 0;
2285 // Loop over the included scattering elements
2286 for (Index i_ss = 0; i_ss < scat_data_mono.nelem(); i_ss++) {
2287 // Loop over the included scattering elements
2288 for (Index i_se = 0; i_se < scat_data_mono[i_ss].nelem(); i_se++) {
2289 // If the particle number density at a specific point in the
2290 // atmosphere for the i_se scattering element is zero, we don't need
2291 // to do the transformation!
2292 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
2293 PND_LIMIT) {
2294 // First we have to transform the data from the coordinate system
2295 // used in the database (depending on the kind of ptype) to the
2296 // laboratory coordinate system.
2297
2298 //
2299 // Do the transformation into the laboratory coordinate system.
2300 //
2301 // Extinction matrix:
2302 //
2303 Index ext_npages = scat_data_mono[i_ss][i_se].ext_mat_data.npages();
2304 Index ext_nrows = scat_data_mono[i_ss][i_se].ext_mat_data.nrows();
2305 Index ext_ncols = scat_data_mono[i_ss][i_se].ext_mat_data.ncols();
2306 Index abs_npages = scat_data_mono[i_ss][i_se].abs_vec_data.npages();
2307 Index abs_nrows = scat_data_mono[i_ss][i_se].abs_vec_data.nrows();
2308 Index abs_ncols = scat_data_mono[i_ss][i_se].abs_vec_data.ncols();
2309
2310 //Check that scattering data temperature range covers required temperature
2311 ConstVectorView t_grid = scat_data_mono[i_ss][i_se].T_grid;
2312
2313 if (t_grid.nelem() > 1) {
2314 ostringstream os;
2315 os << "In opt_prop_sptFromMonoData.\n"
2316 << "The temperature grid of the scattering data does not\n"
2317 << "cover the atmospheric temperature at cloud location.\n"
2318 << "The data should include the value T = " << rtp_temperature
2319 << " K.";
2320 chk_interpolation_grids(os.str(), t_grid, rtp_temperature);
2321
2322 //interpolate over temperature
2323 Tensor3 ext_mat_data1temp(ext_npages, ext_nrows, ext_ncols);
2324 gridpos(t_gp, t_grid, rtp_temperature);
2325 interpweights(itw, t_gp);
2326 for (Index i_p = 0; i_p < ext_npages; i_p++) {
2327 for (Index i_r = 0; i_r < ext_nrows; i_r++) {
2328 for (Index i_c = 0; i_c < ext_ncols; i_c++) {
2329 ext_mat_data1temp(i_p, i_r, i_c) =
2330 interp(itw,
2331 scat_data_mono[i_ss][i_se].ext_mat_data(
2332 0, joker, i_p, i_r, i_c),
2333 t_gp);
2334 }
2335 }
2336 }
2337 ext_matTransform(ext_mat_spt[i_se_flat],
2338 ext_mat_data1temp,
2339 scat_data_mono[i_ss][i_se].za_grid,
2340 scat_data_mono[i_ss][i_se].aa_grid,
2341 scat_data_mono[i_ss][i_se].ptype,
2342 za_sca,
2343 aa_sca,
2344 verbosity);
2345 } else {
2346 ext_matTransform(ext_mat_spt[i_se_flat],
2347 scat_data_mono[i_ss][i_se].ext_mat_data(
2348 0, 0, joker, joker, joker),
2349 scat_data_mono[i_ss][i_se].za_grid,
2350 scat_data_mono[i_ss][i_se].aa_grid,
2351 scat_data_mono[i_ss][i_se].ptype,
2352 za_sca,
2353 aa_sca,
2354 verbosity);
2355 }
2356 //
2357 // Absorption vector:
2358 //
2359
2360 if (t_grid.nelem() > 1) {
2361 Tensor3 abs_vec_data1temp(abs_npages, abs_nrows, abs_ncols);
2362 //interpolate over temperature
2363 for (Index i_p = 0; i_p < abs_npages; i_p++) {
2364 for (Index i_r = 0; i_r < abs_nrows; i_r++) {
2365 for (Index i_c = 0; i_c < abs_ncols; i_c++) {
2366 abs_vec_data1temp(i_p, i_r, i_c) =
2367 interp(itw,
2368 scat_data_mono[i_ss][i_se].abs_vec_data(
2369 0, joker, i_p, i_r, i_c),
2370 t_gp);
2371 }
2372 }
2373 }
2374 abs_vecTransform(abs_vec_spt[i_se_flat],
2375 abs_vec_data1temp,
2376 scat_data_mono[i_ss][i_se].za_grid,
2377 scat_data_mono[i_ss][i_se].aa_grid,
2378 scat_data_mono[i_ss][i_se].ptype,
2379 za_sca,
2380 aa_sca,
2381 verbosity);
2382 } else {
2383 abs_vecTransform(abs_vec_spt[i_se_flat],
2384 scat_data_mono[i_ss][i_se].abs_vec_data(
2385 0, 0, joker, joker, joker),
2386 scat_data_mono[i_ss][i_se].za_grid,
2387 scat_data_mono[i_ss][i_se].aa_grid,
2388 scat_data_mono[i_ss][i_se].ptype,
2389 za_sca,
2390 aa_sca,
2391 verbosity);
2392 }
2393 }
2394
2395 i_se_flat++;
2396 }
2397 }
2398}
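
// ---------------------------------------------------------------------------
// [Editor's sketch, not part of m_optproperties.cc] opt_prop_sptFromMonoData
// above interpolates ext_mat_data/abs_vec_data element-wise in temperature
// (the ext_mat_data1temp/abs_vec_data1temp blocks) before transforming into
// the laboratory frame. The helper below shows that element-wise linear T
// interpolation for a generic block stored per temperature grid point; names
// are illustrative, and no extrapolation handling is included.
#include <cstddef>
#include <vector>

static std::vector<double> interp_in_T_sketch(
    const std::vector<double>& T_grid,                   // ascending
    const std::vector<std::vector<double>>& data_per_T,  // [T][element]
    double T) {
  std::size_t i = 0;  // lower neighbour of T
  while (i + 2 < T_grid.size() && T_grid[i + 1] < T) ++i;
  const double td = (T - T_grid[i]) / (T_grid[i + 1] - T_grid[i]);
  std::vector<double> out(data_per_T[i].size());
  for (std::size_t k = 0; k < out.size(); ++k)
    out[k] = (1.0 - td) * data_per_T[i][k] + td * data_per_T[i + 1][k];
  return out;
}
// ---------------------------------------------------------------------------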
2399
2400/* Workspace method: Doxygen documentation will be auto-generated */
2401void pha_mat_sptFromMonoData( // Output:
2402 Tensor5& pha_mat_spt,
2403 // Input:
2404 const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2405 const Index& doit_za_grid_size,
2406 const Vector& aa_grid,
2407 const Index& za_index, // propagation directions
2408 const Index& aa_index,
2409 const Numeric& rtp_temperature,
2410 const Tensor4& pnd_field,
2411 const Index& scat_p_index,
2412 const Index& scat_lat_index,
2413 const Index& scat_lon_index,
2414 const Verbosity& verbosity) {
2415 Vector za_grid;
2416 nlinspace(za_grid, 0, 180, doit_za_grid_size);
2417
2418 const Index N_se_total = TotalNumberOfElements(scat_data_mono);
2419 if (N_se_total != pnd_field.nbooks()) {
2420 ostringstream os;
2421 os << "Total number of scattering elements in *scat_data_mono* "
2422 << "inconsistent with size of pnd_field.";
2423 throw runtime_error(os.str());
2424 }
2425 // as pha_mat_spt is typically initialized from pnd_field, this theoretically
2426 // checks the same as the runtime_error above. Still, we keep it to be on the
2427 // safe side.
2428 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
2429
2430 const Index stokes_dim = pha_mat_spt.ncols();
2431 if (stokes_dim > 4 || stokes_dim < 1) {
2432 throw runtime_error(
2433 "The dimension of the stokes vector \n"
2434 "must be 1,2,3 or 4");
2435 }
2436
2437 // Check that we do indeed have scat_data_mono here. Only checking the first
2438 // scat element, assuming the other elements have been processed in the same
2439 // manner. That's safe against having scat_data here if it originated from the
2440 // scat_data_raw reading routines (ScatSpecies/Element*Add/Read), but it is not
2441 // safe against data read by ReadXML directly or if scat_data(_raw) has been
2442 // (partly) produced from scat_data_singleTmatrix. Checking all elements would
2443 // be too costly here, though.
2444 // Also, we can't check here whether data is at the correct frequency since we
2445 // don't know f_grid and f_index here (we could pass them in, though).
2446 if (scat_data_mono[0][0].f_grid.nelem() > 1) {
2447 ostringstream os;
2448 os << "Scattering data seems to be *scat_data* (several freq points),\n"
2449 << "but *scat_data_mono* (1 freq point only) is expected here.";
2450 throw runtime_error(os.str());
2451 }
2452
2453 GridPos T_gp = {0, {0, 1}}, Tred_gp;
2454 Vector itw(2);
2455
2456 // Initialisation
2457 pha_mat_spt = 0.;
2458
2459 Index i_se_flat = 0;
2460 for (Index i_ss = 0; i_ss < scat_data_mono.nelem(); i_ss++) {
2461 for (Index i_se = 0; i_se < scat_data_mono[i_ss].nelem(); i_se++) {
2462 // If the particle number density at a specific point in the
2463 // atmosphere for scattering element i_se is zero, we don't need to
2464 // do the transformation!
2465 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
2466 PND_LIMIT) {
2467 // Temporary phase matrix which includes all temperatures.
2468 Index nT = scat_data_mono[i_ss][i_se].pha_mat_data.nvitrines();
2469 Tensor3 pha_mat_spt_tmp(nT, pha_mat_spt.nrows(), pha_mat_spt.ncols());
2470
2471 pha_mat_spt_tmp = 0.;
2472
2473 Index ti = -1;
2474 if (nT == 1) // just 1 T_grid element
2475 {
2476 ti = 0;
2477 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
2478 // pick one temperature'
2479 {
2480 if (rtp_temperature > -10.) // lowest T-point
2481 {
2482 ti = 0;
2483 } else if (rtp_temperature > -20.) // highest T-point
2484 {
2485 ti = nT - 1;
2486 } else // median T-point
2487 {
2488 ti = nT / 2;
2489 }
2490 } else {
2491 ostringstream os;
2492 os << "In pha_mat_sptFromMonoData.\n"
2493 << "The temperature grid of the scattering data does not\n"
2494 << "cover the atmospheric temperature at cloud location.\n"
2495 << "The data should include the value T = " << rtp_temperature
2496 << " K.";
2497 chk_interpolation_grids(
2498 os.str(), scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
2499
2500 // Gridpositions:
2501 gridpos(T_gp, scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
2502 gridpos_copy(Tred_gp, T_gp);
2503 Tred_gp.idx = 0;
2504 // Interpolation weights:
2505 interpweights(itw, Tred_gp);
2506 }
2507
2508 // Do the transformation into the laboratory coordinate system.
2509 for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
2510 za_inc_idx++) {
2511 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
2512 aa_inc_idx++) {
2513 if (ti < 0) // Temperature interpolation
2514 {
2515 for (Index t_idx = 0; t_idx < 2; t_idx++) {
2516 pha_matTransform(
2517 pha_mat_spt_tmp(t_idx, joker, joker),
2518 scat_data_mono[i_ss][i_se].pha_mat_data(
2519 0, t_idx + T_gp.idx, joker, joker, joker, joker, joker),
2520 scat_data_mono[i_ss][i_se].za_grid,
2521 scat_data_mono[i_ss][i_se].aa_grid,
2522 scat_data_mono[i_ss][i_se].ptype,
2523 za_index,
2524 aa_index,
2525 za_inc_idx,
2526 aa_inc_idx,
2527 za_grid,
2528 aa_grid,
2529 verbosity);
2530 }
2531
2532 for (Index i = 0; i < stokes_dim; i++) {
2533 for (Index j = 0; j < stokes_dim; j++) {
2534 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, i, j) =
2535 interp(itw, pha_mat_spt_tmp(joker, i, j), Tred_gp);
2536 }
2537 }
2538 } else // no temperature interpolation required
2539 {
2540 pha_matTransform(
2541 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
2542 scat_data_mono[i_ss][i_se].pha_mat_data(
2543 0, ti, joker, joker, joker, joker, joker),
2544 scat_data_mono[i_ss][i_se].za_grid,
2545 scat_data_mono[i_ss][i_se].aa_grid,
2546 scat_data_mono[i_ss][i_se].ptype,
2547 za_index,
2548 aa_index,
2549 za_inc_idx,
2550 aa_inc_idx,
2551 za_grid,
2552 aa_grid,
2553 verbosity);
2554 }
2555 }
2556 }
2557 }
2558
2559 i_se_flat++;
2560 }
2561 }
2562}
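
// ---------------------------------------------------------------------------
// [Editor's sketch, not part of m_optproperties.cc] The temperature handling
// in pha_mat_sptFromMonoData above encodes "pick a fixed T_grid point instead
// of interpolating" in negative rtp_temperature values. The helper below
// restates that selection rule; a return value of -1 means "interpolate in T"
// (the name and signature are illustrative only).
static long select_T_index_sketch(double rtp_temperature, long nT) {
  if (nT == 1) return 0;                       // single T point: nothing to choose
  if (rtp_temperature >= 0.0) return -1;       // regular case: interpolate in T
  if (rtp_temperature > -10.0) return 0;       // lowest T-point
  if (rtp_temperature > -20.0) return nT - 1;  // highest T-point
  return nT / 2;                               // median T-point
}
// ---------------------------------------------------------------------------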
2563
2564/* Workspace method: Doxygen documentation will be auto-generated */
2565void pha_mat_sptFromScat_data( // Output:
2566 Tensor5& pha_mat_spt,
2567 // Input:
2568 const ArrayOfArrayOfSingleScatteringData& scat_data,
2569 const Index& scat_data_checked,
2570 const Vector& za_grid,
2571 const Vector& aa_grid,
2572 const Index& za_index, // propagation directions
2573 const Index& aa_index,
2574 const Index& f_index,
2575 const Numeric& rtp_temperature,
2576 const Tensor4& pnd_field,
2577 const Index& scat_p_index,
2578 const Index& scat_lat_index,
2579 const Index& scat_lon_index,
2580 const Verbosity& verbosity) {
2581 if (scat_data_checked != 1)
2582 throw runtime_error(
2583 "The scattering data must be flagged to have "
2584 "passed a consistency check (scat_data_checked=1).");
2585
2586 const Index stokes_dim = pha_mat_spt.ncols();
2587 if (stokes_dim > 4 || stokes_dim < 1) {
2588 throw runtime_error(
2589 "The dimension of the stokes vector \n"
2590 "must be 1,2,3 or 4");
2591 }
2592
2593 // Determine total number of scattering elements
2594 const Index N_se_total = TotalNumberOfElements(scat_data);
2595 if (N_se_total != pnd_field.nbooks()) {
2596 ostringstream os;
2597 os << "Total number of scattering elements in scat_data "
2598 << "inconsistent with size of pnd_field.";
2599 throw runtime_error(os.str());
2600 }
2601 // as pha_mat_spt is typically initialized from pnd_field, this theoretically
2602 // checks the same as the runtime_error above. Still, we keep it to be on the
2603 // safe side.
2604 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
2605
2606 const Index N_ss = scat_data.nelem();
2607
2608 // Phase matrix in laboratory coordinate system. Dimensions:
2609 // [frequency, za_inc, aa_inc, stokes_dim, stokes_dim]
2610 Tensor5 pha_mat_data_int;
2611
2612 Index this_f_index;
2613
2614 Index i_se_flat = 0;
2615 // Loop over scattering species
2616 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
2617 const Index N_se = scat_data[i_ss].nelem();
2618
2619 // Loop over the included scattering elements
2620 for (Index i_se = 0; i_se < N_se; i_se++) {
2621 // If the particle number density at a specific point in the
2622 // atmosphere for the i_se scattering element is zero, we don't need
2623 // to do the transformation!
2624 if (abs(pnd_field(
2625 i_se_flat, scat_p_index, scat_lat_index, scat_lon_index)) >
2626 PND_LIMIT) {
2627 // First we have to transform the data from the coordinate system
2628 // used in the database (depending on the kind of ptype) to the
2629 // laboratory coordinate system.
2630
2631 // Resize the variables for the interpolated data (1freq, 1T):
2632 pha_mat_data_int.resize(PHA_MAT_DATA.nshelves(),
2633 PHA_MAT_DATA.nbooks(),
2634 PHA_MAT_DATA.npages(),
2635 PHA_MAT_DATA.nrows(),
2636 PHA_MAT_DATA.ncols());
2637
2638 // Frequency extraction and temperature interpolation
2639
2640 // Gridpositions and interpolation weights;
2641 GridPos t_gp;
2642 Vector itw;
2643 Index this_T_index = -1;
2644 if (PHA_MAT_DATA.nvitrines() == 1) {
2645 this_T_index = 0;
2646 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
2647 // pick one temperature'
2648 {
2649 if (rtp_temperature > -10.) // lowest T-point
2650 {
2651 this_T_index = 0;
2652 } else if (rtp_temperature > -20.) // highest T-point
2653 {
2654 this_T_index = PHA_MAT_DATA.nvitrines() - 1;
2655 } else // median T-point
2656 {
2657 this_T_index = PHA_MAT_DATA.nvitrines() / 2;
2658 }
2659 } else {
2660 ostringstream os;
2661 os << "In pha_mat_sptFromScat_data.\n"
2662 << "The temperature grid of the scattering data does not\n"
2663 << "cover the atmospheric temperature at cloud location.\n"
2664 << "The data should include the value T = " << rtp_temperature
2665 << " K.";
2666 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
2667
2668 gridpos(t_gp, T_DATAGRID, rtp_temperature);
2669
2670 // Interpolation weights:
2671 itw.resize(2);
2672 interpweights(itw, t_gp);
2673 }
2674
2675 if (PHA_MAT_DATA.nlibraries() == 1)
2676 this_f_index = 0;
2677 else
2678 this_f_index = f_index;
2679
2680 if (this_T_index < 0) {
2681 // Interpolation of scattering matrix:
2682 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
2683 i_za_sca++)
2684 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
2685 i_aa_sca++)
2686 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
2687 i_za_inc++)
2688 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
2689 i_aa_inc++)
2690 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
2691 pha_mat_data_int(
2692 i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
2693 interp(itw,
2694 PHA_MAT_DATA(this_f_index,
2695 joker,
2696 i_za_sca,
2697 i_aa_sca,
2698 i_za_inc,
2699 i_aa_inc,
2700 i),
2701 t_gp);
2702 } else {
2703 pha_mat_data_int = PHA_MAT_DATA(
2704 this_f_index, this_T_index, joker, joker, joker, joker, joker);
2705 /*
2706 for (Index i_za_sca = 0;
2707 i_za_sca < PHA_MAT_DATA.nshelves(); i_za_sca++)
2708 for (Index i_aa_sca = 0;
2709 i_aa_sca < PHA_MAT_DATA.nbooks(); i_aa_sca++)
2710 for (Index i_za_inc = 0;
2711 i_za_inc < PHA_MAT_DATA.npages(); i_za_inc++)
2712 for (Index i_aa_inc = 0;
2713 i_aa_inc < PHA_MAT_DATA.nrows(); i_aa_inc++)
2714 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
2715 // Interpolation of phase matrix:
2716 pha_mat_data_int(i_za_sca, i_aa_sca,
2717 i_za_inc, i_aa_inc, i) =
2718 PHA_MAT_DATA(this_f_index, this_T_index,
2719 i_za_sca, i_aa_sca,
2720 */
2721 }
2722
2723 // Do the transformation into the laboratory coordinate system.
2724 for (Index za_inc_idx = 0; za_inc_idx < za_grid.nelem();
2725 za_inc_idx++) {
2726 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
2727 aa_inc_idx++) {
2728 pha_matTransform(
2729 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
2730 pha_mat_data_int,
2731 ZA_DATAGRID,
2732 AA_DATAGRID,
2733 PART_TYPE,
2734 za_index,
2735 aa_index,
2736 za_inc_idx,
2737 aa_inc_idx,
2738 za_grid,
2739 aa_grid,
2740 verbosity);
2741 }
2742 }
2743 }
2744 i_se_flat++;
2745 }
2746 }
2747}
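
// ---------------------------------------------------------------------------
// [Editor's sketch, not part of m_optproperties.cc] All methods above address
// pnd_field by a flattened scattering element index (i_se_flat, cf.
// FlattenedIndex/TotalNumberOfElements): the elements of all scattering
// species are counted consecutively. The helper below restates that mapping
// for a generic list of per-species element counts; names are illustrative
// only.
#include <cstddef>
#include <vector>

static std::size_t flattened_index_sketch(
    const std::vector<std::size_t>& n_se_per_species,
    std::size_t i_ss, std::size_t i_se) {
  std::size_t flat = 0;
  for (std::size_t s = 0; s < i_ss; ++s) flat += n_se_per_species[s];  // preceding species
  return flat + i_se;
}
// ---------------------------------------------------------------------------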
2748
2749/* Workspace method: Doxygen documentation will be auto-generated */
2750void ScatSpeciesMerge( //WS Output:
2751 Tensor4& pnd_field,
2752 ArrayOfArrayOfSingleScatteringData& scat_data,
2753 ArrayOfArrayOfScatteringMetaData& scat_meta,
2754 ArrayOfString& scat_species,
2755 Index& cloudbox_checked,
2756 //WS Input:
2757 const Index& atmosphere_dim,
2758 const Index& cloudbox_on,
2759 const ArrayOfIndex& cloudbox_limits,
2760 const Tensor3& t_field,
2761 const Tensor3& z_field,
2762 const Matrix& z_surface,
2763 const Verbosity& /*verbosity*/) {
2764 // FIXME:
2765 // so far, this works for both scat_data and scat_data_raw. Needs to be
2766 // adapted, though, once we have WSM that can create Z/K/a with different
2767 // f/T dimensions than scat_data_single.f/T_grid.
2768
2769 // cloudbox variable state should be ok before entering here
2770 if (!cloudbox_checked)
2771 throw std::runtime_error(
2772 "You must call *cloudbox_checkedCalc* before this method.");
2773 //However, we modify cloudbox variables; hence, force re-checking of the new
2774 //variables by resetting cloudbox_checked to false (0).
2775 cloudbox_checked = 0;
2776
2777 if (atmosphere_dim != 1)
2778 throw std::runtime_error(
2779 "Merging scattering elements only works with a 1D atmoshere");
2780
2781 // Cloudbox on/off?
2782 if (!cloudbox_on) {
2783 /* Must initialise pnd_field anyway; but empty */
2784 pnd_field.resize(0, 0, 0, 0);
2785 return;
2786 }
2787
2788 // ------- setup new pnd_field and scat_data -------------------
2789 ArrayOfIndex limits(2);
2790 //pressure
2791 limits[0] = cloudbox_limits[0];
2792 limits[1] = cloudbox_limits[1] + 1;
2793
2794 Tensor4 pnd_field_merged(
2795 limits[1] - limits[0], limits[1] - limits[0], 1, 1, 0.);
2796
2797 ArrayOfArrayOfSingleScatteringData scat_data_merged;
2798 scat_data_merged.resize(1);
2799 scat_data_merged[0].resize(pnd_field_merged.nbooks());
2800 ArrayOfArrayOfScatteringMetaData scat_meta_merged;
2801 scat_meta_merged.resize(1);
2802 scat_meta_merged[0].resize(pnd_field_merged.nbooks());
2803 ArrayOfString scat_species_merged;
2804 scat_species_merged.resize(1);
2805 scat_species_merged[0] = "mergedfield-mergedpsd";
2806 for (Index sp = 0; sp < scat_data_merged[0].nelem(); sp++) {
2807 SingleScatteringData& this_part = scat_data_merged[0][sp];
2808 this_part.ptype = scat_data[0][0].ptype;
2809 this_part.description = "Merged scattering elements";
2810 this_part.f_grid = scat_data[0][0].f_grid;
2811 this_part.za_grid = scat_data[0][0].za_grid;
2812 this_part.aa_grid = scat_data[0][0].aa_grid;
2813 this_part.pha_mat_data.resize(scat_data[0][0].pha_mat_data.nlibraries(),
2814 1,
2815 scat_data[0][0].pha_mat_data.nshelves(),
2816 scat_data[0][0].pha_mat_data.nbooks(),
2817 scat_data[0][0].pha_mat_data.npages(),
2818 scat_data[0][0].pha_mat_data.nrows(),
2819 scat_data[0][0].pha_mat_data.ncols());
2820 this_part.ext_mat_data.resize(scat_data[0][0].ext_mat_data.nshelves(),
2821 1,
2822 scat_data[0][0].ext_mat_data.npages(),
2823 scat_data[0][0].ext_mat_data.nrows(),
2824 scat_data[0][0].ext_mat_data.ncols());
2825 this_part.abs_vec_data.resize(scat_data[0][0].abs_vec_data.nshelves(),
2826 1,
2827 scat_data[0][0].abs_vec_data.npages(),
2828 scat_data[0][0].abs_vec_data.nrows(),
2829 scat_data[0][0].abs_vec_data.ncols());
2830 this_part.pha_mat_data = 0.;
2831 this_part.ext_mat_data = 0.;
2832 this_part.abs_vec_data = 0.;
2833 this_part.T_grid.resize(1);
2834 this_part.T_grid[0] = t_field(sp, 0, 0);
2835
2836 ScatteringMetaData& this_meta = scat_meta_merged[0][sp];
2837 ostringstream os;
2838 os << "Merged scattering element of cloudbox-level #" << sp;
2839 this_meta.description = os.str();
2840 this_meta.source = "ARTS internal";
2841 this_meta.refr_index = "Unknown";
2842 this_meta.mass = -1.;
2843 this_meta.diameter_max = -1.;
2844 this_meta.diameter_volume_equ = -1.;
2845 this_meta.diameter_area_equ_aerodynamical = -1.;
2846 }
2847
2848 // Check that all scattering elements have same ptype and data dimensions
2849 SingleScatteringData& first_part = scat_data[0][0];
2850 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2851 for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
2852 SingleScatteringData& orig_part = scat_data[i_ss][i_se];
2853
2854 if (orig_part.ptype != first_part.ptype)
2855 throw std::runtime_error(
2856 "All scattering elements must have the same type");
2857
2858 if (orig_part.f_grid.nelem() != first_part.f_grid.nelem())
2859 throw std::runtime_error(
2860 "All scattering elements must have the same f_grid");
2861
2862 if (!is_size(orig_part.pha_mat_data(
2863 joker, 0, joker, joker, joker, joker, joker),
2864 first_part.pha_mat_data.nlibraries(),
2865 first_part.pha_mat_data.nshelves(),
2866 first_part.pha_mat_data.nbooks(),
2867 first_part.pha_mat_data.npages(),
2868 first_part.pha_mat_data.nrows(),
2869 first_part.pha_mat_data.ncols()))
2870 throw std::runtime_error(
2871 "All scattering elements must have the same pha_mat_data size"
2872 " (except for temperature).");
2873
2874 if (!is_size(orig_part.ext_mat_data(joker, 0, joker, joker, joker),
2875 first_part.ext_mat_data.nshelves(),
2876 first_part.ext_mat_data.npages(),
2877 first_part.ext_mat_data.nrows(),
2878 first_part.ext_mat_data.ncols()))
2879 throw std::runtime_error(
2880 "All scattering elements must have the same ext_mat_data size"
2881 " (except for temperature).");
2882
2883 if (!is_size(orig_part.abs_vec_data(joker, 0, joker, joker, joker),
2884 first_part.abs_vec_data.nshelves(),
2885 first_part.abs_vec_data.npages(),
2886 first_part.abs_vec_data.nrows(),
2887 first_part.abs_vec_data.ncols()))
2888 throw std::runtime_error(
2889 "All scattering elements must have the same abs_vec_data size"
2890 " (except for temperature).");
2891 }
2892 }
2893
2894 //----- Start pnd_field_merged and scat_data_array_merged calculations -----
2895
2896 GridPos T_gp;
2897 Vector itw(2);
2898
2899 Index nlevels = pnd_field_merged.nbooks();
2900 // loop over pressure levels in cloudbox
2901 for (Index i_lv = 0; i_lv < nlevels - 1; i_lv++) {
2902 pnd_field_merged(i_lv, i_lv, 0, 0) = 1.;
2903
2904 SingleScatteringData& this_part = scat_data_merged[0][i_lv];
2905 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2906 for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
2907 SingleScatteringData& orig_part = scat_data[i_ss][i_se];
2908 const Index pnd_index = FlattenedIndex(scat_data, i_ss, i_se);
2909
2910 // If the particle number density at a specific point in the
2911 // atmosphere for the i_se scattering element is zero, we don't
2912 // need to do the transformation!
2913 if (pnd_field(pnd_index, i_lv, 0, 0) > PND_LIMIT) //TRS
2914 {
2915 Numeric temperature = this_part.T_grid[0];
2916 if (orig_part.T_grid.nelem() > 1) {
2917 ostringstream os;
2918 os << "The temperature grid of the scattering data "
2919 << "does not cover the\n"
2920 << "atmospheric temperature at cloud location. "
2921 << "The data should\n"
2922 << "include the value T = " << temperature << " K.\n"
2923 << "Offending particle is scat_data[" << i_ss << "][" << i_se
2924 << "]:\n"
2925 << "Description: " << orig_part.description << "\n";
2926 chk_interpolation_grids(os.str(), orig_part.T_grid, temperature);
2927
2928 // Gridpositions:
2929 gridpos(T_gp, orig_part.T_grid, temperature);
2930 // Interpolation weights:
2931 interpweights(itw, T_gp);
2932 }
2933
2935
2936 // Loop over frequencies
2937 for (Index i_f = 0; i_f < orig_part.pha_mat_data.nlibraries();
2938 i_f++) {
2939 // Loop over zenith angles
2940 for (Index i_za = 0; i_za < orig_part.ext_mat_data.npages();
2941 i_za++) {
2942 // Loop over azimuth angles
2943 for (Index i_aa = 0; i_aa < orig_part.ext_mat_data.nrows();
2944 i_aa++) {
2945 // Weighted sum of ext_mat_data and abs_vec_data
2946 if (orig_part.T_grid.nelem() == 1) {
2947 Vector v{orig_part.ext_mat_data(i_f, 0, i_za, i_aa, joker)};
2948 v *= pnd_field(pnd_index, i_lv, 0, 0);
2949 this_part.ext_mat_data(i_f, 0, i_za, 0, joker) += v;
2950
2951 v = orig_part.abs_vec_data(i_f, 0, i_za, i_aa, joker);
2952 v *= pnd_field(pnd_index, i_lv, 0, 0);
2953 this_part.abs_vec_data(i_f, 0, i_za, i_aa, joker) += v;
2954 } else {
2955 for (Index i = 0; i < orig_part.ext_mat_data.ncols(); i++) {
2956 // Temperature interpolation
2957 this_part.ext_mat_data(i_f, 0, i_za, i_aa, i) +=
2958 pnd_field(pnd_index, i_lv, 0, 0) *
2959 interp(
2960 itw,
2961 orig_part.ext_mat_data(i_f, joker, i_za, i_aa, i),
2962 T_gp);
2963 }
2964 for (Index i = 0; i < orig_part.abs_vec_data.ncols(); i++) {
2965 // Temperature interpolation
2966 this_part.abs_vec_data(i_f, 0, i_za, i_aa, i) +=
2967 pnd_field(pnd_index, i_lv, 0, 0) *
2968 interp(
2969 itw,
2970 orig_part.abs_vec_data(i_f, joker, i_za, i_aa, i),
2971 T_gp);
2972 }
2973 }
2974 }
2975 }
2976
2978
2979 // Loop over outgoing zenith angles
2980 for (Index i_za_out = 0;
2981 i_za_out < orig_part.pha_mat_data.nshelves();
2982 i_za_out++) {
2983 // Loop over outgoing azimuth angles
2984 for (Index i_aa_out = 0;
2985 i_aa_out < orig_part.pha_mat_data.nbooks();
2986 i_aa_out++) {
2987 // Loop over incoming zenith angles
2988 for (Index i_za_inc = 0;
2989 i_za_inc < orig_part.pha_mat_data.npages();
2990 i_za_inc++) {
2991 // Loop over incoming azimuth angles
2992 for (Index i_aa_inc = 0;
2993 i_aa_inc < orig_part.pha_mat_data.nrows();
2994 i_aa_inc++) {
2995 // Weighted sum of pha_mat_data
2996 if (orig_part.T_grid.nelem() == 1) {
2997 Vector v{orig_part.pha_mat_data(i_f,
2998 0,
2999 i_za_out,
3000 i_aa_out,
3001 i_za_inc,
3002 i_aa_inc,
3003 joker)};
3004 v *= pnd_field(pnd_index, i_lv, 0, 0);
3005 this_part.pha_mat_data(i_f,
3006 0,
3007 i_za_out,
3008 i_aa_out,
3009 i_za_inc,
3010 i_aa_inc,
3011 joker) += v;
3012 } else {
3013 // Temperature interpolation
3014 for (Index i = 0; i < orig_part.pha_mat_data.ncols();
3015 i++) {
3016 this_part.pha_mat_data(i_f,
3017 0,
3018 i_za_out,
3019 i_aa_out,
3020 i_za_inc,
3021 i_aa_inc,
3022 i) +=
3023 pnd_field(pnd_index, i_lv, 0, 0) *
3024 interp(itw,
3025 orig_part.pha_mat_data(i_f,
3026 joker,
3027 i_za_out,
3028 i_aa_out,
3029 i_za_inc,
3030 i_aa_inc,
3031 i),
3032 T_gp);
3033 }
3034 }
3035 }
3036 }
3037 }
3038 }
3039 }
3040 }
3041 }
3042 }
3043 }
3044
3045 // Set new pnd_field at lowest altitude to 0 if the cloudbox doesn't touch
3046 // the ground.
3047 // The consistency for the original pnd_field has already been ensured by
3048 // cloudbox_checkedCalc
3049 if (z_field(cloudbox_limits[0], 0, 0) > z_surface(0, 0))
3050 pnd_field_merged(0, 0, 0, 0) = 0.;
3051
3052 pnd_field = pnd_field_merged;
3053 scat_data = scat_data_merged;
3054 scat_meta = scat_meta_merged;
3055 scat_species = scat_species_merged;
3056}
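
// ---------------------------------------------------------------------------
// [Editor's sketch, not part of m_optproperties.cc] ScatSpeciesMerge above
// builds, per cloudbox level, one merged scattering element whose optical
// data is the pnd-weighted sum of all original elements' data (interpolated
// to the level temperature where needed), while the merged pnd_field becomes
// 1 at that level. The helper below shows the weighting rule for a generic
// per-element quantity, skipping elements below PND_LIMIT; names are
// illustrative only.
#include <cstddef>
#include <vector>

static double merge_level_sketch(const std::vector<double>& pnd,    // per element at this level
                                 const std::vector<double>& value,  // per element, e.g. K11
                                 double pnd_limit = 1e-12) {
  double merged = 0.0;
  for (std::size_t i = 0; i < pnd.size(); ++i)
    if (pnd[i] > pnd_limit) merged += pnd[i] * value[i];  // weighted sum
  return merged;
}
// ---------------------------------------------------------------------------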
3057
3058/* Workspace method: Doxygen documentation will be auto-generated */
3059void ExtractFromMetaSingleScatSpecies(
3060 //WS Output:
3061 Vector& meta_param,
3062 //WS Input:
3063 const ArrayOfArrayOfScatteringMetaData& scat_meta,
3064 const String& meta_name,
3065 const Index& scat_species_index,
3066 const Verbosity& /*verbosity*/) {
3067 if (scat_species_index < 0) {
3068 ostringstream os;
3069 os << "scat_species_index can't be <0!";
3070 throw runtime_error(os.str());
3071 }
3072
3073 const Index nss = scat_meta.nelem();
3074
3075 // check that scat_meta actually has at least scat_species_index elements
3076 if (!(nss > scat_species_index)) {
3077 ostringstream os;
3078 os << "Can not extract data for scattering species #" << scat_species_index
3079 << "\n"
3080 << "because scat_meta has only " << nss << " elements.";
3081 throw runtime_error(os.str());
3082 }
3083
3084 const Index nse = scat_meta[scat_species_index].nelem();
3085 meta_param.resize(nse);
3086
3087 for (Index i = 0; i < nse; i++) {
3088 if (meta_name == "mass")
3089 meta_param[i] = scat_meta[scat_species_index][i].mass;
3090 else if (meta_name == "diameter_max")
3091 meta_param[i] = scat_meta[scat_species_index][i].diameter_max;
3092 else if (meta_name == "diameter_volume_equ")
3093 meta_param[i] = scat_meta[scat_species_index][i].diameter_volume_equ;
3094 else if (meta_name == "diameter_area_equ_aerodynamical")
3095 meta_param[i] =
3096 scat_meta[scat_species_index][i].diameter_area_equ_aerodynamical;
3097 else {
3098 ostringstream os;
3099 os << "Meta parameter \"" << meta_name << "\"is unknown.";
3100 throw runtime_error(os.str());
3101 }
3102 }
3103}
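
// ---------------------------------------------------------------------------
// [Editor's sketch, not part of m_optproperties.cc] The name dispatch in
// ExtractFromMetaSingleScatSpecies above can be exercised as follows; the
// struct and function below are hypothetical stand-ins (plain C++, no ARTS
// types) used only to illustrate extracting one named metadata field per
// scattering element of the selected species.
#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

struct MetaSketch { double mass; double diameter_max; };

static std::vector<double> extract_meta_sketch(const std::vector<MetaSketch>& meta,
                                               const std::string& meta_name) {
  std::vector<double> out(meta.size());
  for (std::size_t i = 0; i < meta.size(); ++i) {
    if (meta_name == "mass") out[i] = meta[i].mass;
    else if (meta_name == "diameter_max") out[i] = meta[i].diameter_max;
    else throw std::runtime_error("Meta parameter \"" + meta_name + "\" is unknown.");
  }
  return out;
}
// ---------------------------------------------------------------------------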
3104
3105