ARTS 2.5.4 (git: 31ce4f0e)
m_optproperties.cc
1/* Copyright (C) 2002-2012
2 Sreerekha T.R. <rekha@uni-bremen.de>
3 Claudia Emde <claudia.emde@dlr.de>
4 Cory Davies <cory@met.ed.ac.uk>
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19 USA. */
20
35/*===========================================================================
36 === External declarations
37 ===========================================================================*/
38
39#include <cfloat>
40#include <cmath>
41#include "array.h"
42#include "arts.h"
43#include "arts_constants.h"
44#include "arts_conversions.h"
45#include "auto_md.h"
46#include "check_input.h"
47#include "exceptions.h"
48#include "interpolation.h"
50#include "logic.h"
51#include "math_funcs.h"
52#include "matpackIII.h"
53#include "matpackVII.h"
54#include "messages.h"
55#include "montecarlo.h"
56#include "optproperties.h"
57#include "sorting.h"
58#include "xml_io.h"
59
60inline constexpr Numeric PI=Constant::pi;
61inline constexpr Numeric DEG2RAD=Conversion::deg2rad(1);
62inline constexpr Numeric RAD2DEG=Conversion::rad2deg(1);
63
64#define PART_TYPE scat_data[i_ss][i_se].ptype
65#define F_DATAGRID scat_data[i_ss][i_se].f_grid
66#define T_DATAGRID scat_data[i_ss][i_se].T_grid
67#define ZA_DATAGRID scat_data[i_ss][i_se].za_grid
68#define AA_DATAGRID scat_data[i_ss][i_se].aa_grid
69#define PHA_MAT_DATA scat_data[i_ss][i_se].pha_mat_data
70#define EXT_MAT_DATA scat_data[i_ss][i_se].ext_mat_data
71#define ABS_VEC_DATA scat_data[i_ss][i_se].abs_vec_data
72
73// If particle number density is below this value,
74// no transformations will be performed
75#define PND_LIMIT 1e-12
76
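// The WSMs below share a convention: a negative rtp_temperature does not
// request temperature interpolation but selects a single point of the
// scattering data T_grid (> -10: lowest point, > -20: highest point,
// otherwise: median point). The following is only an illustrative sketch of
// that selection (a hypothetical helper, not part of the ARTS API); the WSMs
// implement the same logic in-line.
/*
static Index select_T_index_sketch(const Numeric& rtp_temperature,
                                   const Index& nT) {
  if (rtp_temperature > -10.)  // lowest T-point
    return 0;
  if (rtp_temperature > -20.)  // highest T-point
    return nT - 1;
  return nT / 2;               // median T-point
}
*/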
77/* Workspace method: Doxygen documentation will be auto-generated */
78void pha_mat_sptFromData( // Output:
79 Tensor5& pha_mat_spt,
80 // Input:
81 const ArrayOfArrayOfSingleScatteringData& scat_data,
82 const Vector& za_grid,
83 const Vector& aa_grid,
84 const Index& za_index, // propagation directions
85 const Index& aa_index,
86 const Index& f_index,
87 const Vector& f_grid,
88 const Numeric& rtp_temperature,
89 const Tensor4& pnd_field,
90 const Index& scat_p_index,
91 const Index& scat_lat_index,
92 const Index& scat_lon_index,
93 const Verbosity& verbosity) {
94 const Index stokes_dim = pha_mat_spt.ncols();
95 if (stokes_dim > 4 || stokes_dim < 1) {
96 throw runtime_error(
97 "The dimension of the stokes vector \n"
98 "must be 1,2,3 or 4");
99 }
100
101 // Determine total number of scattering elements
102 const Index N_se_total = TotalNumberOfElements(scat_data);
103 if (N_se_total != pnd_field.nbooks()) {
104 ostringstream os;
105 os << "Total number of scattering elements in scat_data "
106 << "inconsistent with size of pnd_field.";
107 throw runtime_error(os.str());
108 }
109 // As pha_mat_spt is typically initialized from pnd_field, this theoretically
110 // checks the same as the runtime_error above. Still, we keep it to be on the
111 // safe side.
112 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
113
114 // Check that we don't have scat_data_mono here. Only the first scat element
115 // is checked, assuming the other elements have been processed in the same
116 // manner. That is safe against having mono data, but not against individual
117 // elements produced with only a single frequency. The latter, however, has
118 // been checked by the scat_data_raw reading routines (ScatSpecies/Element*Add/Read).
119 // It remains unsafe when ReadXML is used directly or if scat_data(_raw) is
120 // (partly) produced from scat_data_singleTmatrix.
121 if (scat_data[0][0].f_grid.nelem() < 2) {
122 ostringstream os;
123 os << "Scattering data seems to be *scat_data_mono* (1 freq point only),\n"
124 << "but frequency interpolable data (*scat_data* with >=2 freq points) "
125 << "is expected here.";
126 throw runtime_error(os.str());
127 }
128
129 const Index N_ss = scat_data.nelem();
130
131 // Phase matrix data interpolated to a single frequency and temperature.
132 // Dimensions: [za_sca, aa_sca, za_inc, aa_inc, matrix element]
133 Tensor5 pha_mat_data_int;
134
135 Index i_se_flat = 0;
136 // Loop over scattering species
137 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
138 const Index N_se = scat_data[i_ss].nelem();
139
140 // Loop over the included scattering elements
141 for (Index i_se = 0; i_se < N_se; i_se++) {
142 // If the particle number density at a specific point in the
143 // atmosphere for the i_se scattering element is zero, we don't need
144 // to do the transformation!
145 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
146 PND_LIMIT) {
147 // First we have to transform the data from the coordinate system
148 // used in the database (depending on the kind of ptype) to the
149 // laboratory coordinate system.
150
151 // Frequency and temperature interpolation:
152
153 // Container for data at one frequency and one temperature.
154 pha_mat_data_int.resize(PHA_MAT_DATA.nshelves(),
155 PHA_MAT_DATA.nbooks(),
156 PHA_MAT_DATA.npages(),
157 PHA_MAT_DATA.nrows(),
158 PHA_MAT_DATA.ncols());
159
160 // Gridpositions:
161 GridPos freq_gp;
162 gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
163 GridPos t_gp;
164 Vector itw;
165
166 Index ti = -1;
167
168 if (PHA_MAT_DATA.nvitrines() == 1) // just 1 T_grid element
169 {
170 ti = 0;
171 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
172 // pick one temperature'
173 {
174 if (rtp_temperature > -10.) // lowest T-point
175 {
176 ti = 0;
177 } else if (rtp_temperature > -20.) // highest T-point
178 {
179 ti = T_DATAGRID.nelem() - 1;
180 } else // median T-point
181 {
182 ti = T_DATAGRID.nelem() / 2;
183 }
184 }
185
186 if (ti < 0) // temperature interpolation
187 {
188 ostringstream os;
189 os << "In pha_mat_sptFromData.\n"
190 << "The temperature grid of the scattering data does not\n"
191 << "cover the atmospheric temperature at cloud location.\n"
192 << "The data should include the value T = " << rtp_temperature
193 << " K.";
194 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
195
196 gridpos(t_gp, T_DATAGRID, rtp_temperature);
197
198 // Interpolation weights:
199 itw.resize(4);
200 interpweights(itw, freq_gp, t_gp);
201
202 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
203 i_za_sca++)
204 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
205 i_aa_sca++)
206 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
207 i_za_inc++)
208 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
209 i_aa_inc++)
210 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
211 // Interpolation of phase matrix:
212 pha_mat_data_int(
213 i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
214 interp(itw,
215 PHA_MAT_DATA(joker,
216 joker,
217 i_za_sca,
218 i_aa_sca,
219 i_za_inc,
220 i_aa_inc,
221 i),
222 freq_gp,
223 t_gp);
224 } else {
225 // Interpolation weights:
226 itw.resize(2);
227 interpweights(itw, freq_gp);
228 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
229 i_za_sca++)
230 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
231 i_aa_sca++)
232 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
233 i_za_inc++)
234 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
235 i_aa_inc++)
236 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
237 // Interpolation of phase matrix:
238 pha_mat_data_int(
239 i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
240 interp(itw,
241 PHA_MAT_DATA(joker,
242 ti,
243 i_za_sca,
244 i_aa_sca,
245 i_za_inc,
246 i_aa_inc,
247 i),
248 freq_gp);
249 }
250
251 // Do the transformation into the laboratory coordinate system.
252 for (Index za_inc_idx = 0; za_inc_idx < za_grid.nelem();
253 za_inc_idx++) {
254 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
255 aa_inc_idx++) {
256 pha_matTransform(
257 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
258 pha_mat_data_int,
259 ZA_DATAGRID,
260 AA_DATAGRID,
261 PART_TYPE,
262 za_index,
263 aa_index,
264 za_inc_idx,
265 aa_inc_idx,
266 za_grid,
267 aa_grid,
268 verbosity);
269 }
270 }
271 }
272 i_se_flat++;
273 }
274 }
275}
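// pha_mat_sptFromData above relies on the classic ARTS grid-position idiom
// for the combined frequency/temperature interpolation: gridpos ->
// interpweights -> interp. A minimal, self-contained sketch of that idiom on
// dummy scalar data (grids and values are made up for illustration only):
/*
{
  Vector f_dummy, T_dummy;
  nlinspace(f_dummy, 100e9, 300e9, 3);  // data frequency grid
  nlinspace(T_dummy, 200., 300., 3);    // data temperature grid
  Matrix y(3, 3, 1.0);                  // data on [f, T]

  GridPos gp_f, gp_T;
  gridpos(gp_f, f_dummy, 150e9);
  gridpos(gp_T, T_dummy, 260.);

  Vector itw(4);                        // 2 weights per dimension
  interpweights(itw, gp_f, gp_T);
  const Numeric y_int = interp(itw, y, gp_f, gp_T);
}
*/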
276
277/* Workspace method: Doxygen documentation will be auto-generated */
278void pha_mat_sptFromDataDOITOpt( // Output:
279 Tensor5& pha_mat_spt,
280 // Input:
281 const ArrayOfTensor7& pha_mat_sptDOITOpt,
282 const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
283 const Index& doit_za_grid_size,
284 const Vector& aa_grid,
285 const Index& za_index, // propagation directions
286 const Index& aa_index,
287 const Numeric& rtp_temperature,
288 const Tensor4& pnd_field,
289 const Index& scat_p_index,
290 const Index& scat_lat_index,
291 const Index& scat_lon_index,
292 const Verbosity&) {
293 const Index N_se_total = TotalNumberOfElements(scat_data_mono);
294
295 if (N_se_total != pnd_field.nbooks()) {
296 ostringstream os;
297 os << "Total number of scattering elements in scat_data_mono "
298 << "inconsistent with size of pnd_field.";
299 throw runtime_error(os.str());
300 }
301 // As pha_mat_spt is typically initialized from pnd_field, this theoretically
302 // checks the same as the runtime_error above. Still, we keep it to be on the
303 // safe side.
304 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
305
306 // atmosphere_dim = 3
307 if (pnd_field.ncols() > 1) {
308 ARTS_ASSERT(pha_mat_sptDOITOpt.nelem() == N_se_total);
309 // Assuming that if the size is o.k. for one scattering element, it will
310 // also be o.k. for the other scattering elements.
311 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nlibraries() ==
312 scat_data_mono[0][0].T_grid.nelem());
313 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nvitrines() == doit_za_grid_size);
314 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nshelves() == aa_grid.nelem());
315 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nbooks() == doit_za_grid_size);
316 ARTS_ASSERT(pha_mat_sptDOITOpt[0].npages() == aa_grid.nelem());
317 }
318
319 // atmosphere_dim = 1, only zenith angle required for scattered directions.
320 else if (pnd_field.ncols() == 1) {
321 //ARTS_ASSERT(is_size(scat_theta, doit_za_grid_size, 1,
322 // doit_za_grid_size, aa_grid.nelem()));
323
324 ARTS_ASSERT(pha_mat_sptDOITOpt.nelem() == TotalNumberOfElements(scat_data_mono));
325 // Assuming that if the size is o.k. for one scattering element, it will
326 // also be o.k. for the other scattering elements.
327 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nlibraries() ==
328 scat_data_mono[0][0].T_grid.nelem());
329 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nvitrines() == doit_za_grid_size);
330 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nshelves() == 1);
331 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nbooks() == doit_za_grid_size);
332 ARTS_ASSERT(pha_mat_sptDOITOpt[0].npages() == aa_grid.nelem());
333 }
334
335 ARTS_ASSERT(doit_za_grid_size > 0);
336
337 // Check that we do indeed have scat_data_mono here. Only the first scat
338 // element is checked, assuming the other elements have been processed in the
339 // same manner. That is safe against having scat_data here if it originated
340 // from the scat_data_raw reading routines (ScatSpecies/Element*Add/Read), but
341 // not against data read by ReadXML directly or against scat_data(_raw) that has
342 // been (partly) produced from scat_data_singleTmatrix. Checking that would be
343 // too costly here, though.
344 // Also, we can't check here whether data is at the correct frequency since we
345 // don't know f_grid and f_index here (we could pass it in, though).
346 if (scat_data_mono[0][0].f_grid.nelem() > 1) {
347 ostringstream os;
348 os << "Scattering data seems to be *scat_data* (several freq points),\n"
349 << "but *scat_data_mono* (1 freq point only) is expected here.";
350 throw runtime_error(os.str());
351 }
352
353 // Create equidistant zenith angle grid
354 Vector za_grid;
355 nlinspace(za_grid, 0, 180, doit_za_grid_size);
356
357 const Index N_ss = scat_data_mono.nelem();
358 const Index stokes_dim = pha_mat_spt.ncols();
359
360 if (stokes_dim > 4 || stokes_dim < 1) {
361 throw runtime_error(
362 "The dimension of the stokes vector \n"
363 "must be 1,2,3 or 4");
364 }
365
366 GridPos T_gp;
367 Vector itw(2);
368
369 // Initialisation
370 pha_mat_spt = 0.;
371
372 Index i_se_flat = 0;
373
374 // Do the transformation into the laboratory coordinate system.
375 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
376 const Index N_se = scat_data_mono[i_ss].nelem();
377
378 for (Index i_se = 0; i_se < N_se; i_se++) {
379 // If the particle number density at a specific point in the
380 // atmosphere for the i_se scattering element is zero, we don't need
381 // to do the transformation!
382 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
383 PND_LIMIT) //TRS
384 {
385 Index nT = scat_data_mono[i_ss][i_se].pha_mat_data.nvitrines();
386 Index ti = -1;
387
388 if (nT == 1) // just 1 T_grid element
389 {
390 ti = 0;
391 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
392 // pick one temperature'
393 {
394 if (rtp_temperature > -10.) // lowest T-point
395 {
396 ti = 0;
397 } else if (rtp_temperature > -20.) // highest T-point
398 {
399 ti = nT - 1;
400 } else // median T-point
401 {
402 ti = nT / 2;
403 }
404 } else {
405 ostringstream os;
406 os << "In pha_mat_sptFromDataDOITOpt.\n"
407 << "The temperature grid of the scattering data does not\n"
408 << "cover the atmospheric temperature at cloud location.\n"
409 << "The data should include the value T = " << rtp_temperature
410 << " K.";
411 chk_interpolation_grids(
412 os.str(), scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
413
414 // Gridpositions:
415 gridpos(T_gp, scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
416 // Interpolation weights:
417 interpweights(itw, T_gp);
418 }
419
420 for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
421 za_inc_idx++) {
422 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
423 aa_inc_idx++) {
424 if (ti < 0) // Temperature interpolation
425 {
426 for (Index i = 0; i < stokes_dim; i++) {
427 for (Index j = 0; j < stokes_dim; j++) {
428 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, i, j) =
429 interp(itw,
430 pha_mat_sptDOITOpt[i_se_flat](joker,
431 za_index,
432 aa_index,
433 za_inc_idx,
434 aa_inc_idx,
435 i,
436 j),
437 T_gp);
438 }
439 }
440 } else {
441 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker) =
442 pha_mat_sptDOITOpt[i_se_flat](ti,
443 za_index,
444 aa_index,
445 za_inc_idx,
446 aa_inc_idx,
447 joker,
448 joker);
449 }
450 }
451 }
452 } // TRS
453
454 i_se_flat++;
455 }
456 }
457}
458
459/* Workspace method: Doxygen documentation will be auto-generated */
460void opt_prop_sptFromData( // Output and Input:
461 ArrayOfPropagationMatrix& ext_mat_spt,
462 ArrayOfStokesVector& abs_vec_spt,
463 // Input:
464 const ArrayOfArrayOfSingleScatteringData& scat_data,
465 const Vector& za_grid,
466 const Vector& aa_grid,
467 const Index& za_index, // propagation directions
468 const Index& aa_index,
469 const Index& f_index,
470 const Vector& f_grid,
471 const Numeric& rtp_temperature,
472 const Tensor4& pnd_field,
473 const Index& scat_p_index,
474 const Index& scat_lat_index,
475 const Index& scat_lon_index,
476 const Verbosity& verbosity) {
477 const Index N_ss = scat_data.nelem();
478 const Numeric za_sca = za_grid[za_index];
479 const Numeric aa_sca = aa_grid[aa_index];
480
481 DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data);
482 if (N_ss) {
483 ARTS_ASSERT(ext_mat_spt[0].NumberOfFrequencies() == N_se_total);
484 ARTS_ASSERT(abs_vec_spt[0].NumberOfFrequencies() == N_se_total);
485 });
486
487 // Check that we don't have scat_data_mono here. Only the first scat element
488 // is checked, assuming the other elements have been processed in the same
489 // manner. That is safe against having mono data, but not against individual
490 // elements produced with only a single frequency. The latter, however, has
491 // been checked by the scat_data_raw reading routines (ScatSpecies/Element*Add/Read).
492 // It remains unsafe when ReadXML is used directly or if scat_data(_raw) is
493 // (partly) produced from scat_data_singleTmatrix.
494 if (scat_data[0][0].f_grid.nelem() < 2) {
495 ostringstream os;
496 os << "Scattering data seems to be *scat_data_mono* (1 freq point only),\n"
497 << "but frequency interpolable data (*scat_data* with >=2 freq points) "
498 << "is expected here.";
499 throw runtime_error(os.str());
500 }
501
502 // Extinction matrix and absorption vector data interpolated to a single
503 // frequency (and temperature). Dimensions: [za, aa, matrix/vector element]
504 Tensor3 ext_mat_data_int;
505 Tensor3 abs_vec_data_int;
506
507 // Initialisation
508 ext_mat_spt = 0.;
509 abs_vec_spt = 0.;
510
511 Index i_se_flat = 0;
512 // Loop over the included scattering species
513 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
514 const Index N_se = scat_data[i_ss].nelem();
515
516 // Loop over the included scattering elements
517 for (Index i_se = 0; i_se < N_se; i_se++) {
518 // If the particle number density at a specific point in the
519 // atmosphere for the i_se scattering element is zero, we don't need
520 // to do the transformation
521
522 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
523 PND_LIMIT) {
524 // First we have to transform the data from the coordinate system
525 // used in the database (depending on the kind of ptype) to the
526 // laboratory coordinate system.
527
528 // Frequency interpolation:
529
530 // The data is interpolated on one frequency.
531 //
532 // Resize the variables for the interpolated data:
533 //
534 ext_mat_data_int.resize(
535 EXT_MAT_DATA.npages(), EXT_MAT_DATA.nrows(), EXT_MAT_DATA.ncols());
536 //
537 abs_vec_data_int.resize(
538 ABS_VEC_DATA.npages(), ABS_VEC_DATA.nrows(), ABS_VEC_DATA.ncols());
539
540 // Gridpositions:
541 GridPos freq_gp;
542 gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
543 GridPos t_gp;
544 Vector itw;
545
546 if (T_DATAGRID.nelem() > 1) {
547 ostringstream os;
548 os << "In opt_prop_sptFromData.\n"
549 << "The temperature grid of the scattering data does not\n"
550 << "cover the atmospheric temperature at cloud location.\n"
551 << "The data should include the value T = " << rtp_temperature
552 << " K.";
553 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
554
555 gridpos(t_gp, T_DATAGRID, rtp_temperature);
556
557 // Interpolation weights:
558 itw.resize(4);
559 interpweights(itw, freq_gp, t_gp);
560
561 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
562 i_za_sca++) {
563 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
564 i_aa_sca++) {
565 //
566 // Interpolation of extinction matrix:
567 //
568 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
569 ext_mat_data_int(i_za_sca, i_aa_sca, i) =
570 interp(itw,
571 EXT_MAT_DATA(joker, joker, i_za_sca, i_aa_sca, i),
572 freq_gp,
573 t_gp);
574 }
575 }
576 }
577
578 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
579 i_za_sca++) {
580 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
581 i_aa_sca++) {
582 //
583 // Interpolation of absorption vector:
584 //
585 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
586 abs_vec_data_int(i_za_sca, i_aa_sca, i) =
587 interp(itw,
588 ABS_VEC_DATA(joker, joker, i_za_sca, i_aa_sca, i),
589 freq_gp,
590 t_gp);
591 }
592 }
593 }
594 } else {
595 // Interpolation weights:
596 itw.resize(2);
597 interpweights(itw, freq_gp);
598
599 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
600 i_za_sca++) {
601 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
602 i_aa_sca++) {
603 //
604 // Interpolation of extinction matrix:
605 //
606 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
607 ext_mat_data_int(i_za_sca, i_aa_sca, i) =
608 interp(itw,
609 EXT_MAT_DATA(joker, 0, i_za_sca, i_aa_sca, i),
610 freq_gp);
611 }
612 }
613 }
614
615 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
616 i_za_sca++) {
617 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
618 i_aa_sca++) {
619 //
620 // Interpolation of absorption vector:
621 //
622 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
623 abs_vec_data_int(i_za_sca, i_aa_sca, i) =
624 interp(itw,
625 ABS_VEC_DATA(joker, 0, i_za_sca, i_aa_sca, i),
626 freq_gp);
627 }
628 }
629 }
630 }
631
632 //
633 // Do the transformation into the laboratory coordinate system.
634 //
635 // Extinction matrix:
636 //
637 ext_matTransform(ext_mat_spt[i_se_flat],
638 ext_mat_data_int,
639 ZA_DATAGRID,
640 AA_DATAGRID,
641 PART_TYPE,
642 za_sca,
643 aa_sca,
644 verbosity);
645 //
646 // Absorption vector:
647 //
648 abs_vecTransform(abs_vec_spt[i_se_flat],
649 abs_vec_data_int,
650 ZA_DATAGRID,
651 AA_DATAGRID,
652 PART_TYPE,
653 za_sca,
654 aa_sca,
655 verbosity);
656 }
657
658 i_se_flat++;
659 }
660 }
661}
662
663/* Workspace method: Doxygen documentation will be auto-generated */
664void opt_prop_sptFromScat_data( // Output and Input:
665 ArrayOfPropagationMatrix& ext_mat_spt,
666 ArrayOfStokesVector& abs_vec_spt,
667 // Input:
668 const ArrayOfArrayOfSingleScatteringData& scat_data,
669 const Index& scat_data_checked,
670 const Vector& za_grid,
671 const Vector& aa_grid,
672 const Index& za_index, // propagation directions
673 const Index& aa_index,
674 const Index& f_index,
675 const Numeric& rtp_temperature,
676 const Tensor4& pnd_field,
677 const Index& scat_p_index,
678 const Index& scat_lat_index,
679 const Index& scat_lon_index,
680 const Verbosity& verbosity) {
681 if (scat_data_checked != 1)
682 throw runtime_error(
683 "The scattering data must be flagged to have "
684 "passed a consistency check (scat_data_checked=1).");
685
686 const Index N_ss = scat_data.nelem();
687 const Index stokes_dim = ext_mat_spt[0].StokesDimensions();
688 const Numeric za_sca = za_grid[za_index];
689 const Numeric aa_sca = aa_grid[aa_index];
690
691 if (stokes_dim > 4 || stokes_dim < 1) {
692 throw runtime_error(
693 "The dimension of the stokes vector \n"
694 "must be 1,2,3 or 4");
695 }
696
697 DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data);)
698 ARTS_ASSERT(ext_mat_spt.nelem() == N_se_total);
699 ARTS_ASSERT(abs_vec_spt.nelem() == N_se_total);
700
701 // Extinction matrix and absorption vector data at a single frequency and
702 // temperature. Dimensions: [za, aa, matrix/vector element]
703 Tensor3 ext_mat_data_int;
704 Tensor3 abs_vec_data_int;
705
706 // Initialisation
707 for (auto& pm : ext_mat_spt) pm.SetZero();
708 for (auto& sv : abs_vec_spt) sv.SetZero();
709
710 Index this_f_index;
711
712 Index i_se_flat = 0;
713 // Loop over the included scattering species
714 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
715 const Index N_se = scat_data[i_ss].nelem();
716
717 // Loop over the included scattering elements
718 for (Index i_se = 0; i_se < N_se; i_se++) {
719 // If the particle number density at a specific point in the
720 // atmosphere for the i_se scattering element is zero, we don't need
721 // to do the transformation
722
723 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
724 PND_LIMIT) {
725 // First we have to transform the data from the coordinate system
726 // used in the database (depending on the kind of ptype) to the
727 // laboratory coordinate system.
728
729 // Resize the variables for the interpolated data (1freq, 1T):
730 ext_mat_data_int.resize(
731 EXT_MAT_DATA.npages(), EXT_MAT_DATA.nrows(), EXT_MAT_DATA.ncols());
732 abs_vec_data_int.resize(
733 ABS_VEC_DATA.npages(), ABS_VEC_DATA.nrows(), ABS_VEC_DATA.ncols());
734
735 // Gridpositions and interpolation weights;
736 GridPos t_gp;
737 Vector itw;
738 if (EXT_MAT_DATA.nbooks() > 1 || ABS_VEC_DATA.nbooks() > 1) {
739 ostringstream os;
740 os << "In opt_prop_sptFromScat_data.\n"
741 << "The temperature grid of the scattering data does not\n"
742 << "cover the atmospheric temperature at cloud location.\n"
743 << "The data should include the value T = " << rtp_temperature
744 << " K.";
745 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
746
747 gridpos(t_gp, T_DATAGRID, rtp_temperature);
748
749 // Interpolation weights:
750 itw.resize(2);
751 interpweights(itw, t_gp);
752 }
753
754 // Frequency extraction and temperature interpolation
755
756 if (EXT_MAT_DATA.nshelves() == 1)
757 this_f_index = 0;
758 else
759 this_f_index = f_index;
760
761 if (EXT_MAT_DATA.nbooks() > 1) {
762 // Interpolation of extinction matrix:
763 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
764 i_za_sca++) {
765 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
766 i_aa_sca++) {
767 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
768 ext_mat_data_int(i_za_sca, i_aa_sca, i) = interp(
769 itw,
770 EXT_MAT_DATA(this_f_index, joker, i_za_sca, i_aa_sca, i),
771 t_gp);
772 }
773 }
774 }
775 } else {
776 ext_mat_data_int = EXT_MAT_DATA(this_f_index, 0, joker, joker, joker);
777 /*
778 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
779 i_za_sca++)
780 {
781 for(Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
782 i_aa_sca++)
783 {
784 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++)
785 {
786 ext_mat_data_int(i_za_sca, i_aa_sca, i) =
787 EXT_MAT_DATA(this_f_index, 0,
788 i_za_sca, i_aa_sca, i);
789 }
790 }
791 } */
792 }
793
794 if (ABS_VEC_DATA.nshelves() == 1)
795 this_f_index = 0;
796 else
797 this_f_index = f_index;
798
799 if (ABS_VEC_DATA.nbooks() > 1) {
800 // Interpolation of absorption vector:
801 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
802 i_za_sca++) {
803 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
804 i_aa_sca++) {
805 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
806 abs_vec_data_int(i_za_sca, i_aa_sca, i) = interp(
807 itw,
808 ABS_VEC_DATA(this_f_index, joker, i_za_sca, i_aa_sca, i),
809 t_gp);
810 }
811 }
812 }
813 } else {
814 abs_vec_data_int = ABS_VEC_DATA(this_f_index, 0, joker, joker, joker);
815 /*
816 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
817 i_za_sca++)
818 {
819 for(Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
820 i_aa_sca++)
821 {
822 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++)
823 {
824 abs_vec_data_int(i_za_sca, i_aa_sca, i) =
825 ABS_VEC_DATA(this_f_index, 0,
826 i_za_sca, i_aa_sca, i);
827 }
828 }
829 } */
830 }
831
832 //
833 // Do the transformation into the laboratory coordinate system.
834 //
835 // Extinction matrix:
836 ext_matTransform(ext_mat_spt[i_se_flat],
837 ext_mat_data_int,
838 ZA_DATAGRID,
839 AA_DATAGRID,
840 PART_TYPE,
841 za_sca,
842 aa_sca,
843 verbosity);
844 // Absorption vector:
845 abs_vecTransform(abs_vec_spt[i_se_flat],
846 abs_vec_data_int,
847 ZA_DATAGRID,
848 AA_DATAGRID,
849 PART_TYPE,
850 za_sca,
851 aa_sca,
852 verbosity);
853 }
854 i_se_flat++;
855 }
856 }
857}
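// opt_prop_sptFromScat_data assumes the scattering data are already on f_grid
// (or hold a single frequency) and therefore only extracts the frequency index
// and, where needed, interpolates in temperature with a plain 1D
// gridpos/interpweights/interp step. A minimal sketch of that 1D step on dummy
// data (grid and values made up for illustration only):
/*
{
  Vector T_dummy, k_dummy;
  nlinspace(T_dummy, 200., 300., 5);    // data T_grid
  nlinspace(k_dummy, 1e-10, 2e-10, 5);  // K11(T) for one direction
  GridPos t_gp;
  gridpos(t_gp, T_dummy, 262.);
  Vector itw(2);
  interpweights(itw, t_gp);
  const Numeric k11_at_262K = interp(itw, k_dummy, t_gp);
}
*/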
858
859/* Workspace method: Doxygen documentation will be auto-generated */
860void opt_prop_bulkCalc( // Output and Input:
861 PropagationMatrix& ext_mat,
862 StokesVector& abs_vec,
863 // Input:
864 const ArrayOfPropagationMatrix& ext_mat_spt,
865 const ArrayOfStokesVector& abs_vec_spt,
866 const Tensor4& pnd_field,
867 const Index& scat_p_index,
868 const Index& scat_lat_index,
869 const Index& scat_lon_index,
870 const Verbosity&) {
871 Index N_se = abs_vec_spt.nelem();
872 //ARTS_ASSERT( ext_mat_spt.npages()==N_se )
873 if (ext_mat_spt.nelem() not_eq N_se) {
874 ostringstream os;
875 os << "Number of scattering elements in *abs_vec_spt* and *ext_mat_spt*\n"
876 << "does not agree.";
877 throw runtime_error(os.str());
878 }
879
880 Index stokes_dim = abs_vec_spt[0].StokesDimensions();
881 //ARTS_ASSERT( ext_mat_spt.ncols()==stokes_dim && ext_mat_spt.nrows()==stokes_dim )
882 if (ext_mat_spt[0].StokesDimensions() not_eq stokes_dim) {
883 ostringstream os;
884 os << "*stokes_dim* of *abs_vec_spt* and *ext_mat_spt* does not agree.";
885 throw runtime_error(os.str());
886 }
887 if (stokes_dim > 4 || stokes_dim < 1) {
888 ostringstream os;
889 os << "The dimension of stokes vector can only be 1, 2, 3, or 4.";
890 throw runtime_error(os.str());
891 }
892
893 ext_mat = PropagationMatrix(1, stokes_dim);
894 ext_mat.SetZero(); // Initialize to zero!
895 abs_vec = StokesVector(1, stokes_dim);
896 abs_vec.SetZero(); // Initialize to zero!
897
898 PropagationMatrix ext_mat_part(1, stokes_dim);
899 ext_mat_part.SetZero();
900 StokesVector abs_vec_part(1, stokes_dim);
901 abs_vec_part.SetZero();
902
903 // this is the loop over the different scattering elements
904 for (Index l = 0; l < N_se; l++) {
905 abs_vec_part.MultiplyAndAdd(
906 pnd_field(l, scat_p_index, scat_lat_index, scat_lon_index),
907 abs_vec_spt[l]);
908 ext_mat_part.MultiplyAndAdd(
909 pnd_field(l, scat_p_index, scat_lat_index, scat_lon_index),
910 ext_mat_spt[l]);
911 }
912
913 // Add the absorption contribution of the scattering elements to *abs_vec*.
914 abs_vec += abs_vec_part;
915 // Add the extinction contribution of the scattering elements to *ext_mat*.
916 ext_mat += ext_mat_part;
917}
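// opt_prop_bulkCalc forms the particle bulk properties as pnd-weighted sums
// over the scattering elements at the given grid point,
//   K_bulk = sum_l pnd(l) * K_spt(l),   a_bulk = sum_l pnd(l) * a_spt(l),
// where pnd(l) [1/m3] multiplies per-particle quantities [m2], giving [1/m].
// Illustrative scalar sketch with made-up numbers:
/*
{
  const Numeric pnd[2] = {1e3, 5e2};      // particle number densities [1/m3]
  const Numeric k11[2] = {1e-10, 3e-10};  // per-particle extinction [m2]
  Numeric k_bulk = 0;
  for (Index l = 0; l < 2; l++) k_bulk += pnd[l] * k11[l];  // result in [1/m]
}
*/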
918
919/* Workspace method: Doxygen documentation will be auto-generated */
920void ext_matAddGas(PropagationMatrix& ext_mat,
921 const PropagationMatrix& propmat_clearsky,
922 const Verbosity&) {
923 // Number of Stokes parameters:
924 const Index stokes_dim = ext_mat.StokesDimensions();
925
926 // The second dimension of ext_mat must also match the number of
927 // Stokes parameters:
928 ARTS_USER_ERROR_IF (stokes_dim != propmat_clearsky.StokesDimensions(),
929 "Col dimension of propmat_clearsky "
930 "inconsistent with col dimension in ext_mat.");
931
932 // Number of frequencies:
933 const Index f_dim = ext_mat.NumberOfFrequencies();
934
935 // This must be consistent with the second dimension of
936 // propmat_clearsky. Check this:
937 ARTS_USER_ERROR_IF (f_dim != propmat_clearsky.NumberOfFrequencies(),
938 "Frequency dimension of ext_mat and propmat_clearsky\n"
939 "are inconsistent in ext_matAddGas.");
940
941 ext_mat += propmat_clearsky;
942}
943
944/* Workspace method: Doxygen documentation will be auto-generated */
945void abs_vecAddGas(StokesVector& abs_vec,
946 const PropagationMatrix& propmat_clearsky,
947 const Verbosity&) {
948 // Number of frequencies:
949 const Index f_dim = abs_vec.NumberOfFrequencies();
950 const Index stokes_dim = abs_vec.StokesDimensions();
951
952 // This must be consistent with the second dimension of
953 // propmat_clearsky. Check this:
954 ARTS_USER_ERROR_IF (f_dim != propmat_clearsky.NumberOfFrequencies(),
955 "Frequency dimension of abs_vec and propmat_clearsky\n"
956 "are inconsistent in abs_vecAddGas.");
957 ARTS_USER_ERROR_IF (stokes_dim != propmat_clearsky.StokesDimensions(),
958 "Stokes dimension of abs_vec and propmat_clearsky\n"
959 "are inconsistent in abs_vecAddGas.");
960
961 // Loop all frequencies. Of course this includes the special case
962 // that there is only one frequency.
963 abs_vec += propmat_clearsky; // Defined to only add to the first element.
964
965 // We don't have to do anything about higher elements of abs_vec,
966 // since scalar gas absorption only influences the first element.
967}
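// Together, opt_prop_bulkCalc, ext_matAddGas, and abs_vecAddGas build the
// total (particle + gas) extinction and absorption used by the scattering
// solvers. A hedged sketch of the typical call order inside a solver loop
// (workspace plumbing omitted; the surrounding variables are assumed to
// exist as in the WSMs above):
/*
  opt_prop_bulkCalc(ext_mat, abs_vec, ext_mat_spt, abs_vec_spt, pnd_field,
                    scat_p_index, scat_lat_index, scat_lon_index, verbosity);
  ext_matAddGas(ext_mat, propmat_clearsky, verbosity);
  abs_vecAddGas(abs_vec, propmat_clearsky, verbosity);
*/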
968
969/* Workspace method: Doxygen documentation will be auto-generated */
970/*
971void ext_matAddGasZeeman( Tensor3& ext_mat,
972 const Tensor3& ext_mat_zee,
973 const Verbosity&)
974{
975 // Number of Stokes parameters:
976 const Index stokes_dim = ext_mat.ncols();
977
978 // The second dimension of ext_mat must also match the number of
979 // Stokes parameters:
980 if ( stokes_dim != ext_mat.nrows() )
981 throw runtime_error("Row dimension of ext_mat inconsistent with "
982 "column dimension.");
983
984 for ( Index i=0; i<stokes_dim; ++i )
985 {
986 for ( Index j=0; j<stokes_dim; ++j )
987 {
988 // Add the zeeman extinction to extinction matrix.
989 ext_mat(joker,i,j) += ext_mat_zee(joker, i, j);
990 }
991
992 }
993}
994*/
995
996/* Workspace method: Doxygen documentation will be auto-generated */
997/*
998void abs_vecAddGasZeeman( Matrix& abs_vec,
999 const Matrix& abs_vec_zee,
1000 const Verbosity&)
1001{
1002 // Number of Stokes parameters:
1003 const Index stokes_dim = abs_vec_zee.ncols();
1004 // that there is only one frequency.
1005 for ( Index j=0; j<stokes_dim; ++j )
1006 {
1007 abs_vec(joker,j) += abs_vec_zee(joker,j);
1008 }
1009}
1010*/
1011
1012/* Workspace method: Doxygen documentation will be auto-generated */
1013void pha_matCalc(Tensor4& pha_mat,
1014 const Tensor5& pha_mat_spt,
1015 const Tensor4& pnd_field,
1016 const Index& atmosphere_dim,
1017 const Index& scat_p_index,
1018 const Index& scat_lat_index,
1019 const Index& scat_lon_index,
1020 const Verbosity&) {
1021 Index N_se = pha_mat_spt.nshelves();
1022 Index Nza = pha_mat_spt.nbooks();
1023 Index Naa = pha_mat_spt.npages();
1024 Index stokes_dim = pha_mat_spt.nrows();
1025
1026 pha_mat.resize(Nza, Naa, stokes_dim, stokes_dim);
1027
1028 // Initialisation
1029 pha_mat = 0.0;
1030
1031 Index ilat = 0;
1032 Index ilon = 0;
1033 if (atmosphere_dim > 1) ilat = scat_lat_index;
1034 if (atmosphere_dim > 2) ilon = scat_lon_index;
1035
1036 if (atmosphere_dim == 1) {
1037 // For 1D atmospheres, we additionally integrate the phase matrix over the
1038 // azimuth, because there is no azimuth dependency of the incoming
1039 // field.
1040
1041 Numeric grid_step_size_azimuth = 360. / (Numeric)(Naa - 1) * DEG2RAD;
1042
1043 // this is a loop over the different scattering elements
1044 for (Index pt_index = 0; pt_index < N_se; ++pt_index)
1045 // these are loops over zenith angle and azimuth angle
1046 for (Index za_index = 0; za_index < Nza; ++za_index)
1047 for (Index aa_index = 0; aa_index < Naa - 1; ++aa_index)
1048 // now the last two loops over the stokes dimension.
1049 for (Index stokes_index_1 = 0; stokes_index_1 < stokes_dim;
1050 ++stokes_index_1)
1051 for (Index stokes_index_2 = 0; stokes_index_2 < stokes_dim;
1052 ++stokes_index_2)
1053 //summation of the product of pnd_field and
1054 //pha_mat_spt.
1055 pha_mat(za_index, 0, stokes_index_1, stokes_index_2) +=
1056 ((pha_mat_spt(pt_index,
1057 za_index,
1058 aa_index,
1059 stokes_index_1,
1060 stokes_index_2) +
1061 pha_mat_spt(pt_index,
1062 za_index,
1063 aa_index + 1,
1064 stokes_index_1,
1065 stokes_index_2)) /
1066 2 * grid_step_size_azimuth *
1067 pnd_field(pt_index, scat_p_index, ilat, ilon));
1068 } else {
1069 // this is a loop over the different scattering elements
1070 for (Index pt_index = 0; pt_index < N_se; ++pt_index)
1071 // these are loops over zenith angle and azimuth angle
1072 for (Index za_index = 0; za_index < Nza; ++za_index)
1073 for (Index aa_index = 0; aa_index < Naa; ++aa_index)
1074 // now the last two loops over the stokes dimension.
1075 for (Index stokes_index_1 = 0; stokes_index_1 < stokes_dim;
1076 ++stokes_index_1)
1077 for (Index stokes_index_2 = 0; stokes_index_2 < stokes_dim;
1078 ++stokes_index_2)
1079 //summation of the product of pnd_field and
1080 //pha_mat_spt.
1081 pha_mat(za_index, aa_index, stokes_index_1, stokes_index_2) +=
1082 (pha_mat_spt(pt_index,
1083 za_index,
1084 aa_index,
1085 stokes_index_1,
1086 stokes_index_2) *
1087 pnd_field(pt_index, scat_p_index, ilat, ilon));
1088 }
1089}
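// For 1D atmospheres, pha_matCalc integrates the incoming azimuth with a
// trapezoidal rule on the equidistant aa grid, i.e. per matrix element
//   Z_1D(za) = sum_aa (Z(za,aa) + Z(za,aa+1))/2 * dphi * pnd,
// with dphi = 360/(Naa-1) converted to radians. Minimal sketch of the same
// rule on a plain vector (integrand made up for illustration):
/*
{
  Vector z;                  // integrand sampled on Naa azimuth points
  nlinspace(z, 1., 1., 5);   // constant integrand of 1 for the sketch
  const Index Naa = z.nelem();
  const Numeric dphi = 360. / (Numeric)(Naa - 1) * DEG2RAD;
  Numeric integral = 0;
  for (Index aa = 0; aa < Naa - 1; aa++)
    integral += 0.5 * (z[aa] + z[aa + 1]) * dphi;  // ~2*pi for a constant 1
}
*/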
1090
1091/* Workspace method: Doxygen documentation will be auto-generated */
1092void scat_dataCheck( //Input:
1093 const ArrayOfArrayOfSingleScatteringData& scat_data,
1094 const String& check_type,
1095 const Numeric& threshold,
1096 const Verbosity& verbosity) {
1097 CREATE_OUT0;
1098 CREATE_OUT1;
1099 CREATE_OUT2;
1100 //CREATE_OUT3;
1101
1102 // FIXME:
1103 // so far, this works for both scat_data and scat_data_raw. Needs to be
1104 // adapted, though, once we have WSM that can create Z/K/a with different
1105 // f/T dimensions than scat_data_single.f/T_grid.
1106
1107 const Index N_ss = scat_data.nelem();
1108
1109 // 1) any negative values in Z11, K11, or a1? K11>=a1?
1110 // 2) scat_data containing any NaN?
1111 // 3) sca_mat norm sufficiently good (int(Z11)~=K11-a1?)
1112
1113 // Loop over the included scattering species
1114 out2 << " checking for negative values in Z11, K11, and a1, and for K11<a1\n";
1115 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1116 const Index N_se = scat_data[i_ss].nelem();
1117
1118 // Loop over the included scattering elements
1119 for (Index i_se = 0; i_se < N_se; i_se++) {
1120 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1121 for (Index zai = 0; zai < ABS_VEC_DATA.npages(); zai++)
1122 for (Index aai = 0; aai < ABS_VEC_DATA.nrows(); aai++) {
1123 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1124 if (EXT_MAT_DATA(f, t, zai, aai, 0) < 0 ||
1125 ABS_VEC_DATA(f, t, zai, aai, 0) < 0) {
1126 ostringstream os;
1127 os << "Scatt. species #" << i_ss << " element #" << i_se
1128 << " contains negative K11 or a1 at f#" << f << ", T#" << t
1129 << ", za#" << zai << ", aa#" << aai << "\n";
1130 throw runtime_error(os.str());
1131 }
1132 if (EXT_MAT_DATA(f, t, zai, aai, 0) <
1133 ABS_VEC_DATA(f, t, zai, aai, 0)) {
1134 ostringstream os;
1135 os << "Scatt. species #" << i_ss << " element #" << i_se
1136 << " has K11<a1 at f#" << f << ", T#" << t << ", za#" << zai
1137 << ", aa#" << aai << "\n";
1138 throw runtime_error(os.str());
1139 }
1140 }
1141 // Since pha_mat is allowed to have only a single T entry (while T_grid,
1142 // ext_mat, and abs_vec have more), we need a separate T loop
1143 // for pha_mat.
1144 Index nTpha = PHA_MAT_DATA.nvitrines();
1145 for (Index t = 0; t < nTpha; t++) {
1146 for (Index zas = 0; zas < PHA_MAT_DATA.nshelves(); zas++)
1147 for (Index aas = 0; aas < PHA_MAT_DATA.nbooks(); aas++)
1148 if (PHA_MAT_DATA(f, t, zas, aas, zai, aai, 0) < 0) {
1149 ostringstream os;
1150 os << "Scatt. species #" << i_ss << " element #" << i_se
1151 << " contains negative Z11 at f#" << f << ", T#" << t
1152 << " (of " << nTpha << "), za_sca#" << zas << ", aa_sca#"
1153 << aas << ", za_inc#" << zai << ", aa_inc#" << aai
1154 << "\n";
1155 throw runtime_error(os.str());
1156 }
1157 }
1158 }
1159 }
1160 }
1161 }
1162
1163 // Loop over the included scattering species
1164 out2 << " checking for NaN anywhere in Z, K, and a\n";
1165 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1166 const Index N_se = scat_data[i_ss].nelem();
1167
1168 // Loop over the included scattering elements
1169 for (Index i_se = 0; i_se < N_se; i_se++) {
1170 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1171 for (Index zai = 0; zai < ABS_VEC_DATA.npages(); zai++)
1172 for (Index aai = 0; aai < ABS_VEC_DATA.nrows(); aai++) {
1173 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1174 for (Index st = 0; st < ABS_VEC_DATA.ncols(); st++)
1175 if (std::isnan(ABS_VEC_DATA(f, t, zai, aai, st))) {
1176 ostringstream os;
1177 os << "Scatt. species #" << i_ss << " element #" << i_se
1178 << " contains NaN in abs_vec at f#" << f << ", T#" << t
1179 << ", za#" << zai << ", aa#" << aai << ", stokes #" << st
1180 << "\n";
1181 throw runtime_error(os.str());
1182 }
1183 for (Index st = 0; st < EXT_MAT_DATA.ncols(); st++)
1184 if (std::isnan(EXT_MAT_DATA(f, t, zai, aai, st))) {
1185 ostringstream os;
1186 os << "Scatt. species #" << i_ss << " element #" << i_se
1187 << " contains NaN in ext_mat at f#" << f << ", T#" << t
1188 << ", za#" << zai << ", aa#" << aai << ", stokes #" << st
1189 << "\n";
1190 throw runtime_error(os.str());
1191 }
1192 }
1193 Index nTpha = PHA_MAT_DATA.nvitrines();
1194 for (Index t = 0; t < nTpha; t++) {
1195 for (Index zas = 0; zas < PHA_MAT_DATA.nshelves(); zas++)
1196 for (Index aas = 0; aas < PHA_MAT_DATA.nbooks(); aas++)
1197 for (Index st = 0; st < PHA_MAT_DATA.ncols(); st++)
1198 if (std::isnan(
1199 PHA_MAT_DATA(f, t, zas, aas, zai, aai, st))) {
1200 ostringstream os;
1201 os << "Scatt. species #" << i_ss << " element #" << i_se
1202 << " contains NaN in pha_mat at f#" << f << ", T#" << t
1203 << " (of " << nTpha << "), za_sca#" << zas
1204 << ", aa_sca#" << aas << ", za_inc#" << zai
1205 << ", aa_inc#" << aai << ", stokes #"
1206 << "\n";
1207 throw runtime_error(os.str());
1208 }
1209 }
1210 }
1211 }
1212 }
1213 }
1214
1215 if (check_type.toupper() == "ALL") {
1216 // Loop over the included scattering species
1217 out2 << " checking normalization of scattering matrix\n";
1218 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1219 const Index N_se = scat_data[i_ss].nelem();
1220
1221 // Loop over the included scattering elements
1222 for (Index i_se = 0; i_se < N_se; i_se++)
1223 if (T_DATAGRID.nelem() == PHA_MAT_DATA.nvitrines()) switch (PART_TYPE) {
1224 case PTYPE_TOTAL_RND: {
1225 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1226 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1227 Numeric Csca = AngIntegrate_trapezoid(
1228 PHA_MAT_DATA(f, t, joker, 0, 0, 0, 0), ZA_DATAGRID);
1229 Numeric Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
1230 //Numeric Cabs = Cext_data - Csca;
1231 Numeric Cabs_data = ABS_VEC_DATA(f, t, 0, 0, 0);
1232 Numeric Csca_data = Cext_data - Cabs_data;
1233
1234 /*
1235 out3 << " Coefficients in database: "
1236 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1237 << " Csca: " << Csca_data << "\n"
1238 << " Calculated coefficients: "
1239 << "Cabs calc: " << Cabs
1240 << " Csca calc: " << Csca << "\n"
1241 << " Deviations "
1242 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1243 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1244 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1245 */
1246
1247 //if (abs(Csca/Csca_data-1.)*Csca_data/Cext_data > threshold)
1248 // below equivalent to the above
1249 // (it's actually the (absolute) albedo deviation!)
1250 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1251 ostringstream os;
1252 os << " Deviations in scat_data too large:\n"
1253 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1254 << " at nominal (actual) albedo of "
1255 << Csca_data / Cext_data << " (" << Csca / Cext_data
1256 << ").\n"
1257 << " Check entry for scattering element " << i_se
1258 << " of scattering species " << i_ss << " at " << f
1259 << ".frequency and " << t << ".temperature!\n";
1260 throw runtime_error(os.str());
1261 }
1262 }
1263 }
1264 break;
1265 }
1266
1267 case PTYPE_AZIMUTH_RND: {
1268 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1269 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1270 for (Index iza = 0; iza < ABS_VEC_DATA.npages(); iza++) {
1271 Numeric Csca =
1272 AngIntegrate_trapezoid(
1273 PHA_MAT_DATA(f, t, joker, joker, iza, 0, 0),
1274 ZA_DATAGRID,
1275 AA_DATAGRID);
1276 Numeric Cext_data = EXT_MAT_DATA(f, t, iza, 0, 0);
1277 //Numeric Cabs = Cext_data - Csca;
1278 Numeric Cabs_data = ABS_VEC_DATA(f, t, iza, 0, 0);
1279 Numeric Csca_data = Cext_data - Cabs_data;
1280
1281 /*
1282 out3 << " Coefficients in database: "
1283 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1284 << " Csca: " << Csca_data << "\n"
1285 << " Calculated coefficients: "
1286 << "Cabs calc: " << Cabs
1287 << " Csca calc: " << Csca << "\n"
1288 << " Deviations "
1289 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1290 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1291 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1292 */
1293
1294 //if (abs(Csca/Csca_data-1.)*Csca_data/Cext_data > threshold)
1295 // below equivalent to the above
1296 // (it's actually the (absolute) albedo deviation!)
1297 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1298 ostringstream os;
1299 os << " Deviations in scat_data too large:\n"
1300 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1301 << " at nominal (actual) albedo of "
1302 << Csca_data / Cext_data << " (" << Csca / Cext_data
1303 << ").\n"
1304 << " Check entry for scattering element " << i_se
1305 << " of scattering species " << i_ss << " at " << f
1306 << ". frequency, " << t << ". temperature, and " << iza
1307 << ". incident polar angle!\n";
1308 throw runtime_error(os.str());
1309 }
1310 }
1311 }
1312 }
1313 break;
1314 }
1315
1316 default: {
1317 out0
1318 << " WARNING:\n"
1319 << " scat_data consistency check not implemented (yet?!) for\n"
1320 << " ptype " << PART_TYPE << "!\n";
1321 }
1322 }
1323 else
1324 out2 << " WARNING:\n"
1325 << " scat_data norm check can not be performed for pha_mat-only"
1326 << " T-reduced scattering elements\n"
1327 << " as found in scatt element #" << i_se
1328 << " of scatt species #" << i_ss << "!\n";
1329 }
1330 } else if (check_type.toupper() == "SANE") {
1331 out1 << " WARNING:\n"
1332 << " Normalization check on pha_mat switched off.\n"
1333 << " Scattering solution might be wrong.\n";
1334 } else {
1335 ostringstream os;
1336 os << "Invalid value for argument *check_type*: '" << check_type << "'.\n";
1337 os << "Valid values are 'all' or 'none'.";
1338 throw runtime_error(os.str());
1339 }
1340}
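// The normalization check above compares the scattering cross section obtained
// by angular integration of Z11, Csca = int Z11 dOmega, against the database
// value Csca_data = Cext_data - Cabs_data and requires the absolute
// single-scattering-albedo deviation to stay below *threshold*:
//   |Csca - Csca_data| / Cext_data <= threshold.
// Worked numbers (made up): Cext_data = 2e-10 m2 and Cabs_data = 0.5e-10 m2
// give Csca_data = 1.5e-10 m2; an integrated Csca of 1.47e-10 m2 then deviates
// by |1.47 - 1.5|/2 = 0.015 in albedo, which passes a threshold of 0.05 but
// fails one of 0.01.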
1341
1342/* Workspace method: Doxygen documentation will be auto-generated */
1343void DoitScatteringDataPrepare(
1344 Workspace& ws, //Output:
1345 ArrayOfTensor7& pha_mat_sptDOITOpt,
1346 ArrayOfArrayOfSingleScatteringData& scat_data_mono,
1347 Tensor7& pha_mat_doit,
1348 //Output and Input:
1349 Vector& aa_grid,
1350 //Input:
1351 const Index& doit_za_grid_size,
1352 const ArrayOfArrayOfSingleScatteringData& scat_data,
1353 const Index& scat_data_checked,
1354 const Index& f_index,
1355 const Index& atmosphere_dim,
1356 const Index& stokes_dim,
1357 const Tensor3& t_field,
1358 const ArrayOfIndex& cloudbox_limits,
1359 const Tensor4& pnd_field,
1360 const Agenda& pha_mat_spt_agenda,
1361 const Verbosity& verbosity) {
1362 if (scat_data_checked != 1)
1363 throw runtime_error(
1364 "The scattering data must be flagged to have "
1365 "passed a consistency check (scat_data_checked=1).");
1366
1367 // Number of azimuth angles.
1368 const Index Naa = aa_grid.nelem();
1369 Vector grid_stepsize(2);
1370 grid_stepsize[0] = 180. / (Numeric)(doit_za_grid_size - 1);
1371 //grid_stepsize[1] = 360./(Numeric)(Naa - 1);
1372
1373 // Initialize variable *pha_mat_spt*
1374 Tensor5 pha_mat_spt_local(pnd_field.nbooks(),
1375 doit_za_grid_size,
1376 aa_grid.nelem(),
1377 stokes_dim,
1378 stokes_dim,
1379 0.);
1380 Tensor4 pha_mat_local(doit_za_grid_size, Naa, stokes_dim, stokes_dim, 0.);
1381 Tensor6 pha_mat_local_out(cloudbox_limits[1] - cloudbox_limits[0] + 1,
1382 doit_za_grid_size,
1383 doit_za_grid_size,
1384 Naa,
1385 stokes_dim,
1386 stokes_dim,
1387 0.);
1388
1389 // Interpolate all the data in frequency
1390 scat_data_monoExtract(scat_data_mono, scat_data, f_index, verbosity);
1391
1392 // For 1D calculation the scat_aa dimension is not required:
1393 Index N_aa_sca;
1394 if (atmosphere_dim == 1)
1395 N_aa_sca = 1;
1396 else
1397 N_aa_sca = aa_grid.nelem();
1398
1399 Vector za_grid;
1400 nlinspace(za_grid, 0, 180, doit_za_grid_size);
1401
1402 ARTS_ASSERT(scat_data.nelem() == scat_data_mono.nelem());
1403
1404 const Index N_ss = scat_data.nelem();
1405 // FIXME: We need this still as a workspace variable because pha_mat_spt_agenda
1406 // contains a WS method that requires it as input
1407 pha_mat_sptDOITOpt.resize(TotalNumberOfElements(scat_data));
1408
1409 Index i_se_flat = 0;
1410 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1411 const Index N_se = scat_data[i_ss].nelem();
1412
1413 for (Index i_se = 0; i_se < N_se; i_se++) {
1414 Index N_T = scat_data_mono[i_ss][i_se].T_grid.nelem();
1415 pha_mat_sptDOITOpt[i_se_flat].resize(N_T,
1416 doit_za_grid_size,
1417 N_aa_sca,
1418 doit_za_grid_size,
1419 aa_grid.nelem(),
1420 stokes_dim,
1421 stokes_dim);
1422
1423 // Initialize:
1424 pha_mat_sptDOITOpt[i_se_flat] = 0.;
1425
1426 // Calculate all scattering angles for all combinations of incoming
1427 // and scattered directions and interpolation.
1428 for (Index t_idx = 0; t_idx < N_T; t_idx++) {
1429 // These are the scattered directions as called in *scat_field_calc*
1430 for (Index za_sca_idx = 0; za_sca_idx < doit_za_grid_size;
1431 za_sca_idx++) {
1432 for (Index aa_sca_idx = 0; aa_sca_idx < N_aa_sca; aa_sca_idx++) {
1433 // Integration is performed over all incoming directions
1434 for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
1435 za_inc_idx++) {
1436 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
1437 aa_inc_idx++) {
1438 pha_matTransform(
1439 pha_mat_sptDOITOpt[i_se_flat](t_idx,
1440 za_sca_idx,
1441 aa_sca_idx,
1442 za_inc_idx,
1443 aa_inc_idx,
1444 joker,
1445 joker),
1446 scat_data_mono[i_ss][i_se].pha_mat_data(
1447 0, t_idx, joker, joker, joker, joker, joker),
1448 scat_data_mono[i_ss][i_se].za_grid,
1449 scat_data_mono[i_ss][i_se].aa_grid,
1450 scat_data_mono[i_ss][i_se].ptype,
1451 za_sca_idx,
1452 aa_sca_idx,
1453 za_inc_idx,
1454 aa_inc_idx,
1455 za_grid,
1456 aa_grid,
1457 verbosity);
1458 }
1459 }
1460 }
1461 }
1462 }
1463
1464 i_se_flat++;
1465 }
1466 }
1467 // Interpolate phase matrix to current grid
1468 pha_mat_doit.resize(cloudbox_limits[1] - cloudbox_limits[0] + 1,
1469 doit_za_grid_size,
1470 N_aa_sca,
1471 doit_za_grid_size,
1472 Naa,
1473 stokes_dim,
1474 stokes_dim);
1475 pha_mat_doit = 0;
1476
1477 if (atmosphere_dim == 1) {
1478 Index aa_index_local = 0;
1479
1480 // Get pha_mat at the grid positions
1481 // Since atmosphere_dim = 1, there is no loop over lat and lon grids
1482 for (Index p_index = 0; p_index <= cloudbox_limits[1] - cloudbox_limits[0];
1483 p_index++) {
1484 Numeric rtp_temperature_local =
1485 t_field(p_index + cloudbox_limits[0], 0, 0);
1486 // There is only a loop over the zenith angle grid; no azimuth angle grid.
1487 for (Index za_index_local = 0;
1488 za_index_local < doit_za_grid_size;
1489 za_index_local++) {
1490 // Dummy index
1491 Index index_zero = 0;
1492
1493 // Calculate the phase matrix of individual scattering elements
1494 pha_mat_spt_agendaExecute(ws,
1495 pha_mat_spt_local,
1496 za_index_local,
1497 index_zero,
1498 index_zero,
1499 p_index,
1500 aa_index_local,
1501 rtp_temperature_local,
1502 pha_mat_spt_agenda);
1503
1504 // Sum over all scattering elements
1505 pha_matCalc(pha_mat_local,
1506 pha_mat_spt_local,
1507 pnd_field,
1508 atmosphere_dim,
1509 p_index,
1510 0,
1511 0,
1512 verbosity);
1513 pha_mat_doit(
1514 p_index, za_index_local, 0, joker, joker, joker, joker) =
1515 pha_mat_local;
1516 }
1517 }
1518
1519 // Set the incoming azimuth grid to a single zero entry, because there is
1520 // no azimuth dependency for 1D atmospheres.
1521 aa_grid.resize(1);
1522 aa_grid = 0;
1523 }
1524}
1525
1526/* Workspace method: Doxygen documentation will be auto-generated */
1527void scat_dataCalc(ArrayOfArrayOfSingleScatteringData& scat_data,
1528 const ArrayOfArrayOfSingleScatteringData& scat_data_raw,
1529 const Vector& f_grid,
1530 const Index& interp_order,
1531 const Verbosity&)
1532// FIXME: when we allow K, a, Z to be on different f and T grids, their use in
1533// the scatt solvers needs to be reviewed again and adapted to this!
1534{
1535 //Extrapolation factor:
1536 //const Numeric extpolfac = 0.5;
1537
1538 Index nf = f_grid.nelem();
1539
1540 // Check, whether single scattering data contains the right frequencies:
1541 // The check was changed to allow extrapolation at the boundaries of the
1542 // frequency grid.
1543 const String which_interpolation = "scat_data_raw.f_grid to f_grid";
1544 for (Index i_ss = 0; i_ss < scat_data_raw.nelem(); i_ss++) {
1545 for (Index i_se = 0; i_se < scat_data_raw[i_ss].nelem(); i_se++) {
1546 // Check for the special case that ssd.f_grid and f_grid both have only one
1547 // element. If identical, that's fine. If not, throw an error.
1548 if (scat_data_raw[i_ss][i_se].f_grid.nelem() == 1 && nf == 1)
1549 if (!is_same_within_epsilon(scat_data_raw[i_ss][i_se].f_grid[0],
1550 f_grid[0],
1551 2 * DBL_EPSILON)) {
1552 ostringstream os;
1553 os << "There is a problem with the grids for the following "
1554 << "interpolation:\n"
1555 << which_interpolation << "\n"
1556 << "If original grid has only 1 element, the new grid must also have\n"
1557 << "only a single element and hold the same value as the original grid.";
1558 throw runtime_error(os.str());
1559 }
1560
1561 // check with extrapolation
1562 chk_interpolation_grids(which_interpolation,
1563 scat_data_raw[i_ss][i_se].f_grid,
1564 f_grid,
1565 interp_order);
1566 }
1567 }
1568
1569 //Initialise scat_data
1570 scat_data.resize(scat_data_raw.nelem());
1571
1572 // Loop over the included scattering species
1573 for (Index i_ss = 0; i_ss < scat_data_raw.nelem(); i_ss++) {
1574 const Index N_se = scat_data_raw[i_ss].nelem();
1575
1576 //Initialise scat_data
1577 scat_data[i_ss].resize(N_se);
1578
1579 // Loop over the included scattering elements
1580 for (Index i_se = 0; i_se < N_se; i_se++) {
1581 //Stuff that doesn't need interpolating
1582 PART_TYPE = scat_data_raw[i_ss][i_se].ptype;
1583 F_DATAGRID = f_grid;
1584 T_DATAGRID = scat_data_raw[i_ss][i_se].T_grid;
1585 ZA_DATAGRID = scat_data_raw[i_ss][i_se].za_grid;
1586 AA_DATAGRID = scat_data_raw[i_ss][i_se].aa_grid;
1587
1588 //Sizing SSD data containers
1589 PHA_MAT_DATA.resize(nf,
1590 scat_data_raw[i_ss][i_se].pha_mat_data.nvitrines(),
1591 scat_data_raw[i_ss][i_se].pha_mat_data.nshelves(),
1592 scat_data_raw[i_ss][i_se].pha_mat_data.nbooks(),
1593 scat_data_raw[i_ss][i_se].pha_mat_data.npages(),
1594 scat_data_raw[i_ss][i_se].pha_mat_data.nrows(),
1595 scat_data_raw[i_ss][i_se].pha_mat_data.ncols());
1596 EXT_MAT_DATA.resize(nf,
1597 scat_data_raw[i_ss][i_se].ext_mat_data.nbooks(),
1598 scat_data_raw[i_ss][i_se].ext_mat_data.npages(),
1599 scat_data_raw[i_ss][i_se].ext_mat_data.nrows(),
1600 scat_data_raw[i_ss][i_se].ext_mat_data.ncols());
1601 ABS_VEC_DATA.resize(nf,
1602 scat_data_raw[i_ss][i_se].abs_vec_data.nbooks(),
1603 scat_data_raw[i_ss][i_se].abs_vec_data.npages(),
1604 scat_data_raw[i_ss][i_se].abs_vec_data.nrows(),
1605 scat_data_raw[i_ss][i_se].abs_vec_data.ncols());
1606
1607 const bool single_se_fgrid =
1608 (scat_data_raw[i_ss][i_se].f_grid.nelem() == 1);
1609 if (!single_se_fgrid) {
1610 // Gridpositions:
1611 const auto lag_freq=Interpolation::LagrangeVector(f_grid, scat_data_raw[i_ss][i_se].f_grid, interp_order);
1612 const auto itw = interpweights(lag_freq);
1613
1614 //Phase matrix data
1615 for (Index t_index = 0;
1616 t_index < scat_data_raw[i_ss][i_se].pha_mat_data.nvitrines();
1617 t_index++) {
1618 for (Index i_za_sca = 0;
1619 i_za_sca < scat_data_raw[i_ss][i_se].pha_mat_data.nshelves();
1620 i_za_sca++) {
1621 for (Index i_aa_sca = 0;
1622 i_aa_sca < scat_data_raw[i_ss][i_se].pha_mat_data.nbooks();
1623 i_aa_sca++) {
1624 for (Index i_za_inc = 0;
1625 i_za_inc < scat_data_raw[i_ss][i_se].pha_mat_data.npages();
1626 i_za_inc++) {
1627 for (Index i_aa_inc = 0;
1628 i_aa_inc < scat_data_raw[i_ss][i_se].pha_mat_data.nrows();
1629 i_aa_inc++) {
1630 for (Index i = 0;
1631 i < scat_data_raw[i_ss][i_se].pha_mat_data.ncols();
1632 i++) {
1633 reinterp(scat_data[i_ss][i_se].pha_mat_data(joker,
1634 t_index,
1635 i_za_sca,
1636 i_aa_sca,
1637 i_za_inc,
1638 i_aa_inc,
1639 i),
1640 scat_data_raw[i_ss][i_se].pha_mat_data(joker,
1641 t_index,
1642 i_za_sca,
1643 i_aa_sca,
1644 i_za_inc,
1645 i_aa_inc,
1646 i),
1647 itw,
1648 lag_freq);
1649 }
1650 }
1651 }
1652 }
1653 }
1654 }
1655
1656 //Extinction matrix data
1657 for (Index t_index = 0;
1658 t_index < scat_data_raw[i_ss][i_se].ext_mat_data.nbooks();
1659 t_index++) {
1660 for (Index i_za_sca = 0;
1661 i_za_sca < scat_data_raw[i_ss][i_se].ext_mat_data.npages();
1662 i_za_sca++) {
1663 for (Index i_aa_sca = 0;
1664 i_aa_sca < scat_data_raw[i_ss][i_se].ext_mat_data.nrows();
1665 i_aa_sca++) {
1666 for (Index i = 0;
1667 i < scat_data_raw[i_ss][i_se].ext_mat_data.ncols();
1668 i++) {
1669 reinterp(scat_data[i_ss][i_se].ext_mat_data(
1670 joker, t_index, i_za_sca, i_aa_sca, i),
1671 scat_data_raw[i_ss][i_se].ext_mat_data(
1672 joker, t_index, i_za_sca, i_aa_sca, i),
1673 itw,
1674 lag_freq);
1675 }
1676 }
1677 }
1678 }
1679
1680 //Absorption vector data
1681 for (Index t_index = 0;
1682 t_index < scat_data_raw[i_ss][i_se].abs_vec_data.nbooks();
1683 t_index++) {
1684 for (Index i_za_sca = 0;
1685 i_za_sca < scat_data_raw[i_ss][i_se].abs_vec_data.npages();
1686 i_za_sca++) {
1687 for (Index i_aa_sca = 0;
1688 i_aa_sca < scat_data_raw[i_ss][i_se].abs_vec_data.nrows();
1689 i_aa_sca++) {
1690 for (Index i = 0;
1691 i < scat_data_raw[i_ss][i_se].abs_vec_data.ncols();
1692 i++) {
1693 reinterp(scat_data[i_ss][i_se].abs_vec_data(
1694 joker, t_index, i_za_sca, i_aa_sca, i),
1695 scat_data_raw[i_ss][i_se].abs_vec_data(
1696 joker, t_index, i_za_sca, i_aa_sca, i),
1697 itw,
1698 lag_freq);
1699 }
1700 }
1701 }
1702 }
1703 } else {
1704 ARTS_ASSERT(nf == 1);
1705 // we do only have one f_grid value in old and new data (and they have
1706 // been confirmed to be the same), hence only need to copy over/reassign
1707 // the data.
1708 scat_data[i_ss][i_se].pha_mat_data =
1709 scat_data_raw[i_ss][i_se].pha_mat_data;
1710 scat_data[i_ss][i_se].ext_mat_data =
1711 scat_data_raw[i_ss][i_se].ext_mat_data;
1712 scat_data[i_ss][i_se].abs_vec_data =
1713 scat_data_raw[i_ss][i_se].abs_vec_data;
1714 }
1715 }
1716 }
1717}
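// The frequency regridding in scat_dataCalc uses the Lagrange interpolation
// machinery: build the Lagrange positions from the old to the new f_grid once,
// form the weights, then reinterp every data slice along its frequency
// dimension. Minimal sketch on a plain vector, mirroring the calls used above
// (grids and data made up for illustration only):
/*
{
  Vector f_old, f_new, y_old, y_new(5);
  nlinspace(f_old, 100e9, 300e9, 10);
  nlinspace(f_new, 120e9, 280e9, 5);
  nlinspace(y_old, 0., 9., 10);         // data given on f_old

  const auto lag = Interpolation::LagrangeVector(f_new, f_old, 1);
  const auto itw = interpweights(lag);
  reinterp(y_new, y_old, itw, lag);     // y_new now holds the data on f_new
}
*/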
1718
1719/* Workspace method: Doxygen documentation will be auto-generated */
1720void scat_dataReduceT(ArrayOfArrayOfSingleScatteringData& scat_data,
1721 const Index& i_ss,
1722 const Numeric& T,
1723 const Index& interp_order,
1724 const Index& phamat_only,
1725 const Numeric& threshold,
1726 const Verbosity&) {
1727 // We are directly acting on the scat_data entries, modifying them
1728 // individually. That is, we don't need to resize these arrays. Only the
1729 // pha_mat and probably ext_mat and abs_vec Tensors (in the latter case also
1730 // T_grid!).
1731
1732 // Check that species i_ss exists at all in scat_data
1733 const Index nss = scat_data.nelem();
1734 if (nss <= i_ss) {
1735 ostringstream os;
1736 os << "Can not T-reduce scattering species #" << i_ss << ".\n"
1737 << "*scat_data* contains only " << nss << " scattering species.";
1738 throw runtime_error(os.str());
1739 }
1740
1741 // Loop over the included scattering elements
1742 for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
1743    // First of all, check the validity of the scattering element's ptype
1744    // (so far we only handle PTYPE_TOTAL_RND and PTYPE_AZIMUTH_RND).
1745    if (PART_TYPE != PTYPE_TOTAL_RND and PART_TYPE != PTYPE_AZIMUTH_RND) {
1746      ostringstream os;
1747 os << "Only ptypes " << PTYPE_TOTAL_RND << " and " << PTYPE_AZIMUTH_RND
1748 << " can be handled.\n"
1749 << "Scattering element #" << i_se << " has ptype " << PART_TYPE << ".";
1750 throw runtime_error(os.str());
1751 }
1752
1753 // If ssd.T_grid already has only a single point, we do nothing.
1754 // This is not necessarily expected behaviour. BUT, it is in line with
1755 // previous use (that if nT==1, then assume ssd constant in T).
1756 Index nT = T_DATAGRID.nelem();
1757 if (nT > 1) {
1758      // Check that we do not have data that has already been T-reduced (in
1759      // pha_mat only; a complete ssd T-reduction has already been sorted out
1760      // by the nT==1 case above).
1761 if (PHA_MAT_DATA.nvitrines() != nT) {
1762 ostringstream os;
1763 os << "Single scattering data of scat element #" << i_se
1764 << " of scat species #" << i_ss << "\n"
1765 << "seems to have undergone some temperature grid manipulation in\n"
1766 << "*pha_mat_data* already. That can not be done twice!";
1767 throw runtime_error(os.str());
1768 }
1769
1770 // Check that ext_mat and abs_vec have the same temp dimensions as T_grid.
1771 // This should always be true, if not it's a bug not a user mistake, hence
1772 // use ARTS_ASSERT.
1773 ARTS_ASSERT(EXT_MAT_DATA.nbooks() == nT and ABS_VEC_DATA.nbooks() == nT);
1774
1775 // Check that T_grid is consistent with requested interpolation order
1776 ostringstream ost;
1777 ost << "Scattering data temperature interpolation for\n"
1778 << "scat element #" << i_se << " of scat species #" << i_ss << ".";
1779 chk_interpolation_grids(ost.str(), T_DATAGRID, T, interp_order);
1780
1781 // Gridpositions:
1782 const LagrangeInterpolation lag_T(0, T, T_DATAGRID, interp_order);
1783 const auto itw = interpweights(lag_T);
1784
1785 //Sizing of temporary SSD data containers
1786 Tensor7 phamat_tmp(PHA_MAT_DATA.nlibraries(),
1787 1,
1788 PHA_MAT_DATA.nshelves(),
1789 PHA_MAT_DATA.nbooks(),
1790 PHA_MAT_DATA.npages(),
1791 PHA_MAT_DATA.nrows(),
1792 PHA_MAT_DATA.ncols(),
1793 0.);
1794 Tensor5 extmat_tmp(EXT_MAT_DATA.nshelves(),
1795 1,
1796 EXT_MAT_DATA.npages(),
1797 EXT_MAT_DATA.nrows(),
1798 EXT_MAT_DATA.ncols(),
1799 0.);
1800 Tensor5 absvec_tmp(ABS_VEC_DATA.nshelves(),
1801 1,
1802 ABS_VEC_DATA.npages(),
1803 ABS_VEC_DATA.nrows(),
1804 ABS_VEC_DATA.ncols(),
1805 0.);
1806
1807 // a1) temp interpol of pha mat
1808 //We have to apply the interpolation separately for each of the pha_mat
1809 //entries, i.e. loop over all remaining size dimensions
1810 //We don't apply any transformation here, but interpolate the actual
1811      //stored ssd (i.e. not the 4x4 matrices, but the 7-16 elements separately).
1812 for (Index i_f = 0; i_f < PHA_MAT_DATA.nlibraries(); i_f++)
1813 for (Index i_za1 = 0; i_za1 < PHA_MAT_DATA.nshelves(); i_za1++)
1814 for (Index i_aa1 = 0; i_aa1 < PHA_MAT_DATA.nbooks(); i_aa1++)
1815 for (Index i_za2 = 0; i_za2 < PHA_MAT_DATA.npages(); i_za2++)
1816 for (Index i_aa2 = 0; i_aa2 < PHA_MAT_DATA.nrows(); i_aa2++)
1817 for (Index i_st = 0; i_st < PHA_MAT_DATA.ncols(); i_st++)
1818 phamat_tmp(i_f, 0, i_za1, i_aa1, i_za2, i_aa2, i_st) =
1819                      interp(PHA_MAT_DATA(
1820                                 i_f, joker, i_za1, i_aa1, i_za2, i_aa2, i_st),
1821 itw,
1822 lag_T);
1823
1824 // a2) temp interpol of ext and abs.
1825 //We do that regardless of whether they should be reduced or not, because
1826 //we need them also for norm checking / renorming.
1827 for (Index i_f = 0; i_f < EXT_MAT_DATA.nshelves(); i_f++)
1828 for (Index i_za = 0; i_za < EXT_MAT_DATA.npages(); i_za++)
1829 for (Index i_aa = 0; i_aa < EXT_MAT_DATA.nrows(); i_aa++) {
1830 for (Index i_st = 0; i_st < EXT_MAT_DATA.ncols(); i_st++)
1831 extmat_tmp(i_f, 0, i_za, i_aa, i_st) =
1832 interp(EXT_MAT_DATA(i_f, joker, i_za, i_aa, i_st), itw, lag_T);
1833 for (Index i_st = 0; i_st < ABS_VEC_DATA.ncols(); i_st++)
1834 absvec_tmp(i_f, 0, i_za, i_aa, i_st) =
1835 interp(ABS_VEC_DATA(i_f, joker, i_za, i_aa, i_st), itw, lag_T);
1836 }
1837
1838 // Norm & other consistency checks.
1839 // All done separately for PTYPE_TOTAL_RND and PTYPE_AZIMUTH_RND (the
1840 // latter needs to loop over scat_za_inc).
1841 //
1842 // b) calculate norm of T-reduced pha mat
1843 // c) check pha mat norm vs. sca xs from ext-abs at T_interpol
1844 // d) Ensure that T-reduced data is consistent/representative of all data.
1845 // and throw error/disallow reduction if sca xs varying too much.
1846 // d1) in case of pha_mat only reduction, the scat xs (pha_mat norm) needs
1847 // to be consistent with the sca xs from over the ext/abs T_grid. This is
1848 // essentially an energy conservation issue. That is, we should be as
1849 // strict here as with pha_mat norm deviations in general (however, should
1850 // we allow the norm at T_grid to deviate by a threshold from the
1851 // T_interpol norm (that should be a little looser) or from the ext-abs
1852 // derived expected norm?).
1853 // d2) in case of all-ssp reduction, the data should still be
1854 // representative. b)&c) ensure data consistency in itself, making this
1855 // rather an error on the SSP as such. Hence, we might be a little more
1856 // loose here.
1857 // d) the resulting check for d1) and d2) is the same (ext-abs sca xs at
1858 // T_interpol vs ext-abs sca xs at T_grid), but we use different
1859 // thresholds.
1860 //
1861 // FIXME?
1862 // Regarding b)&c) should we also calc norm of original-T pha mats? To get
1863 // a measure how strong they already deviate from expected norm (As we can
1864 // not be more exact here than what is already in the original data...).
1865 // On the other hand, a certain accuracy should be guaranteed from
1866 // scat_dataCheck already.
1867 // Hence, for now we skip that (but maybe added later when proves
1868 // necessary).
1869 //
1870 // FIXME?
1871 // Regarding d1), we could alternatively make sure here that norm at
1872 // T_interpol is good. And later on ignore any deviations between norm and
1873 // ext-abs sca xs and instead blindly renorm to expected norm (would that
1874 // be ok? correct norm here, doesn't imply correct norm at whatever scat
1875 // angle grids the user is applying. for that, we could in place also calc
1876 // the original-data norm. but that might be expensive (as we can't do
1877 // that from ext-abs sca xs, because we don't know to which T that refers.
1878 // that would go away if we'd actually store pha_mat normed to 1 or 4Pi.
1879 // but that's prob not going to happen. is it? Another option would be to
1880 // introduce an additional T_grid, eg T_grid_phamat.). which we actually
1881 // want to avoid :-/
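      // [Editorial note, not part of the original source] Worked example of
      // check c) with illustrative numbers: if the T-interpolated data give
      // Cext = 1.0e-10 m2 and Cabs = 4.0e-11 m2, then Csca_data = 6.0e-11 m2.
      // If the T-reduced phase matrix integrates to Csca = 5.5e-11 m2, the
      // albedo deviation is |Csca - Csca_data| / Cext = 5.0e-12 / 1.0e-10
      // = 0.05, so the reduction is rejected for any threshold below 0.05.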
1882
1883 Numeric this_threshold;
1884 String errmsg;
1885 if (phamat_only) {
1886 this_threshold = threshold;
1887 errmsg =
1888 "T-reduced *pha_mat_data* norm (=sca xs) deviates too "
1889 "much from non-reduced *ext_mat_data* and *abs_vec_data*:";
1890 } else {
1891 this_threshold = 2 * threshold;
1892 errmsg =
1893 "T-reduced *scat_data* deviates too much from original "
1894 "*scat_data*:";
1895 }
1896
1897 // The norm-check code is copied and slightly adapted from scat_dataCheck.
1898      // Might be better to make a function out of this and use it in both places
1899 // for consistency.
1900 //
1901 // FIXME: no checks on higher Stokes elements are done. Should there?
1902 // Which?
1903 switch (PART_TYPE) {
1904 case PTYPE_TOTAL_RND: {
1905 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1906 // b) calculate norm of T-reduced pha mat
1907          Numeric Csca = AngIntegrate_trapezoid(
1908              phamat_tmp(f, 0, joker, 0, 0, 0, 0), ZA_DATAGRID);
1909 Numeric Cext_data = extmat_tmp(f, 0, 0, 0, 0);
1910 //Numeric Cabs = Cext_data - Csca;
1911 Numeric Cabs_data = absvec_tmp(f, 0, 0, 0, 0);
1912 Numeric Csca_data = Cext_data - Cabs_data;
1913
1914 /*
1915 cout << " Coefficients in data: "
1916 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1917 << " Csca: " << Csca_data << "\n"
1918 << " Calculated coefficients: "
1919 << "Cabs calc: " << Cabs
1920 << " Csca calc: " << Csca << "\n"
1921 << " Deviations "
1922 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1923 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1924 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1925 */
1926
1927 // c) check pha mat norm vs. sca xs from ext-abs at T_interpol (as
1928 // albedo dev check)
1929 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1930 ostringstream os;
1931 os << " Deviations in T-reduced scat_data too large:\n"
1932 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1933 << " at nominal (actual) albedo of " << Csca_data / Cext_data
1934 << " (" << Csca / Cext_data << ").\n"
1935 << " Problem occurs for scattering element #" << i_se
1936 << " at " << f << ".frequency!\n";
1937 throw runtime_error(os.str());
1938 }
1939          Numeric norm_dev = (Csca - Csca_data) / Cext_data;
1940
1941 // d) Ensure that T-reduced data is consistent/representative of all data.
1942 // below use theoretical (ext-abs derived) sca xs as reference.
1943 Csca = Csca_data;
1944 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1945 Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
1946 Csca_data = Cext_data - ABS_VEC_DATA(f, t, 0, 0, 0);
1947 Numeric xs_dev = (Csca - Csca_data) / Cext_data;
1948 if (abs(norm_dev + (Csca - Csca_data) / Cext_data) >
1949 this_threshold)
1950 cout << "Accumulated deviation (abs(" << norm_dev << "+"
1951 << xs_dev << ")=" << abs(norm_dev + xs_dev)
1952 << " exceeding threshold (" << this_threshold << ").\n";
1953 if (abs(Csca - Csca_data) / Cext_data > this_threshold) {
1954 ostringstream os;
1955 os << " " << errmsg << "\n"
1956 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1957 << " at nominal (actual) albedo of " << Csca_data / Cext_data
1958 << " (" << Csca / Cext_data << ").\n"
1959 << " Problem occurs for scattering element #" << i_se
1960 << " at " << f << ".frequency and " << t
1961 << ".temperature!\n";
1962 throw runtime_error(os.str());
1963 }
1964 }
1965 }
1966 break;
1967 }
1968
1969 case PTYPE_AZIMUTH_RND: {
1970 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1971 for (Index iza = 0; iza < ABS_VEC_DATA.npages(); iza++) {
1972 // b) calculate norm of T-reduced pha mat
1973            Numeric Csca = AngIntegrate_trapezoid(
1974                phamat_tmp(f, 0, joker, joker, iza, 0, 0),
1975                ZA_DATAGRID,
1976                AA_DATAGRID);
1977 Numeric Cext_data = extmat_tmp(f, 0, iza, 0, 0);
1978 //Numeric Cabs = Cext_data - Csca;
1979 Numeric Cabs_data = absvec_tmp(f, 0, iza, 0, 0);
1980 Numeric Csca_data = Cext_data - Cabs_data;
1981
1982 /*
1983 cout << " Coefficients in data: "
1984 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1985 << " Csca: " << Csca_data << "\n"
1986 << " Calculated coefficients: "
1987 << "Cabs calc: " << Cabs
1988 << " Csca calc: " << Csca << "\n"
1989 << " Deviations "
1990 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1991 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1992 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1993 */
1994
1995 // c) check pha mat norm vs. sca xs from ext-abs at T_interpol (as
1996 // albedo dev check)
1997 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1998 ostringstream os;
1999 os << " Deviations in T-reduced scat_data too large:\n"
2000 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
2001 << " at nominal (actual) albedo of " << Csca_data / Cext_data
2002 << " (" << Csca / Cext_data << ").\n"
2003 << " Problem occurs for scattering element #" << i_se
2004 << " at " << f << ".frequency, and " << iza
2005 << ". incident polar angle!\n";
2006 throw runtime_error(os.str());
2007 }
2008
2009 // d) Ensure that T-reduced data is consistent/representative of all data.
2010 // below use theoretical (ext-abs derived) sca xs as reference.
2011 Csca = Csca_data;
2012 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
2013 Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
2014 Csca_data = Cext_data - ABS_VEC_DATA(f, t, 0, 0, 0);
2015 if (abs(Csca - Csca_data) / Cext_data > this_threshold) {
2016 ostringstream os;
2017 os << " " << errmsg << "\n"
2018 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
2019 << " at nominal (actual) albedo of "
2020 << Csca_data / Cext_data << " (" << Csca / Cext_data
2021 << ").\n"
2022 << " Problem occurs for scattering element #" << i_se
2023 << " at " << f << ".frequency and " << t
2024 << ".temperature, and " << iza
2025 << ". incident polar angle!\n";
2026 throw runtime_error(os.str());
2027 }
2028 }
2029 }
2030 }
2031 break;
2032 }
2033
2034 default: {
2035 // other ptype cases already excluded above. i.e. we shouldn't end up
2036 // here. If we do, that's a bug.
2037 ARTS_ASSERT(0);
2038 }
2039 }
2040
2041 PHA_MAT_DATA = phamat_tmp;
2042 //We don't need to reset the scat element's grids!
2043 //Except for T_grid in the case that we reduce ALL three ssd variables.
2044 if (!phamat_only) {
2045 T_DATAGRID.resize(1);
2046 T_DATAGRID = T;
2047 EXT_MAT_DATA = extmat_tmp;
2048 ABS_VEC_DATA = absvec_tmp;
2049 }
2050 }
2051 }
2052}
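// ---------------------------------------------------------------------------
// [Editorial note, not part of the original source] scat_dataReduceT collapses
// the temperature dimension of one scattering species to the single
// temperature T. Sketch of a direct C++ call (argument values are purely
// illustrative):
//
//   // Reduce species #0 to 273.15 K; reduce pha_mat, ext_mat and abs_vec
//   // (phamat_only=0), linear T-interpolation, 5% consistency threshold.
//   scat_dataReduceT(scat_data, 0, 273.15, 1, 0, 0.05, verbosity);
// ---------------------------------------------------------------------------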
2053
2054/* Workspace method: Doxygen documentation will be auto-generated */
2055void scat_data_monoCalc(ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2056                        const ArrayOfArrayOfSingleScatteringData& scat_data,
2057 const Vector& f_grid,
2058 const Index& f_index,
2059 const Verbosity&) {
2060 //Extrapolation factor:
2061 //const Numeric extpolfac = 0.5;
2062
2063 // Check, whether single scattering data contains the right frequencies:
2064 // The check was changed to allow extrapolation at the boundaries of the
2065 // frequency grid.
2066 for (Index h = 0; h < scat_data.nelem(); h++) {
2067 for (Index i = 0; i < scat_data[h].nelem(); i++) {
2068 // check with extrapolation
2069 chk_interpolation_grids("scat_data.f_grid to f_grid",
2070 scat_data[h][i].f_grid,
2071 f_grid[f_index]);
2072 }
2073 }
2074
2075 //Initialise scat_data_mono
2076 scat_data_mono.resize(scat_data.nelem());
2077
2078 // Loop over the included scattering species
2079 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2080 const Index N_se = scat_data[i_ss].nelem();
2081
2082 //Initialise scat_data_mono
2083 scat_data_mono[i_ss].resize(N_se);
2084
2085 // Loop over the included scattering elements
2086 for (Index i_se = 0; i_se < N_se; i_se++) {
2087 // Gridpositions:
2088 GridPos freq_gp;
2089 gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
2090
2091 // Interpolation weights:
2092 Vector itw(2);
2093 interpweights(itw, freq_gp);
2094
2095 //Stuff that doesn't need interpolating
2096 scat_data_mono[i_ss][i_se].ptype = PART_TYPE;
2097 scat_data_mono[i_ss][i_se].f_grid.resize(1);
2098 scat_data_mono[i_ss][i_se].f_grid = f_grid[f_index];
2099 scat_data_mono[i_ss][i_se].T_grid = scat_data[i_ss][i_se].T_grid;
2100 scat_data_mono[i_ss][i_se].za_grid = ZA_DATAGRID;
2101 scat_data_mono[i_ss][i_se].aa_grid = AA_DATAGRID;
2102
2103 //Phase matrix data
2104 scat_data_mono[i_ss][i_se].pha_mat_data.resize(1,
2105 PHA_MAT_DATA.nvitrines(),
2106 PHA_MAT_DATA.nshelves(),
2107 PHA_MAT_DATA.nbooks(),
2108 PHA_MAT_DATA.npages(),
2109 PHA_MAT_DATA.nrows(),
2110 PHA_MAT_DATA.ncols());
2111
2112 for (Index t_index = 0; t_index < PHA_MAT_DATA.nvitrines(); t_index++) {
2113 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
2114 i_za_sca++) {
2115 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
2116 i_aa_sca++) {
2117 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
2118 i_za_inc++) {
2119 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
2120 i_aa_inc++) {
2121 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++) {
2122 scat_data_mono[i_ss][i_se].pha_mat_data(
2123 0, t_index, i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
2124 interp(itw,
2125                          PHA_MAT_DATA(joker,
2126                                       t_index,
2127 i_za_sca,
2128 i_aa_sca,
2129 i_za_inc,
2130 i_aa_inc,
2131 i),
2132 freq_gp);
2133 }
2134 }
2135 }
2136 }
2137 }
2138 //Extinction matrix data
2139 scat_data_mono[i_ss][i_se].ext_mat_data.resize(1,
2140 T_DATAGRID.nelem(),
2141 EXT_MAT_DATA.npages(),
2142 EXT_MAT_DATA.nrows(),
2143 EXT_MAT_DATA.ncols());
2144 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages(); i_za_sca++) {
2145 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
2146 i_aa_sca++) {
2147 //
2148 // Interpolation of extinction matrix:
2149 //
2150 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
2151 scat_data_mono[i_ss][i_se].ext_mat_data(
2152 0, t_index, i_za_sca, i_aa_sca, i) =
2153 interp(itw,
2154 EXT_MAT_DATA(joker, t_index, i_za_sca, i_aa_sca, i),
2155 freq_gp);
2156 }
2157 }
2158 }
2159 //Absorption vector data
2160 scat_data_mono[i_ss][i_se].abs_vec_data.resize(1,
2161 T_DATAGRID.nelem(),
2162 ABS_VEC_DATA.npages(),
2163 ABS_VEC_DATA.nrows(),
2164 ABS_VEC_DATA.ncols());
2165 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages(); i_za_sca++) {
2166 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
2167 i_aa_sca++) {
2168 //
2169 // Interpolation of absorption vector:
2170 //
2171 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
2172 scat_data_mono[i_ss][i_se].abs_vec_data(
2173 0, t_index, i_za_sca, i_aa_sca, i) =
2174 interp(itw,
2175 ABS_VEC_DATA(joker, t_index, i_za_sca, i_aa_sca, i),
2176 freq_gp);
2177 }
2178 }
2179 }
2180 }
2181 }
2182 }
2183}
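// ---------------------------------------------------------------------------
// [Editorial note, not part of the original source] scat_data_monoCalc
// linearly interpolates all single scattering data to the single frequency
// f_grid[f_index] (2-point interpolation weights). Sketch of a direct C++
// call, inputs assumed to exist:
//
//   ArrayOfArrayOfSingleScatteringData scat_data_mono;
//   scat_data_monoCalc(scat_data_mono, scat_data, f_grid, f_index, verbosity);
// ---------------------------------------------------------------------------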
2184
2185/* Workspace method: Doxygen documentation will be auto-generated */
2186void scat_data_monoExtract(ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2187                           const ArrayOfArrayOfSingleScatteringData& scat_data,
2188 const Index& f_index,
2189 const Verbosity&) {
2190 //Initialise scat_data_mono
2191 scat_data_mono.resize(scat_data.nelem());
2192
2193 // Loop over the included scattering species
2194 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2195 const Index N_se = scat_data[i_ss].nelem();
2196
2197 //Initialise scat_data_mono
2198 scat_data_mono[i_ss].resize(N_se);
2199
2200 // Loop over the included scattering elements
2201 for (Index i_se = 0; i_se < N_se; i_se++) {
2202 Index nf = F_DATAGRID.nelem();
2203 if (nf == 1) {
2204 scat_data_mono[i_ss][i_se] = scat_data[i_ss][i_se];
2205 } else {
2206 //Stuff that doesn't need interpolating
2207 scat_data_mono[i_ss][i_se].ptype = PART_TYPE;
2208 scat_data_mono[i_ss][i_se].T_grid = T_DATAGRID;
2209 scat_data_mono[i_ss][i_se].za_grid = ZA_DATAGRID;
2210 scat_data_mono[i_ss][i_se].aa_grid = AA_DATAGRID;
2211
2212 scat_data_mono[i_ss][i_se].f_grid.resize(1);
2213 scat_data_mono[i_ss][i_se].f_grid = F_DATAGRID[f_index];
2214
2215 Index this_f_index;
2216
2217 //Phase matrix data
2218 /*scat_data_mono[i_ss][i_se].pha_mat_data.resize(1,
2219 PHA_MAT_DATA.nvitrines(),
2220 PHA_MAT_DATA.nshelves(),
2221 PHA_MAT_DATA.nbooks(),
2222 PHA_MAT_DATA.npages(),
2223 PHA_MAT_DATA.nrows(),
2224 PHA_MAT_DATA.ncols());*/
2225 this_f_index = (PHA_MAT_DATA.nlibraries() == 1) ? 0 : f_index;
2226 scat_data_mono[i_ss][i_se].pha_mat_data = PHA_MAT_DATA(
2227 Range(this_f_index, 1), joker, joker, joker, joker, joker, joker);
2228
2229 //Extinction matrix data
2230 /*scat_data_mono[i_ss][i_se].ext_mat_data.resize(1, T_DATAGRID.nelem(),
2231 EXT_MAT_DATA.npages(),
2232 EXT_MAT_DATA.nrows(),
2233 EXT_MAT_DATA.ncols());*/
2234 this_f_index = (EXT_MAT_DATA.nshelves() == 1) ? 0 : f_index;
2235 scat_data_mono[i_ss][i_se].ext_mat_data =
2236 EXT_MAT_DATA(Range(this_f_index, 1), joker, joker, joker, joker);
2237
2238 //Absorption vector data
2239 /*scat_data_mono[i_ss][i_se].abs_vec_data.resize(1, T_DATAGRID.nelem(),
2240 ABS_VEC_DATA.npages(),
2241 ABS_VEC_DATA.nrows(),
2242 ABS_VEC_DATA.ncols());*/
2243 this_f_index = (ABS_VEC_DATA.nshelves() == 1) ? 0 : f_index;
2244 scat_data_mono[i_ss][i_se].abs_vec_data =
2245 ABS_VEC_DATA(Range(this_f_index, 1), joker, joker, joker, joker);
2246 }
2247 }
2248 }
2249}
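// ---------------------------------------------------------------------------
// [Editorial note, not part of the original source] In contrast to
// scat_data_monoCalc above, scat_data_monoExtract does not interpolate: it
// slices the frequency plane f_index out of already frequency-interpolated
// scat_data, e.g.
//
//   scat_data_mono[i_ss][i_se].ext_mat_data =
//       EXT_MAT_DATA(Range(this_f_index, 1), joker, joker, joker, joker);
//
// where this_f_index falls back to 0 if the stored data hold only one
// frequency.
// ---------------------------------------------------------------------------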
2250
2251/* Workspace method: Doxygen documentation will be auto-generated */
2252void opt_prop_sptFromMonoData( // Output and Input:
2253 ArrayOfPropagationMatrix& ext_mat_spt,
2254 ArrayOfStokesVector& abs_vec_spt,
2255 // Input:
2256 const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2257 const Vector& za_grid,
2258 const Vector& aa_grid,
2259 const Index& za_index, // propagation directions
2260 const Index& aa_index,
2261 const Numeric& rtp_temperature,
2262 const Tensor4& pnd_field,
2263 const Index& scat_p_index,
2264 const Index& scat_lat_index,
2265 const Index& scat_lon_index,
2266 const Verbosity& verbosity) {
2267 DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data_mono);)
2268 const Index stokes_dim = ext_mat_spt[0].StokesDimensions();
2269 const Numeric za_sca = za_grid[za_index];
2270 const Numeric aa_sca = aa_grid[aa_index];
2271
2272 if (stokes_dim > 4 or stokes_dim < 1) {
2273 throw runtime_error(
2274 "The dimension of the stokes vector \n"
2275 "must be 1,2,3 or 4");
2276 }
2277
2278 ARTS_ASSERT(ext_mat_spt.nelem() == N_se_total);
2279 ARTS_ASSERT(abs_vec_spt.nelem() == N_se_total);
2280
2281 // Check that we do indeed have scat_data_mono here. Only checking the first
2282 // scat element, assuming the other elements have been processed in the same
2283 // manner. That's safe against having scat_data here if that originated from
2284 // scat_data_raw reading routines (ScatSpecies/Element*Add/Read); it's not safe
2285 // against data read by ReadXML directly or if scat_data(_raw) has been (partly)
2286 // produced from scat_data_singleTmatrix. That would be too costly here,
2287 // though.
2288 // Also, we can't check here whether data is at the correct frequency since we
2289 // don't know f_grid and f_index here (we could pass it in, though).
2290 if (scat_data_mono[0][0].f_grid.nelem() > 1) {
2291 ostringstream os;
2292 os << "Scattering data seems to be *scat_data* (several freq points),\n"
2293 << "but *scat_data_mono* (1 freq point only) is expected here.";
2294 throw runtime_error(os.str());
2295 }
2296
2297 // Initialisation
2298 for (auto& pm : ext_mat_spt) pm.SetZero();
2299 for (auto& av : abs_vec_spt) av.SetZero();
2300
2301 GridPos t_gp;
2302
2303 Vector itw(2);
2304
2305 Index i_se_flat = 0;
2306 // Loop over the included scattering elements
2307 for (Index i_ss = 0; i_ss < scat_data_mono.nelem(); i_ss++) {
2308 // Loop over the included scattering elements
2309 for (Index i_se = 0; i_se < scat_data_mono[i_ss].nelem(); i_se++) {
2310 // If the particle number density at a specific point in the
2311 // atmosphere for the i_se scattering element is zero, we don't need
2312 // to do the transformation!
2313 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
2314 PND_LIMIT) {
2315 // First we have to transform the data from the coordinate system
2316 // used in the database (depending on the kind of ptype) to the
2317 // laboratory coordinate system.
2318
2319 //
2320 // Do the transformation into the laboratory coordinate system.
2321 //
2322 // Extinction matrix:
2323 //
2324 Index ext_npages = scat_data_mono[i_ss][i_se].ext_mat_data.npages();
2325 Index ext_nrows = scat_data_mono[i_ss][i_se].ext_mat_data.nrows();
2326 Index ext_ncols = scat_data_mono[i_ss][i_se].ext_mat_data.ncols();
2327 Index abs_npages = scat_data_mono[i_ss][i_se].abs_vec_data.npages();
2328 Index abs_nrows = scat_data_mono[i_ss][i_se].abs_vec_data.nrows();
2329 Index abs_ncols = scat_data_mono[i_ss][i_se].abs_vec_data.ncols();
2330
2331 //Check that scattering data temperature range covers required temperature
2332 ConstVectorView t_grid = scat_data_mono[i_ss][i_se].T_grid;
2333
2334 if (t_grid.nelem() > 1) {
2335 ostringstream os;
2336 os << "In opt_prop_sptFromMonoData.\n"
2337 << "The temperature grid of the scattering data does not\n"
2338 << "cover the atmospheric temperature at cloud location.\n"
2339 << "The data should include the value T = " << rtp_temperature
2340 << " K.";
2341 chk_interpolation_grids(os.str(), t_grid, rtp_temperature);
2342
2343 //interpolate over temperature
2344 Tensor3 ext_mat_data1temp(ext_npages, ext_nrows, ext_ncols);
2345 gridpos(t_gp, t_grid, rtp_temperature);
2346 interpweights(itw, t_gp);
2347 for (Index i_p = 0; i_p < ext_npages; i_p++) {
2348 for (Index i_r = 0; i_r < ext_nrows; i_r++) {
2349 for (Index i_c = 0; i_c < ext_ncols; i_c++) {
2350 ext_mat_data1temp(i_p, i_r, i_c) =
2351 interp(itw,
2352 scat_data_mono[i_ss][i_se].ext_mat_data(
2353 0, joker, i_p, i_r, i_c),
2354 t_gp);
2355 }
2356 }
2357 }
2358 ext_matTransform(ext_mat_spt[i_se_flat],
2359 ext_mat_data1temp,
2360 scat_data_mono[i_ss][i_se].za_grid,
2361 scat_data_mono[i_ss][i_se].aa_grid,
2362 scat_data_mono[i_ss][i_se].ptype,
2363 za_sca,
2364 aa_sca,
2365 verbosity);
2366 } else {
2367 ext_matTransform(ext_mat_spt[i_se_flat],
2368 scat_data_mono[i_ss][i_se].ext_mat_data(
2369 0, 0, joker, joker, joker),
2370 scat_data_mono[i_ss][i_se].za_grid,
2371 scat_data_mono[i_ss][i_se].aa_grid,
2372 scat_data_mono[i_ss][i_se].ptype,
2373 za_sca,
2374 aa_sca,
2375 verbosity);
2376 }
2377 //
2378 // Absorption vector:
2379 //
2380
2381 if (t_grid.nelem() > 1) {
2382 Tensor3 abs_vec_data1temp(abs_npages, abs_nrows, abs_ncols);
2383 //interpolate over temperature
2384 for (Index i_p = 0; i_p < abs_npages; i_p++) {
2385 for (Index i_r = 0; i_r < abs_nrows; i_r++) {
2386 for (Index i_c = 0; i_c < abs_ncols; i_c++) {
2387 abs_vec_data1temp(i_p, i_r, i_c) =
2388 interp(itw,
2389 scat_data_mono[i_ss][i_se].abs_vec_data(
2390 0, joker, i_p, i_r, i_c),
2391 t_gp);
2392 }
2393 }
2394 }
2395 abs_vecTransform(abs_vec_spt[i_se_flat],
2396 abs_vec_data1temp,
2397 scat_data_mono[i_ss][i_se].za_grid,
2398 scat_data_mono[i_ss][i_se].aa_grid,
2399 scat_data_mono[i_ss][i_se].ptype,
2400 za_sca,
2401 aa_sca,
2402 verbosity);
2403 } else {
2404 abs_vecTransform(abs_vec_spt[i_se_flat],
2405 scat_data_mono[i_ss][i_se].abs_vec_data(
2406 0, 0, joker, joker, joker),
2407 scat_data_mono[i_ss][i_se].za_grid,
2408 scat_data_mono[i_ss][i_se].aa_grid,
2409 scat_data_mono[i_ss][i_se].ptype,
2410 za_sca,
2411 aa_sca,
2412 verbosity);
2413 }
2414 }
2415
2416 i_se_flat++;
2417 }
2418 }
2419}
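// ---------------------------------------------------------------------------
// [Editorial note, not part of the original source] opt_prop_sptFromMonoData
// fills ext_mat_spt and abs_vec_spt per scattering element: elements whose
// particle number density at the current grid point does not exceed PND_LIMIT
// are skipped; otherwise the data are interpolated in temperature (if T_grid
// has more than one point) and transformed to the laboratory frame with
// ext_matTransform/abs_vecTransform. Sketch of a direct call, inputs assumed
// to exist:
//
//   opt_prop_sptFromMonoData(ext_mat_spt, abs_vec_spt, scat_data_mono,
//                            za_grid, aa_grid, za_index, aa_index,
//                            rtp_temperature, pnd_field, scat_p_index,
//                            scat_lat_index, scat_lon_index, verbosity);
// ---------------------------------------------------------------------------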
2420
2421/* Workspace method: Doxygen documentation will be auto-generated */
2422void pha_mat_sptFromMonoData(  // Output:
2423    Tensor5& pha_mat_spt,
2424 // Input:
2425 const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2426 const Index& doit_za_grid_size,
2427 const Vector& aa_grid,
2428 const Index& za_index, // propagation directions
2429 const Index& aa_index,
2430 const Numeric& rtp_temperature,
2431 const Tensor4& pnd_field,
2432 const Index& scat_p_index,
2433 const Index& scat_lat_index,
2434 const Index& scat_lon_index,
2435 const Verbosity& verbosity) {
2436 Vector za_grid;
2437 nlinspace(za_grid, 0, 180, doit_za_grid_size);
2438
2439 const Index N_se_total = TotalNumberOfElements(scat_data_mono);
2440 if (N_se_total != pnd_field.nbooks()) {
2441 ostringstream os;
2442 os << "Total number of scattering elements in *scat_data_mono* "
2443 << "inconsistent with size of pnd_field.";
2444 throw runtime_error(os.str());
2445 }
2446 // as pha_mat_spt is typically initialized from pnd_field, this theoretically
2447 // checks the same as the runtime_error above. Still, we keep it to be on the
2448  // safe side.
2449 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
2450
2451 const Index stokes_dim = pha_mat_spt.ncols();
2452 if (stokes_dim > 4 || stokes_dim < 1) {
2453 throw runtime_error(
2454 "The dimension of the stokes vector \n"
2455 "must be 1,2,3 or 4");
2456 }
2457
2458 // Check that we do indeed have scat_data_mono here. Only checking the first
2459 // scat element, assuming the other elements have been processed in the same
2460 // manner. That's safe against having scat_data here if that originated from
2461 // scat_data_raw reading routines (ScatSpecies/Element*Add/Read); it's not safe
2462 // against data read by ReadXML directly or if scat_data(_raw) has been (partly)
2463 // produced from scat_data_singleTmatrix. That would be too costly here,
2464 // though.
2465 // Also, we can't check here whether data is at the correct frequency since we
2466 // don't know f_grid and f_index here (we could pass it in, though).
2467 if (scat_data_mono[0][0].f_grid.nelem() > 1) {
2468 ostringstream os;
2469 os << "Scattering data seems to be *scat_data* (several freq points),\n"
2470 << "but *scat_data_mono* (1 freq point only) is expected here.";
2471 throw runtime_error(os.str());
2472 }
2473
2474 GridPos T_gp = {0, {0, 1}}, Tred_gp;
2475 Vector itw(2);
2476
2477 // Initialisation
2478 pha_mat_spt = 0.;
2479
2480 Index i_se_flat = 0;
2481 for (Index i_ss = 0; i_ss < scat_data_mono.nelem(); i_ss++) {
2482 for (Index i_se = 0; i_se < scat_data_mono[i_ss].nelem(); i_se++) {
2483 // If the particle number density at a specific point in the
2484 // atmosphere for scattering element i_se is zero, we don't need to
2485 // do the transformation!
2486 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
2487 PND_LIMIT) {
2488 // Temporary phase matrix which includes all temperatures.
2489 Index nT = scat_data_mono[i_ss][i_se].pha_mat_data.nvitrines();
2490 Tensor3 pha_mat_spt_tmp(nT, pha_mat_spt.nrows(), pha_mat_spt.ncols());
2491
2492 pha_mat_spt_tmp = 0.;
2493
2494 Index ti = -1;
2495 if (nT == 1) // just 1 T_grid element
2496 {
2497 ti = 0;
2498 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
2499 // pick one temperature'
2500 {
2501 if (rtp_temperature > -10.) // lowest T-point
2502 {
2503 ti = 0;
2504 } else if (rtp_temperature > -20.) // highest T-point
2505 {
2506 ti = nT - 1;
2507 } else // median T-point
2508 {
2509 ti = nT / 2;
2510 }
2511 } else {
2512 ostringstream os;
2513 os << "In pha_mat_sptFromMonoData.\n"
2514 << "The temperature grid of the scattering data does not\n"
2515 << "cover the atmospheric temperature at cloud location.\n"
2516 << "The data should include the value T = " << rtp_temperature
2517 << " K.";
2518          chk_interpolation_grids(
2519              os.str(), scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
2520
2521 // Gridpositions:
2522 gridpos(T_gp, scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
2523 gridpos_copy(Tred_gp, T_gp);
2524 Tred_gp.idx = 0;
2525 // Interpolation weights:
2526 interpweights(itw, Tred_gp);
2527 }
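        // [Editorial note, not part of the original source] Examples of the
        // temperature coding above: rtp_temperature = 223 K selects regular
        // interpolation in T_grid; rtp_temperature = -5 picks the lowest
        // T-point (ti = 0); -15 picks the highest T-point (ti = nT - 1);
        // -30 picks the median T-point (ti = nT / 2).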
2528
2529 // Do the transformation into the laboratory coordinate system.
2530 for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
2531 za_inc_idx++) {
2532 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
2533 aa_inc_idx++) {
2534 if (ti < 0) // Temperature interpolation
2535 {
2536 for (Index t_idx = 0; t_idx < 2; t_idx++) {
2537              pha_matTransform(
2538                  pha_mat_spt_tmp(t_idx, joker, joker),
2539 scat_data_mono[i_ss][i_se].pha_mat_data(
2540 0, t_idx + T_gp.idx, joker, joker, joker, joker, joker),
2541 scat_data_mono[i_ss][i_se].za_grid,
2542 scat_data_mono[i_ss][i_se].aa_grid,
2543 scat_data_mono[i_ss][i_se].ptype,
2544 za_index,
2545 aa_index,
2546 za_inc_idx,
2547 aa_inc_idx,
2548 za_grid,
2549 aa_grid,
2550 verbosity);
2551 }
2552
2553 for (Index i = 0; i < stokes_dim; i++) {
2554 for (Index j = 0; j < stokes_dim; j++) {
2555 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, i, j) =
2556 interp(itw, pha_mat_spt_tmp(joker, i, j), Tred_gp);
2557 }
2558 }
2559 } else // no temperature interpolation required
2560 {
2561            pha_matTransform(
2562                pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
2563 scat_data_mono[i_ss][i_se].pha_mat_data(
2564 0, ti, joker, joker, joker, joker, joker),
2565 scat_data_mono[i_ss][i_se].za_grid,
2566 scat_data_mono[i_ss][i_se].aa_grid,
2567 scat_data_mono[i_ss][i_se].ptype,
2568 za_index,
2569 aa_index,
2570 za_inc_idx,
2571 aa_inc_idx,
2572 za_grid,
2573 aa_grid,
2574 verbosity);
2575 }
2576 }
2577 }
2578 }
2579
2580 i_se_flat++;
2581 }
2582 }
2583}
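// ---------------------------------------------------------------------------
// [Editorial note, not part of the original source] pha_mat_sptFromMonoData
// builds its own equidistant incidence zenith grid via
// nlinspace(za_grid, 0, 180, doit_za_grid_size) before interpolating (or
// selecting) in temperature and transforming to the laboratory frame; e.g.
// doit_za_grid_size = 19 yields a 10 degree spacing from 0 to 180 degrees.
// ---------------------------------------------------------------------------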
2584
2585/* Workspace method: Doxygen documentation will be auto-generated */
2586void pha_mat_sptFromScat_data(  // Output:
2587    Tensor5& pha_mat_spt,
2588 // Input:
2589 const ArrayOfArrayOfSingleScatteringData& scat_data,
2590 const Index& scat_data_checked,
2591 const Vector& za_grid,
2592 const Vector& aa_grid,
2593 const Index& za_index, // propagation directions
2594 const Index& aa_index,
2595 const Index& f_index,
2596 const Numeric& rtp_temperature,
2597 const Tensor4& pnd_field,
2598 const Index& scat_p_index,
2599 const Index& scat_lat_index,
2600 const Index& scat_lon_index,
2601 const Verbosity& verbosity) {
2602 if (scat_data_checked != 1)
2603 throw runtime_error(
2604 "The scattering data must be flagged to have "
2605 "passed a consistency check (scat_data_checked=1).");
2606
2607 const Index stokes_dim = pha_mat_spt.ncols();
2608 if (stokes_dim > 4 || stokes_dim < 1) {
2609 throw runtime_error(
2610 "The dimension of the stokes vector \n"
2611 "must be 1,2,3 or 4");
2612 }
2613
2614 // Determine total number of scattering elements
2615 const Index N_se_total = TotalNumberOfElements(scat_data);
2616 if (N_se_total != pnd_field.nbooks()) {
2617 ostringstream os;
2618 os << "Total number of scattering elements in scat_data "
2619 << "inconsistent with size of pnd_field.";
2620 throw runtime_error(os.str());
2621 }
2622 // as pha_mat_spt is typically initialized from pnd_field, this theoretically
2623 // checks the same as the runtime_error above. Still, we keep it to be on the
2624  // safe side.
2625 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
2626
2627 const Index N_ss = scat_data.nelem();
2628
2629 // Phase matrix in laboratory coordinate system. Dimensions:
2630 // [frequency, za_inc, aa_inc, stokes_dim, stokes_dim]
2631 Tensor5 pha_mat_data_int;
2632
2633 Index this_f_index;
2634
2635 Index i_se_flat = 0;
2636 // Loop over scattering species
2637 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
2638 const Index N_se = scat_data[i_ss].nelem();
2639
2640 // Loop over the included scattering elements
2641 for (Index i_se = 0; i_se < N_se; i_se++) {
2642 // If the particle number density at a specific point in the
2643 // atmosphere for the i_se scattering element is zero, we don't need
2644      // to do the transformation!
2645 if (abs(pnd_field(
2646 i_se_flat, scat_p_index, scat_lat_index, scat_lon_index)) >
2647 PND_LIMIT) {
2648 // First we have to transform the data from the coordinate system
2649 // used in the database (depending on the kind of ptype) to the
2650 // laboratory coordinate system.
2651
2652 // Resize the variables for the interpolated data (1freq, 1T):
2653 pha_mat_data_int.resize(PHA_MAT_DATA.nshelves(),
2654 PHA_MAT_DATA.nbooks(),
2655 PHA_MAT_DATA.npages(),
2656 PHA_MAT_DATA.nrows(),
2657 PHA_MAT_DATA.ncols());
2658
2659 // Frequency extraction and temperature interpolation
2660
2661 // Gridpositions and interpolation weights;
2662 GridPos t_gp;
2663 Vector itw;
2664 Index this_T_index = -1;
2665 if (PHA_MAT_DATA.nvitrines() == 1) {
2666 this_T_index = 0;
2667 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
2668 // pick one temperature'
2669 {
2670 if (rtp_temperature > -10.) // lowest T-point
2671 {
2672 this_T_index = 0;
2673 } else if (rtp_temperature > -20.) // highest T-point
2674 {
2675 this_T_index = PHA_MAT_DATA.nvitrines() - 1;
2676 } else // median T-point
2677 {
2678 this_T_index = PHA_MAT_DATA.nvitrines() / 2;
2679 }
2680 } else {
2681 ostringstream os;
2682 os << "In pha_mat_sptFromScat_data.\n"
2683 << "The temperature grid of the scattering data does not\n"
2684 << "cover the atmospheric temperature at cloud location.\n"
2685 << "The data should include the value T = " << rtp_temperature
2686 << " K.";
2687 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
2688
2689 gridpos(t_gp, T_DATAGRID, rtp_temperature);
2690
2691 // Interpolation weights:
2692 itw.resize(2);
2693 interpweights(itw, t_gp);
2694 }
2695
2696 if (PHA_MAT_DATA.nlibraries() == 1)
2697 this_f_index = 0;
2698 else
2699 this_f_index = f_index;
2700
2701 if (this_T_index < 0) {
2702 // Interpolation of scattering matrix:
2703 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
2704 i_za_sca++)
2705 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
2706 i_aa_sca++)
2707 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
2708 i_za_inc++)
2709 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
2710 i_aa_inc++)
2711 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
2712 pha_mat_data_int(
2713 i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
2714 interp(itw,
2715 PHA_MAT_DATA(this_f_index,
2716 joker,
2717 i_za_sca,
2718 i_aa_sca,
2719 i_za_inc,
2720 i_aa_inc,
2721 i),
2722 t_gp);
2723 } else {
2724 pha_mat_data_int = PHA_MAT_DATA(
2725 this_f_index, this_T_index, joker, joker, joker, joker, joker);
2726 /*
2727 for (Index i_za_sca = 0;
2728 i_za_sca < PHA_MAT_DATA.nshelves(); i_za_sca++)
2729 for (Index i_aa_sca = 0;
2730 i_aa_sca < PHA_MAT_DATA.nbooks(); i_aa_sca++)
2731 for (Index i_za_inc = 0;
2732 i_za_inc < PHA_MAT_DATA.npages(); i_za_inc++)
2733 for (Index i_aa_inc = 0;
2734 i_aa_inc < PHA_MAT_DATA.nrows(); i_aa_inc++)
2735 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
2736 // Interpolation of phase matrix:
2737 pha_mat_data_int(i_za_sca, i_aa_sca,
2738 i_za_inc, i_aa_inc, i) =
2739 PHA_MAT_DATA(this_f_index, this_T_index,
2740 i_za_sca, i_aa_sca,
2741 */
2742 }
2743
2744 // Do the transformation into the laboratory coordinate system.
2745 for (Index za_inc_idx = 0; za_inc_idx < za_grid.nelem();
2746 za_inc_idx++) {
2747 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
2748 aa_inc_idx++) {
2749            pha_matTransform(
2750                pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
2751                pha_mat_data_int,
2752                ZA_DATAGRID,
2753                AA_DATAGRID,
2754                PART_TYPE,
2755 za_index,
2756 aa_index,
2757 za_inc_idx,
2758 aa_inc_idx,
2759 za_grid,
2760 aa_grid,
2761 verbosity);
2762 }
2763 }
2764 }
2765 i_se_flat++;
2766 }
2767 }
2768}
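// ---------------------------------------------------------------------------
// [Editorial note, not part of the original source] pha_mat_sptFromScat_data
// requires scat_data_checked == 1 and copes with scat_data that may already be
// frequency-reduced: the frequency plane is chosen as
//
//   this_f_index = (PHA_MAT_DATA.nlibraries() == 1) ? 0 : f_index;
//
// while temperature is handled as in pha_mat_sptFromMonoData above
// (interpolation, or T-point selection for negative rtp_temperature).
// ---------------------------------------------------------------------------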
2769
2770/* Workspace method: Doxygen documentation will be auto-generated */
2771void ScatSpeciesMerge( //WS Output:
2772 Tensor4& pnd_field,
2773    ArrayOfArrayOfSingleScatteringData& scat_data,
2774    ArrayOfArrayOfScatteringMetaData& scat_meta,
2775    ArrayOfString& scat_species,
2776 Index& cloudbox_checked,
2777 //WS Input:
2778 const Index& atmosphere_dim,
2779 const Index& cloudbox_on,
2780 const ArrayOfIndex& cloudbox_limits,
2781 const Tensor3& t_field,
2782 const Tensor3& z_field,
2783 const Matrix& z_surface,
2784 const Verbosity& /*verbosity*/) {
2785 // FIXME:
2786 // so far, this works for both scat_data and scat_data_raw. Needs to be
2787 // adapted, though, once we have WSM that can create Z/K/a with different
2788 // f/T dimensions than scat_data_single.f/T_grid.
2789
2790 // cloudbox variable state should be ok before entering here
2791 if (!cloudbox_checked)
2792 throw std::runtime_error(
2793 "You must call *cloudbox_checkedCalc* before this method.");
2794 //however, we modify cloudbox variables. hence force re-checking the new
2795 //variables by resetting cloudbox_checked to False.
2796 cloudbox_checked = 0;
2797
2798 if (atmosphere_dim != 1)
2799 throw std::runtime_error(
2800        "Merging scattering elements only works with a 1D atmosphere");
2801
2802 // Cloudbox on/off?
2803 if (!cloudbox_on) {
2804 /* Must initialise pnd_field anyway; but empty */
2805 pnd_field.resize(0, 0, 0, 0);
2806 return;
2807 }
2808
2809 // ------- setup new pnd_field and scat_data -------------------
2810 ArrayOfIndex limits(2);
2811 //pressure
2812 limits[0] = cloudbox_limits[0];
2813 limits[1] = cloudbox_limits[1] + 1;
2814
2815 Tensor4 pnd_field_merged(
2816 limits[1] - limits[0], limits[1] - limits[0], 1, 1, 0.);
2817
2818 ArrayOfArrayOfSingleScatteringData scat_data_merged;
2819 scat_data_merged.resize(1);
2820 scat_data_merged[0].resize(pnd_field_merged.nbooks());
2821 ArrayOfArrayOfScatteringMetaData scat_meta_merged;
2822 scat_meta_merged.resize(1);
2823 scat_meta_merged[0].resize(pnd_field_merged.nbooks());
2824 ArrayOfString scat_species_merged;
2825 scat_species_merged.resize(1);
2826 scat_species_merged[0] = "mergedfield-mergedpsd";
2827 for (Index sp = 0; sp < scat_data_merged[0].nelem(); sp++) {
2828 SingleScatteringData& this_part = scat_data_merged[0][sp];
2829 this_part.ptype = scat_data[0][0].ptype;
2830 this_part.description = "Merged scattering elements";
2831 this_part.f_grid = scat_data[0][0].f_grid;
2832 this_part.za_grid = scat_data[0][0].za_grid;
2833 this_part.aa_grid = scat_data[0][0].aa_grid;
2834 this_part.pha_mat_data.resize(scat_data[0][0].pha_mat_data.nlibraries(),
2835 1,
2836 scat_data[0][0].pha_mat_data.nshelves(),
2837 scat_data[0][0].pha_mat_data.nbooks(),
2838 scat_data[0][0].pha_mat_data.npages(),
2839 scat_data[0][0].pha_mat_data.nrows(),
2840 scat_data[0][0].pha_mat_data.ncols());
2841 this_part.ext_mat_data.resize(scat_data[0][0].ext_mat_data.nshelves(),
2842 1,
2843 scat_data[0][0].ext_mat_data.npages(),
2844 scat_data[0][0].ext_mat_data.nrows(),
2845 scat_data[0][0].ext_mat_data.ncols());
2846 this_part.abs_vec_data.resize(scat_data[0][0].abs_vec_data.nshelves(),
2847 1,
2848 scat_data[0][0].abs_vec_data.npages(),
2849 scat_data[0][0].abs_vec_data.nrows(),
2850 scat_data[0][0].abs_vec_data.ncols());
2851 this_part.pha_mat_data = 0.;
2852 this_part.ext_mat_data = 0.;
2853 this_part.abs_vec_data = 0.;
2854 this_part.T_grid.resize(1);
2855 this_part.T_grid[0] = t_field(sp, 0, 0);
2856
2857 ScatteringMetaData& this_meta = scat_meta_merged[0][sp];
2858 ostringstream os;
2859 os << "Merged scattering element of cloudbox-level #" << sp;
2860 this_meta.description = os.str();
2861 this_meta.source = "ARTS internal";
2862 this_meta.refr_index = "Unknown";
2863 this_meta.mass = -1.;
2864 this_meta.diameter_max = -1.;
2865 this_meta.diameter_volume_equ = -1.;
2866 this_meta.diameter_area_equ_aerodynamical = -1.;
2867 }
2868
2869 // Check that all scattering elements have same ptype and data dimensions
2870 SingleScatteringData& first_part = scat_data[0][0];
2871 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2872 for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
2873 SingleScatteringData& orig_part = scat_data[i_ss][i_se];
2874
2875 if (orig_part.ptype != first_part.ptype)
2876 throw std::runtime_error(
2877 "All scattering elements must have the same type");
2878
2879 if (orig_part.f_grid.nelem() != first_part.f_grid.nelem())
2880 throw std::runtime_error(
2881 "All scattering elements must have the same f_grid");
2882
2883 if (!is_size(orig_part.pha_mat_data(
2884 joker, 0, joker, joker, joker, joker, joker),
2885 first_part.pha_mat_data.nlibraries(),
2886 first_part.pha_mat_data.nshelves(),
2887 first_part.pha_mat_data.nbooks(),
2888 first_part.pha_mat_data.npages(),
2889 first_part.pha_mat_data.nrows(),
2890 first_part.pha_mat_data.ncols()))
2891 throw std::runtime_error(
2892 "All scattering elements must have the same pha_mat_data size"
2893 " (except for temperature).");
2894
2895 if (!is_size(orig_part.ext_mat_data(joker, 0, joker, joker, joker),
2896 first_part.ext_mat_data.nshelves(),
2897 first_part.ext_mat_data.npages(),
2898 first_part.ext_mat_data.nrows(),
2899 first_part.ext_mat_data.ncols()))
2900 throw std::runtime_error(
2901 "All scattering elements must have the same ext_mat_data size"
2902 " (except for temperature).");
2903
2904 if (!is_size(orig_part.abs_vec_data(joker, 0, joker, joker, joker),
2905 first_part.abs_vec_data.nshelves(),
2906 first_part.abs_vec_data.npages(),
2907 first_part.abs_vec_data.nrows(),
2908 first_part.abs_vec_data.ncols()))
2909 throw std::runtime_error(
2910 "All scattering elements must have the same abs_vec_data size"
2911 " (except for temperature).");
2912 }
2913 }
2914
2915 //----- Start pnd_field_merged and scat_data_array_merged calculations -----
2916
2917 GridPos T_gp;
2918 Vector itw(2);
2919
2920 Index nlevels = pnd_field_merged.nbooks();
2921 // loop over pressure levels in cloudbox
2922 for (Index i_lv = 0; i_lv < nlevels - 1; i_lv++) {
2923 pnd_field_merged(i_lv, i_lv, 0, 0) = 1.;
2924
2925 SingleScatteringData& this_part = scat_data_merged[0][i_lv];
2926 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2927 for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
2928 SingleScatteringData& orig_part = scat_data[i_ss][i_se];
2929 const Index pnd_index = FlattenedIndex(scat_data, i_ss, i_se);
2930
2931 // If the particle number density at a specific point in the
2932 // atmosphere for the i_se scattering element is zero, we don't
2933 // need to do the transformation!
2934 if (pnd_field(pnd_index, i_lv, 0, 0) > PND_LIMIT) //TRS
2935 {
2936 Numeric temperature = this_part.T_grid[0];
2937 if (orig_part.T_grid.nelem() > 1) {
2938 ostringstream os;
2939 os << "The temperature grid of the scattering data "
2940 << "does not cover the\n"
2941 << "atmospheric temperature at cloud location. "
2942 << "The data should\n"
2943 << "include the value T = " << temperature << " K.\n"
2944 << "Offending particle is scat_data[" << i_ss << "][" << i_se
2945 << "]:\n"
2946 << "Description: " << orig_part.description << "\n";
2947 chk_interpolation_grids(os.str(), orig_part.T_grid, temperature);
2948
2949 // Gridpositions:
2950 gridpos(T_gp, orig_part.T_grid, temperature);
2951 // Interpolation weights:
2952 interpweights(itw, T_gp);
2953 }
2954
2956
2957 // Loop over frequencies
2958 for (Index i_f = 0; i_f < orig_part.pha_mat_data.nlibraries();
2959 i_f++) {
2960 // Loop over zenith angles
2961 for (Index i_za = 0; i_za < orig_part.ext_mat_data.npages();
2962 i_za++) {
2963 // Loop over azimuth angles
2964 for (Index i_aa = 0; i_aa < orig_part.ext_mat_data.nrows();
2965 i_aa++) {
2966 // Weighted sum of ext_mat_data and abs_vec_data
2967 if (orig_part.T_grid.nelem() == 1) {
2968 Vector v = orig_part.ext_mat_data(i_f, 0, i_za, i_aa, joker);
2969 v *= pnd_field(pnd_index, i_lv, 0, 0);
2970                   this_part.ext_mat_data(i_f, 0, i_za, i_aa, joker) += v;
2971
2972 v = orig_part.abs_vec_data(i_f, 0, i_za, i_aa, joker);
2973 v *= pnd_field(pnd_index, i_lv, 0, 0);
2974 this_part.abs_vec_data(i_f, 0, i_za, i_aa, joker) += v;
2975 } else {
2976 for (Index i = 0; i < orig_part.ext_mat_data.ncols(); i++) {
2977 // Temperature interpolation
2978 this_part.ext_mat_data(i_f, 0, i_za, i_aa, i) +=
2979 pnd_field(pnd_index, i_lv, 0, 0) *
2980 interp(
2981 itw,
2982 orig_part.ext_mat_data(i_f, joker, i_za, i_aa, i),
2983 T_gp);
2984 }
2985 for (Index i = 0; i < orig_part.abs_vec_data.ncols(); i++) {
2986 // Temperature interpolation
2987 this_part.abs_vec_data(i_f, 0, i_za, i_aa, i) +=
2988 pnd_field(pnd_index, i_lv, 0, 0) *
2989 interp(
2990 itw,
2991 orig_part.abs_vec_data(i_f, joker, i_za, i_aa, i),
2992 T_gp);
2993 }
2994 }
2995 }
2996 }
2997
2999
3000 // Loop over outgoing zenith angles
3001 for (Index i_za_out = 0;
3002 i_za_out < orig_part.pha_mat_data.nshelves();
3003 i_za_out++) {
3004 // Loop over outgoing azimuth angles
3005 for (Index i_aa_out = 0;
3006 i_aa_out < orig_part.pha_mat_data.nbooks();
3007 i_aa_out++) {
3008 // Loop over incoming zenith angles
3009 for (Index i_za_inc = 0;
3010 i_za_inc < orig_part.pha_mat_data.npages();
3011 i_za_inc++) {
3012 // Loop over incoming azimuth angles
3013 for (Index i_aa_inc = 0;
3014 i_aa_inc < orig_part.pha_mat_data.nrows();
3015 i_aa_inc++) {
3016 // Weighted sum of pha_mat_data
3017 if (orig_part.T_grid.nelem() == 1) {
3018 Vector v = orig_part.pha_mat_data(i_f,
3019 0,
3020 i_za_out,
3021 i_aa_out,
3022 i_za_inc,
3023 i_aa_inc,
3024 joker);
3025 v *= pnd_field(pnd_index, i_lv, 0, 0);
3026 this_part.pha_mat_data(i_f,
3027 0,
3028 i_za_out,
3029 i_aa_out,
3030 i_za_inc,
3031 i_aa_inc,
3032                                           joker) += v;
3033 } else {
3034 // Temperature interpolation
3035 for (Index i = 0; i < orig_part.pha_mat_data.ncols();
3036 i++) {
3037 this_part.pha_mat_data(i_f,
3038 0,
3039 i_za_out,
3040 i_aa_out,
3041 i_za_inc,
3042 i_aa_inc,
3043 i) +=
3044 pnd_field(pnd_index, i_lv, 0, 0) *
3045 interp(itw,
3046 orig_part.pha_mat_data(i_f,
3047 joker,
3048 i_za_out,
3049 i_aa_out,
3050 i_za_inc,
3051 i_aa_inc,
3052 i),
3053 T_gp);
3054 }
3055 }
3056 }
3057 }
3058 }
3059 }
3060 }
3061 }
3062 }
3063 }
3064 }
3065
3066 // Set new pnd_field at lowest altitude to 0 if the cloudbox doesn't touch
3067 // the ground.
3068 // The consistency for the original pnd_field has already been ensured by
3069 // cloudbox_checkedCalc
3070 if (z_field(cloudbox_limits[0], 0, 0) > z_surface(0, 0))
3071 pnd_field_merged(0, 0, 0, 0) = 0.;
3072
3073 pnd_field = pnd_field_merged;
3074 scat_data = scat_data_merged;
3075 scat_meta = scat_meta_merged;
3076 scat_species = scat_species_merged;
3077}
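// ---------------------------------------------------------------------------
// [Editorial note, not part of the original source] ScatSpeciesMerge replaces
// all scattering elements by one pnd-weighted, pre-summed element per cloudbox
// pressure level (1D atmospheres only) and resets cloudbox_checked to 0, so
// the cloudbox has to be re-checked afterwards. Sketch of the calling order
// (workspace method names as used in this file):
//
//   // cloudbox_checkedCalc
//   // ScatSpeciesMerge
//   // cloudbox_checkedCalc   (re-check the modified cloudbox variables)
// ---------------------------------------------------------------------------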
3078
3079/* Workspace method: Doxygen documentation will be auto-generated */
3080void ExtractFromMetaSingleScatSpecies(
3081    //WS Output:
3082 Vector& meta_param,
3083 //WS Input:
3084 const ArrayOfArrayOfScatteringMetaData& scat_meta,
3085 const String& meta_name,
3086 const Index& scat_species_index,
3087 const Verbosity& /*verbosity*/) {
3088 if (scat_species_index < 0) {
3089 ostringstream os;
3090 os << "scat_species_index can't be <0!";
3091 throw runtime_error(os.str());
3092 }
3093
3094 const Index nss = scat_meta.nelem();
3095
3096 // check that scat_meta actually has at least scat_species_index elements
3097 if (!(nss > scat_species_index)) {
3098 ostringstream os;
3099 os << "Can not extract data for scattering species #" << scat_species_index
3100 << "\n"
3101 << "because scat_meta has only " << nss << " elements.";
3102 throw runtime_error(os.str());
3103 }
3104
3105 const Index nse = scat_meta[scat_species_index].nelem();
3106 meta_param.resize(nse);
3107
3108 for (Index i = 0; i < nse; i++) {
3109 if (meta_name == "mass")
3110 meta_param[i] = scat_meta[scat_species_index][i].mass;
3111 else if (meta_name == "diameter_max")
3112 meta_param[i] = scat_meta[scat_species_index][i].diameter_max;
3113 else if (meta_name == "diameter_volume_equ")
3114 meta_param[i] = scat_meta[scat_species_index][i].diameter_volume_equ;
3115 else if (meta_name == "diameter_area_equ_aerodynamical")
3116 meta_param[i] =
3117 scat_meta[scat_species_index][i].diameter_area_equ_aerodynamical;
3118 else {
3119 ostringstream os;
3120      os << "Meta parameter \"" << meta_name << "\" is unknown.";
3121 throw runtime_error(os.str());
3122 }
3123 }
3124}
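// ---------------------------------------------------------------------------
// [Editorial note, not part of the original source] Example use of
// ExtractFromMetaSingleScatSpecies: extract the maximum diameter of every
// scattering element of species #0 (inputs assumed to exist):
//
//   Vector dmax;
//   ExtractFromMetaSingleScatSpecies(dmax, scat_meta, "diameter_max", 0,
//                                    verbosity);
// ---------------------------------------------------------------------------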
3125
3126
This file contains the definition of Array.
Index TotalNumberOfElements(const Array< Array< base > > &aa)
Determine total number of elements in an ArrayOfArray.
Definition: array.h:241
Index FlattenedIndex(const Array< Array< base > > &aa, Index outer, Index inner=0)
Determine the index of an element in a flattened version of the array.
Definition: array.h:252
The global header file for ARTS.
Constants of physical expressions as constexpr.
Common ARTS conversions.
void pha_mat_spt_agendaExecute(Workspace &ws, Tensor5 &pha_mat_spt, const Index za_index, const Index scat_lat_index, const Index scat_lon_index, const Index scat_p_index, const Index aa_index, const Numeric rtp_temperature, const Agenda &input_agenda)
Definition: auto_md.cc:25479
void chk_interpolation_grids(const String &which_interpolation, ConstVectorView old_grid, ConstVectorView new_grid, const Index order, const Numeric &extpolfac, const bool islog)
Check interpolation grids.
Definition: check_input.cc:906
The Agenda class.
Definition: agenda_class.h:69
This can be used to make arrays out of anything.
Definition: array.h:48
Index nelem() const ARTS_NOEXCEPT
Definition: array.h:92
Index ncols() const noexcept
Definition: matpackIV.h:142
Index nbooks() const noexcept
Definition: matpackIV.h:139
Index nrows() const noexcept
Definition: matpackV.h:152
Index ncols() const noexcept
Definition: matpackV.h:153
Index npages() const noexcept
Definition: matpackV.h:151
Index nbooks() const noexcept
Definition: matpackV.h:150
Index nshelves() const noexcept
Definition: matpackV.h:149
Index ncols() const noexcept
Definition: matpackVII.h:159
Index npages() const noexcept
Definition: matpackVII.h:157
Index nrows() const noexcept
Definition: matpackVII.h:158
Index nlibraries() const noexcept
Definition: matpackVII.h:153
Index nshelves() const noexcept
Definition: matpackVII.h:155
Index nbooks() const noexcept
Definition: matpackVII.h:156
A constant view of a Vector.
Definition: matpackI.h:512
Index nelem() const noexcept
Returns the number of elements.
Definition: matpackI.h:536
The Matrix class.
Definition: matpackI.h:1261
Index NumberOfFrequencies() const
The number of frequencies of the propagation matrix.
void MultiplyAndAdd(const Numeric x, const PropagationMatrix &y)
Multiply input by scalar and add to this.
void SetZero()
Sets all data to zero.
Index StokesDimensions() const
The stokes dimension of the propagation matrix.
The range class.
Definition: matpackI.h:159
Stokes vector is as Propagation matrix but only has 4 possible values.
void MultiplyAndAdd(const Numeric x, const PropagationMatrix &y)
Add a scaled version of the input.
The Tensor3 class.
Definition: matpackIII.h:346
void resize(Index p, Index r, Index c)
Resize function.
Definition: matpackIII.cc:661
The Tensor4 class.
Definition: matpackIV.h:429
void resize(Index b, Index p, Index r, Index c)
Resize function.
Definition: matpackIV.cc:1054
The Tensor5 class.
Definition: matpackV.h:516
void resize(Index s, Index b, Index p, Index r, Index c)
Resize function.
Definition: matpackV.cc:1741
The Tensor6 class.
Definition: matpackVI.h:1099
The Tensor7 class.
Definition: matpackVII.h:2399
void resize(Index l, Index v, Index s, Index b, Index p, Index r, Index c)
Resize function.
Definition: matpackVII.cc:5488
The Vector class.
Definition: matpackI.h:899
void resize(Index n)
Resize function.
Definition: matpackI.cc:390
Workspace class.
Definition: workspace_ng.h:53
void toupper()
Convert to upper case.
Definition: mystring.h:147
#define DEBUG_ONLY(...)
Definition: debug.h:52
#define ARTS_ASSERT(condition,...)
Definition: debug.h:83
#define ARTS_USER_ERROR_IF(condition,...)
Definition: debug.h:134
The declarations of all the exception classes.
void gridpos(ArrayOfGridPos &gp, ConstVectorView old_grid, ConstVectorView new_grid, const Numeric &extpolfac)
Set up a grid position Array.
void interpweights(VectorView itw, const GridPos &tc)
Red 1D interpolation weights.
Numeric interp(ConstVectorView itw, ConstVectorView a, const GridPos &tc)
Red 1D Interpolate.
void gridpos_copy(GridPos &gp_new, const GridPos &gp_old)
gridpos_copy
Header file for interpolation.cc.
#define abs(x)
bool is_size(ConstVectorView x, const Index &n)
Verifies that the size of x is l.
Definition: logic.cc:82
bool is_same_within_epsilon(const Numeric &a, const Numeric &b, const Numeric &epsilon)
Check, if two numbers agree within a given epsilon.
Definition: logic.cc:354
Header file for logic.cc.
void opt_prop_sptFromScat_data(ArrayOfPropagationMatrix &ext_mat_spt, ArrayOfStokesVector &abs_vec_spt, const ArrayOfArrayOfSingleScatteringData &scat_data, const Index &scat_data_checked, const Vector &za_grid, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Index &f_index, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: opt_prop_sptFromScat_data.
void abs_vecAddGas(StokesVector &abs_vec, const PropagationMatrix &propmat_clearsky, const Verbosity &)
WORKSPACE METHOD: abs_vecAddGas.
void ext_matAddGas(PropagationMatrix &ext_mat, const PropagationMatrix &propmat_clearsky, const Verbosity &)
WORKSPACE METHOD: ext_matAddGas.
void scat_data_monoExtract(ArrayOfArrayOfSingleScatteringData &scat_data_mono, const ArrayOfArrayOfSingleScatteringData &scat_data, const Index &f_index, const Verbosity &)
WORKSPACE METHOD: scat_data_monoExtract.
void scat_dataCalc(ArrayOfArrayOfSingleScatteringData &scat_data, const ArrayOfArrayOfSingleScatteringData &scat_data_raw, const Vector &f_grid, const Index &interp_order, const Verbosity &)
WORKSPACE METHOD: scat_dataCalc.
void pha_matCalc(Tensor4 &pha_mat, const Tensor5 &pha_mat_spt, const Tensor4 &pnd_field, const Index &atmosphere_dim, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &)
WORKSPACE METHOD: pha_matCalc.
void scat_dataReduceT(ArrayOfArrayOfSingleScatteringData &scat_data, const Index &i_ss, const Numeric &T, const Index &interp_order, const Index &phamat_only, const Numeric &threshold, const Verbosity &)
WORKSPACE METHOD: scat_dataReduceT.
#define PHA_MAT_DATA
constexpr Numeric DEG2RAD
#define PART_TYPE
void ExtractFromMetaSingleScatSpecies(Vector &meta_param, const ArrayOfArrayOfScatteringMetaData &scat_meta, const String &meta_name, const Index &scat_species_index, const Verbosity &)
WORKSPACE METHOD: ExtractFromMetaSingleScatSpecies.
void opt_prop_sptFromMonoData(ArrayOfPropagationMatrix &ext_mat_spt, ArrayOfStokesVector &abs_vec_spt, const ArrayOfArrayOfSingleScatteringData &scat_data_mono, const Vector &za_grid, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: opt_prop_sptFromMonoData.
#define ABS_VEC_DATA
#define AA_DATAGRID
#define EXT_MAT_DATA
void pha_mat_sptFromMonoData(Tensor5 &pha_mat_spt, const ArrayOfArrayOfSingleScatteringData &scat_data_mono, const Index &doit_za_grid_size, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: pha_mat_sptFromMonoData.
void pha_mat_sptFromData(Tensor5 &pha_mat_spt, const ArrayOfArrayOfSingleScatteringData &scat_data, const Vector &za_grid, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Index &f_index, const Vector &f_grid, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: pha_mat_sptFromData.
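The pha_mat_spt* workspace methods listed here all fill the same Tensor5 (scattering element, zenith, azimuth, stokes, stokes) for one propagation direction and one frequency; they differ only in where the single scattering data come from. A call sketch for pha_mat_sptFromData under the signature above; all inputs are assumed to be prepared elsewhere (e.g. by the DOIT setup), and every index value is a placeholder.

// Sketch: fill pha_mat_spt for one (za_index, aa_index, f_index) in a 1D atmosphere.
void example_pha_mat_spt(Tensor5& pha_mat_spt,
                         const ArrayOfArrayOfSingleScatteringData& scat_data,
                         const Vector& za_grid,
                         const Vector& aa_grid,
                         const Vector& f_grid,
                         const Numeric rtp_temperature,
                         const Tensor4& pnd_field,
                         const Verbosity& verbosity) {
  const Index za_index = 0;        // placeholder propagation direction
  const Index aa_index = 0;
  const Index f_index = 0;         // placeholder frequency index
  const Index scat_p_index = 0;    // placeholder cloudbox pressure level
  const Index scat_lat_index = 0;  // unused in a 1D atmosphere
  const Index scat_lon_index = 0;  // unused in a 1D atmosphere

  pha_mat_sptFromData(pha_mat_spt, scat_data, za_grid, aa_grid,
                      za_index, aa_index, f_index, f_grid,
                      rtp_temperature, pnd_field,
                      scat_p_index, scat_lat_index, scat_lon_index,
                      verbosity);
}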
void pha_mat_sptFromDataDOITOpt(Tensor5 &pha_mat_spt, const ArrayOfTensor7 &pha_mat_sptDOITOpt, const ArrayOfArrayOfSingleScatteringData &scat_data_mono, const Index &doit_za_grid_size, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &)
WORKSPACE METHOD: pha_mat_sptFromDataDOITOpt.
constexpr Numeric RAD2DEG
#define T_DATAGRID
void opt_prop_bulkCalc(PropagationMatrix &ext_mat, StokesVector &abs_vec, const ArrayOfPropagationMatrix &ext_mat_spt, const ArrayOfStokesVector &abs_vec_spt, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &)
WORKSPACE METHOD: opt_prop_bulkCalc.
#define PND_LIMIT
#define F_DATAGRID
void DoitScatteringDataPrepare(Workspace &ws, ArrayOfTensor7 &pha_mat_sptDOITOpt, ArrayOfArrayOfSingleScatteringData &scat_data_mono, Tensor7 &pha_mat_doit, Vector &aa_grid, const Index &doit_za_grid_size, const ArrayOfArrayOfSingleScatteringData &scat_data, const Index &scat_data_checked, const Index &f_index, const Index &atmosphere_dim, const Index &stokes_dim, const Tensor3 &t_field, const ArrayOfIndex &cloudbox_limits, const Tensor4 &pnd_field, const Agenda &pha_mat_spt_agenda, const Verbosity &verbosity)
WORKSPACE METHOD: DoitScatteringDataPrepare.
void opt_prop_sptFromData(ArrayOfPropagationMatrix &ext_mat_spt, ArrayOfStokesVector &abs_vec_spt, const ArrayOfArrayOfSingleScatteringData &scat_data, const Vector &za_grid, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Index &f_index, const Vector &f_grid, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: opt_prop_sptFromData.
void scat_data_monoCalc(ArrayOfArrayOfSingleScatteringData &scat_data_mono, const ArrayOfArrayOfSingleScatteringData &scat_data, const Vector &f_grid, const Index &f_index, const Verbosity &)
WORKSPACE METHOD: scat_data_monoCalc.
void pha_mat_sptFromScat_data(Tensor5 &pha_mat_spt, const ArrayOfArrayOfSingleScatteringData &scat_data, const Index &scat_data_checked, const Vector &za_grid, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Index &f_index, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: pha_mat_sptFromScat_data.
constexpr Numeric PI
#define ZA_DATAGRID
void scat_dataCheck(const ArrayOfArrayOfSingleScatteringData &scat_data, const String &check_type, const Numeric &threshold, const Verbosity &verbosity)
WORKSPACE METHOD: scat_dataCheck.
void ScatSpeciesMerge(Tensor4 &pnd_field, ArrayOfArrayOfSingleScatteringData &scat_data, ArrayOfArrayOfScatteringMetaData &scat_meta, ArrayOfString &scat_species, Index &cloudbox_checked, const Index &atmosphere_dim, const Index &cloudbox_on, const ArrayOfIndex &cloudbox_limits, const Tensor3 &t_field, const Tensor3 &z_field, const Matrix &z_surface, const Verbosity &)
WORKSPACE METHOD: ScatSpeciesMerge.
void nlinspace(Vector &x, const Numeric start, const Numeric stop, const Index n)
Create a vector x with n linearly spaced elements from start to stop.
Definition: math_funcs.cc:227
Numeric AngIntegrate_trapezoid(ConstMatrixView Integrand, ConstVectorView za_grid, ConstVectorView aa_grid)
Integrate a field over solid angle on the given zenith (za) and azimuth (aa) angle grids using the trapezoidal rule.
Definition: math_funcs.cc:315
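The two math_funcs helpers above are often used together: build equidistant angle grids with nlinspace, then integrate a field over solid angle with AngIntegrate_trapezoid. A short sketch with example grid ranges in degrees:

#include "math_funcs.h"  // nlinspace, AngIntegrate_trapezoid

// Sketch: integrate a (za, aa) field over the full sphere.
Numeric example_solid_angle_integral(ConstMatrixView integrand) {
  Vector za_grid, aa_grid;
  nlinspace(za_grid, 0.0, 180.0, integrand.nrows());  // zenith angles [deg]
  nlinspace(aa_grid, 0.0, 360.0, integrand.ncols());  // azimuth angles [deg]
  return AngIntegrate_trapezoid(integrand, za_grid, aa_grid);
}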
NUMERIC Numeric
The type to use for all floating point numbers.
Definition: matpack.h:33
INDEX Index
The type to use for all integer numbers and indices.
Definition: matpack.h:39
const Joker joker
messages.h
Declarations having to do with the four output streams.
#define CREATE_OUT1
Definition: messages.h:204
#define CREATE_OUT2
Definition: messages.h:205
#define CREATE_OUT0
Definition: messages.h:203
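The CREATE_OUT* macros create local output-stream objects bound to the Verbosity argument of the surrounding method; the pattern below follows the usage in this file and is only a sketch (the message text is made up).

// Sketch: emit a level-2 status message inside a workspace method.
void example_report(const Verbosity& verbosity) {
  CREATE_OUT2;  // defines a local 'out2' tied to 'verbosity'
  out2 << "  Interpolating scattering data in frequency.\n";
}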
constexpr Numeric pi
Mathematical constant pi; the constants in arts_constants.h are generated with the Python Decimal package.
constexpr Numeric h
Planck constant convenience name [J s].
constexpr auto deg2rad(auto x) noexcept
Converts degrees to radians.
constexpr auto rad2deg(auto x) noexcept
Converts radians to degrees.
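A minimal sketch of the angle conversions listed above. That they live in the Conversion namespace of arts_conversions.h is an assumption based on this file's includes, not stated in this index.

#include "arts_conversions.h"  // assumed to provide Conversion::deg2rad / rad2deg

// Sketch: convert a scattering zenith angle between degrees and radians.
void example_angle_conversion() {
  const Numeric za_deg = 30.0;
  const Numeric za_rad = Conversion::deg2rad(za_deg);  // 30 deg -> ~0.5236 rad
  const Numeric back   = Conversion::rad2deg(za_rad);  // ~30 deg again
  (void)back;  // silence unused-variable warnings in this sketch
}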
constexpr Numeric l(const Index p0, const Index n, const Numeric x, const SortedVectorType &xi, const Index j, const std::pair< Numeric, Numeric > cycle={ -180, 180}) noexcept
void reinterp(VectorView out, const ConstVectorView &iy, const Grid< Vector, 1 > &iw, const Array< Lagrange > &dim0)
Array< Lagrange > LagrangeVector(const ConstVectorView &xs, const ConstVectorView &xi, const Index polyorder, const Numeric extrapol, const bool do_derivs, const GridType type, const std::pair< Numeric, Numeric > cycle)
constexpr bool isnan(double d) noexcept
Definition: nonstd.h:53
void abs_vecTransform(StokesVector &abs_vec_lab, ConstTensor3View abs_vec_data, ConstVectorView za_datagrid, ConstVectorView aa_datagrid, const PType &ptype, const Numeric &za_sca, const Numeric &aa_sca, const Verbosity &verbosity)
Transformation of absorption vector.
void ext_matTransform(PropagationMatrix &ext_mat_lab, ConstTensor3View ext_mat_data, ConstVectorView za_datagrid, ConstVectorView aa_datagrid, const PType &ptype, const Numeric &za_sca, const Numeric &aa_sca, const Verbosity &verbosity)
Transformation of extinction matrix.
void pha_matTransform(MatrixView pha_mat_lab, ConstTensor5View pha_mat_data, ConstVectorView za_datagrid, ConstVectorView aa_datagrid, const PType &ptype, const Index &za_sca_idx, const Index &aa_sca_idx, const Index &za_inc_idx, const Index &aa_inc_idx, ConstVectorView za_grid, ConstVectorView aa_grid, const Verbosity &verbosity)
Transformation of phase matrix.
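The transformation routines above map optical property data from the particle frame to the laboratory frame for one propagation direction. A call sketch under the listed signatures, with the frequency/temperature interpolation of the data assumed to have been done beforehand (as in opt_prop_sptFromData); all variable names are placeholders.

// Sketch: transform interpolated data to the lab frame for the
// propagation direction (za_sca, aa_sca), given in degrees.
void example_transform(PropagationMatrix& ext_mat_lab,
                       StokesVector& abs_vec_lab,
                       ConstTensor3View ext_mat_data_int,  // [za, aa, matrix elements]
                       ConstTensor3View abs_vec_data_int,
                       const Vector& za_datagrid,
                       const Vector& aa_datagrid,
                       const PType ptype,
                       const Numeric za_sca,
                       const Numeric aa_sca,
                       const Verbosity& verbosity) {
  ext_matTransform(ext_mat_lab, ext_mat_data_int,
                   za_datagrid, aa_datagrid, ptype, za_sca, aa_sca, verbosity);
  abs_vecTransform(abs_vec_lab, abs_vec_data_int,
                   za_datagrid, aa_datagrid, ptype, za_sca, aa_sca, verbosity);
}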
Scattering database structure and functions.
@ PTYPE_AZIMUTH_RND
Definition: optproperties.h:54
@ PTYPE_TOTAL_RND
Definition: optproperties.h:55
Contains sorting routines.
GridPos
Structure to store a grid position.
Definition: interpolation.h:73
Index idx
Definition: interpolation.h:74
A Lagrange interpolation computer.
Numeric diameter_area_equ_aerodynamical
Numeric diameter_volume_equ
#define v
This file contains basic functions to handle XML data files.